golang-github-minio-madmin-go-3.0.104/.copyright.tmpl

Copyright (c) ${years} ${owner}.
This file is part of ${projectname}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
golang-github-minio-madmin-go-3.0.104/.github/workflows/go.yml

name: Go
on:
  pull_request:
    branches:
      - main
      - master
      - v3
  push:
    branches:
      - main
      - master
      - v3

jobs:
  build:
    name: Lint checks Go ${{ matrix.go-version }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go-version: [1.22.x, 1.23.x]
    steps:
      - name: Set up Go ${{ matrix.go-version }}
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
        id: go
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4
      - name: Check lint
        uses: golangci/golangci-lint-action@v7
      - name: Regenerate, vet and test
        run: |
          go vet ./...
          go install -v github.com/tinylib/msgp@latest
          go install -v golang.org/x/tools/cmd/stringer@latest
          export PATH=${PATH}:$(go env GOPATH)/bin
          go generate ./... >/dev/null
          (! git diff --name-only | grep '_gen.go$') || (echo "Non-committed changes detected in auto-generated code, please commit them to proceed." && false)

  test:
    name: Testing Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.23.x]
        os: [ubuntu-latest, windows-latest, macos-latest]
    steps:
      - name: Set up Go ${{ matrix.go-version }} on ${{ matrix.os }}
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
        id: go
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4
      - name: Test on ${{ matrix.os }}
        if: matrix.os == 'ubuntu-latest'
        run: |
          go test -v -race ./...
          GOARCH=386 GOOS=linux go test -v ./...
      - name: Test on ${{ matrix.os }}
        if: matrix.os != 'ubuntu-latest'
        run: |
          go test -v -race ./...
golang-github-minio-madmin-go-3.0.104/.github/workflows/vulncheck.yml

name: VulnCheck
on:
  pull_request:
    branches:
      - master
      - main
      - v3
  push:
    branches:
      - master
      - main
      - v3

jobs:
  vulncheck:
    name: Analysis
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go-version: [ 1.23.x ]
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
      - name: Get govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
        shell: bash
      - name: Run govulncheck
        run: govulncheck ./...
        shell: bash
golang-github-minio-madmin-go-3.0.104/.gitignore

# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
*~
.idea

golang-github-minio-madmin-go-3.0.104/.golangci.yml

version: "2"
linters:
  default: none
  enable:
    - govet
    - ineffassign
    - misspell
    - prealloc
    - revive
    - staticcheck
    - unconvert
    - unused
  exclusions:
    generated: lax
    rules:
      - path: (.+)\.go$
        text: should have comment
      - path: (.+)\.go$
        text: should have a package comment
      - path: (.+)\.go$
        text: error strings should not be capitalized or end with punctuation or a newline
      - path: (.+)\.go$
        text: don't use ALL_CAPS in Go names
    paths:
      - third_party$
      - builtin$
      - examples$

formatters:
  enable:
    - gofumpt
    - goimports
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$
golang-github-minio-madmin-go-3.0.104/CONTRIBUTING.md

### Developer Guidelines
``madmin-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
- Fork it
- Create your feature branch (git checkout -b my-new-feature)
- Commit your changes (git commit -am 'Add some feature')
- Push to the branch (git push origin my-new-feature)
- Create new Pull Request
* When you're ready to create a pull request, be sure to:
- Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
- Run `go fmt`
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- Make sure `go test -race ./...` and `go build` completes.
NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set the credentials as the environment variables
``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, please use ``go test -short -race ./...``
* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project
- `madmin-go` project is strictly conformant with Golang style
- if you happen to observe offending code, please feel free to send a pull request
golang-github-minio-madmin-go-3.0.104/LICENSE

GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year>  <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
golang-github-minio-madmin-go-3.0.104/MAINTAINERS.md

# For maintainers only
## Responsibilities
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
### Making new releases
Tag and sign your release commit. Additionally, this step requires you to have access to MinIO's trusted private key.
```sh
$ export GNUPGHOME=/media/${USER}/minio/trusted
$ git tag -s 4.0.0
$ git push
$ git push --tags
```
### Update version
Once the release has been made, update the `libraryVersion` constant in `api.go` to the next to-be-released version.
```sh
$ grep libraryVersion api.go
libraryVersion = "4.0.1"
```
Commit your changes
```
$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
```
### Announce
Announce the new release by adding release notes at https://github.com/minio/madmin-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of the salient features in this release, and Changelog contains the list of all commits since the last release.
To generate `changelog`
```sh
$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
```
golang-github-minio-madmin-go-3.0.104/NOTICE

MinIO Project, (C) 2015-2022 MinIO, Inc.
This product includes software developed at MinIO, Inc.
(https://min.io/).
The MinIO project contains unmodified/modified subcomponents too with
separate copyright notices and license terms. Your use of the source
code for these subcomponents is subject to the terms and conditions
of GNU Affero General Public License 3.0.
golang-github-minio-madmin-go-3.0.104/README.md

# Golang Admin Client API Reference [Slack](https://slack.min.io)
The MinIO Admin Golang Client SDK provides APIs to manage MinIO services.
This quickstart guide will show you how to install the MinIO Admin client SDK, connect to the MinIO admin service, and walk through a simple example that fetches server information.
This document assumes that you have a working [Golang setup](https://golang.org/doc/install).
## Initialize MinIO Admin Client object.
## MinIO
```go
package main
import (
	"context"
	"fmt"
	"log"

	"github.com/minio/madmin-go/v3"
)

func main() {
	// Use a secure connection.
	ssl := true

	// Initialize the MinIO Admin client object.
	mdmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", ssl)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Fetch information for all cluster nodes.
	serversInfo, err := mdmClnt.ServerInfo(context.Background())
	if err != nil {
		fmt.Println(err)
		return
	}

	for _, peerInfo := range serversInfo {
		log.Printf("Node: %s, Info: %v\n", peerInfo.Addr, peerInfo.Data)
	}
}
```
| Service operations | Info operations | Healing operations | Config operations |
|:------------------------------------|:-----------------------------------------|:-------------------|:--------------------------|
| [`ServiceTrace`](#ServiceTrace) | [`ServerInfo`](#ServerInfo) | [`Heal`](#Heal) | [`GetConfig`](#GetConfig) |
| [`ServiceStop`](#ServiceStop) | [`StorageInfo`](#StorageInfo) | | [`SetConfig`](#SetConfig) |
| [`ServiceRestart`](#ServiceRestart) | [`AccountInfo`](#AccountInfo) | | |
| Top operations | IAM operations | Misc | KMS |
|:------------------------|:--------------------------------------|:--------------------------------------------------|:--------------------------------|
| [`TopLocks`](#TopLocks) | [`AddUser`](#AddUser) | [`StartProfiling`](#StartProfiling) | [`GetKeyStatus`](#GetKeyStatus) |
| | [`SetPolicy`](#SetPolicy) | [`DownloadProfilingData`](#DownloadProfilingData) | |
| | [`ListUsers`](#ListUsers) | [`ServerUpdate`](#ServerUpdate) | |
| | [`AddCannedPolicy`](#AddCannedPolicy) | | |
## 1. Constructor
### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*AdminClient, error)
Initializes a new admin client object.
__Parameters__
| Param | Type | Description |
|:------------------|:---------|:----------------------------------------------------------|
| `endpoint` | _string_ | MinIO endpoint. |
| `accessKeyID` | _string_ | Access key for the object storage endpoint. |
| `secretAccessKey` | _string_ | Secret key for the object storage endpoint. |
| `ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. |
## 2. Service operations
### ServiceStatus(ctx context.Context) (ServiceStatusMetadata, error)
Fetches service status: disk space used, backend type, and the number of disks online/offline (applicable in distributed mode).
| Param | Type | Description |
|-----------------|-------------------------|------------------------------------------------------------|
| `serviceStatus` | _ServiceStatusMetadata_ | Represents current server status info in following format: |
| Param | Type | Description |
|-----------------------------|-----------------|------------------------------------|
| `st.ServerVersion.Version` | _string_ | Server version. |
| `st.ServerVersion.CommitID` | _string_ | Server commit id. |
| `st.Uptime` | _time.Duration_ | Server uptime duration in seconds. |
__Example__
```go
st, err := madmClnt.ServiceStatus(context.Background())
if err != nil {
	log.Fatalln(err)
}
log.Printf("%#v\n", st)
```
### ServiceRestart(ctx context.Context) error
Sends a service action restart command to MinIO server.
__Example__
```go
// To restart the service, restarts all servers in the cluster.
err := madmClnt.ServiceRestart(context.Background())
if err != nil {
	log.Fatalln(err)
}
log.Println("Success")
```
### ServiceStop(ctx context.Context) error
Sends a service action stop command to MinIO server.
__Example__
```go
// To stop the service, stops all servers in the cluster.
err := madmClnt.ServiceStop(context.Background())
if err != nil {
	log.Fatalln(err)
}
log.Println("Success")
```
### ServiceTrace(ctx context.Context, allTrace bool, doneCh <-chan struct{}) <-chan TraceInfo
Enable HTTP request tracing on all nodes in a MinIO cluster
__Example__
``` go
doneCh := make(chan struct{})
defer close(doneCh)
// listen to all trace including internal API calls
allTrace := true
// Start listening on all trace activity.
traceCh := madmClnt.ServiceTrace(context.Background(), allTrace, doneCh)
for traceInfo := range traceCh {
	fmt.Println(traceInfo.String())
}
```
## 3. Info operations
### ServerInfo(ctx context.Context) ([]ServerInfo, error)
Fetches information for all cluster nodes, such as server properties, storage information, network statistics, etc.
| Param | Type | Description |
|----------------------------------|--------------------|--------------------------------------------------------------------|
| `si.Addr` | _string_ | Address of the server the following information is retrieved from. |
| `si.ConnStats` | _ServerConnStats_ | Connection statistics from the given server. |
| `si.HTTPStats` | _ServerHTTPStats_ | HTTP connection statistics from the given server. |
| `si.Properties` | _ServerProperties_ | Server properties such as region, notification targets. |
| Param | Type | Description |
|-----------------------------|-----------------|----------------------------------------------------|
| `ServerProperties.Uptime` | _time.Duration_ | Total duration in seconds since server is running. |
| `ServerProperties.Version` | _string_ | Current server version. |
| `ServerProperties.CommitID` | _string_ | Current server commitID. |
| `ServerProperties.Region` | _string_ | Configured server region. |
| `ServerProperties.SQSARN` | _[]string_ | List of notification target ARNs. |
| Param | Type | Description |
|------------------------------------|----------|-------------------------------------|
| `ServerConnStats.TotalInputBytes` | _uint64_ | Total bytes received by the server. |
| `ServerConnStats.TotalOutputBytes` | _uint64_ | Total bytes sent by the server. |
| Param | Type | Description |
|--------------------------------------|-------------------------|---------------------------------------------------------|
| `ServerHTTPStats.TotalHEADStats` | _ServerHTTPMethodStats_ | Total statistics regarding HEAD operations |
| `ServerHTTPStats.SuccessHEADStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful HEAD operations |
| `ServerHTTPStats.TotalGETStats` | _ServerHTTPMethodStats_ | Total statistics regarding GET operations |
| `ServerHTTPStats.SuccessGETStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful GET operations |
| `ServerHTTPStats.TotalPUTStats` | _ServerHTTPMethodStats_ | Total statistics regarding PUT operations |
| `ServerHTTPStats.SuccessPUTStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful PUT operations |
| `ServerHTTPStats.TotalPOSTStats` | _ServerHTTPMethodStats_ | Total statistics regarding POST operations |
| `ServerHTTPStats.SuccessPOSTStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful POST operations |
| `ServerHTTPStats.TotalDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding DELETE operations |
| `ServerHTTPStats.SuccessDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful DELETE operations |
| Param | Type | Description |
|-------------------------------------|----------|-------------------------------------------------|
| `ServerHTTPMethodStats.Count` | _uint64_ | Total number of operations. |
| `ServerHTTPMethodStats.AvgDuration` | _string_ | Average duration of Count number of operations. |
| Param | Type | Description |
|----------------------|----------|-------------------------------------------------------|
| `DriveInfo.UUID` | _string_ | Unique ID for each disk provisioned by server format. |
| `DriveInfo.Endpoint` | _string_ | Endpoint location of the remote/local disk. |
| `DriveInfo.State` | _string_ | Current state of the disk at endpoint. |
__Example__
```go
serversInfo, err := madmClnt.ServerInfo(context.Background())
if err != nil {
	log.Fatalln(err)
}

for _, peerInfo := range serversInfo {
	log.Printf("Node: %s, Info: %v\n", peerInfo.Addr, peerInfo.Data)
}
```
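
The per-server structures described in the tables above can be inspected directly. The following is a minimal sketch that reuses `serversInfo` from the example; the field paths are taken verbatim from the tables in this section and may differ in other SDK versions.

```go
for _, si := range serversInfo {
	// Field paths follow the ServerProperties and ServerConnStats tables above.
	log.Printf("Node: %s, version: %s, region: %s, uptime: %s",
		si.Addr, si.Properties.Version, si.Properties.Region, si.Properties.Uptime)
	log.Printf("Bytes received: %d, bytes sent: %d",
		si.ConnStats.TotalInputBytes, si.ConnStats.TotalOutputBytes)
}
```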
### StorageInfo(ctx context.Context) (StorageInfo, error)
Fetches Storage information for all cluster nodes.
| Param | Type | Description |
|-------------------------|------------|---------------------------------------------|
| `storageInfo.Used` | _[]int64_ | Used disk spaces. |
| `storageInfo.Total` | _[]int64_ | Total disk spaces. |
| `storageInfo.Available` | _[]int64_ | Available disk spaces. |
| `StorageInfo.Backend` | _struct{}_ | Represents backend type embedded structure. |
| Param | Type | Description |
|----------------------------|-----------------|--------------------------------------------------------------------------------------------------------------------------|
| `Backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or Erasure. |
| `Backend.OnlineDisks` | _BackendDisks_ | Total number of disks online per node (only applies to Erasure backend) represented in map[string]int, is empty for FS. |
| `Backend.OfflineDisks` | _BackendDisks_ | Total number of disks offline per node (only applies to Erasure backend) represented in map[string]int, is empty for FS. |
| `Backend.StandardSCParity` | _int_ | Parity disks set for standard storage class, is empty for FS. |
| `Backend.RRSCParity` | _int_ | Parity disks set for reduced redundancy storage class, is empty for FS. |
| `Backend.Sets` | _[][]DriveInfo_ | Represents topology of drives in erasure coded sets. |
__Example__
```go
storageInfo, err := madmClnt.StorageInfo(context.Background())
if err != nil {
	log.Fatalln(err)
}
log.Println(storageInfo)
```
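
As a rough illustration of the backend fields described above, the sketch below reuses `storageInfo` from the example; the field names are taken from the tables in this section and are assumptions beyond that.

```go
// Field names follow the StorageInfo tables above.
log.Printf("Backend type: %v, standard SC parity: %d, RRS parity: %d",
	storageInfo.Backend.Type, storageInfo.Backend.StandardSCParity, storageInfo.Backend.RRSCParity)

// OnlineDisks and OfflineDisks are per-node counts (map[string]int) for the Erasure backend.
for node, count := range storageInfo.Backend.OnlineDisks {
	log.Printf("Node %s: %d disks online", node, count)
}
```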
### AccountInfo(ctx context.Context) (AccountInfo, error)
Fetches accounting usage information for the currently authenticated user.
| Param | Type | Description |
|--------------------------------|----------------------|-------------------------|
| `AccountInfo.AccountName` | _string_ | Account name. |
| `AccountInfo.Buckets` | _[]BucketAccessInfo_ | Bucket usage info. |
| Param | Type | Description |
|----------------------------|-----------------|-----------------------------------------|
| `BucketAccessInfo.Name`    | _string_        | The name of the current bucket.         |
| `BucketAccessInfo.Size`    | _uint64_        | The total size of the current bucket.   |
| `BucketAccessInfo.Created` | _time.Time_     | Bucket creation time.                   |
| `BucketAccessInfo.Access`  | _AccountAccess_ | Type of access of the current account.  |
| Param | Type | Description |
|------------------------|---------|------------------------------------------------------------------|
| `AccountAccess.Read` | _bool_ | Indicate if the bucket is readable by the current account name. |
| `AccountAccess.Write` | _bool_ | Indicate if the bucket is writable by the current account name. |
__Example__
```go
accountInfo, err := madmClnt.AccountInfo(context.Background())
if err != nil {
	log.Fatalln(err)
}
log.Println(accountInfo)
```
## 5. Heal operations
### Heal(ctx context.Context, bucket, prefix string, healOpts HealOpts, clientToken string, forceStart bool, forceStop bool) (start HealStartSuccess, status HealTaskStatus, err error)
Start a heal sequence that scans data under the given (possibly empty)
`bucket` and `prefix`. The `recursive` bool turns on recursive
traversal under the given path. `dryRun` does not mutate on-disk data,
but performs data validation.
Two heal sequences on overlapping paths may not be initiated.
The progress of a heal should be followed using the same API `Heal`
by providing the `clientToken` previously obtained from a `Heal`
API. The server accumulates results of the heal traversal and waits
for the client to receive and acknowledge them using the status
request by providing `clientToken`.
__Example__
``` go
opts := madmin.HealOpts{
	Recursive: true,
	DryRun:    false,
}
forceStart := false
forceStop := false
healStart, _, err := madmClnt.Heal(context.Background(), "", "", opts, "", forceStart, forceStop)
if err != nil {
	log.Fatalln(err)
}
log.Printf("Heal sequence started, client token: %s", healStart.ClientToken)
```
#### HealStartSuccess structure
| Param | Type | Description |
|-------------------|-------------|----------------------------------------------------------------------------------------------------------------------------------|
| `s.ClientToken` | _string_ | A unique token for a successfully started heal operation, this token is used to request realtime progress of the heal operation. |
| `s.ClientAddress` | _string_ | Address of the client which initiated the heal operation, the client address has the form "host:port". |
| `s.StartTime` | _time.Time_ | Time when heal was initially started. |
#### HealTaskStatus structure
| Param | Type | Description |
|-------------------|--------------------|---------------------------------------------------|
| `s.Summary` | _string_ | Short status of heal sequence |
| `s.FailureDetail` | _string_ | Error message in case of heal sequence failure |
| `s.HealSettings` | _HealOpts_ | Contains the booleans set in the `HealStart` call |
| `s.Items` | _[]HealResultItem_ | Heal records for actions performed by server |
#### HealResultItem structure
| Param | Type | Description |
|------------------------|----------------|-----------------------------------------------------------------|
| `ResultIndex` | _int64_ | Index of the heal-result record |
| `Type` | _HealItemType_ | Represents kind of heal operation in the heal record |
| `Bucket` | _string_ | Bucket name |
| `Object` | _string_ | Object name |
| `Detail` | _string_ | Details about heal operation |
| `DiskInfo.AvailableOn` | _[]int_ | List of disks on which the healed entity is present and healthy |
| `DiskInfo.HealedOn` | _[]int_ | List of disks on which the healed entity was restored |
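
To follow the progress of a running heal sequence, call `Heal` again with the `ClientToken` returned by the initial call, as described above. A minimal sketch, reusing `opts` and `healStart` from the example in this section:

```go
// Poll the heal status using the client token from the initial Heal call.
_, status, err := madmClnt.Heal(context.Background(), "", "", opts, healStart.ClientToken, false, false)
if err != nil {
	log.Fatalln(err)
}
log.Printf("Heal summary: %s, records so far: %d", status.Summary, len(status.Items))
```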
## 6. Config operations
### GetConfig(ctx context.Context) ([]byte, error)
Get current `config.json` of a MinIO server.
__Example__
``` go
configBytes, err := madmClnt.GetConfig(context.Background())
if err != nil {
	log.Fatalf("failed due to: %v", err)
}

// Pretty-print config received as json.
var buf bytes.Buffer
err = json.Indent(&buf, configBytes, "", "\t")
if err != nil {
	log.Fatalf("failed due to: %v", err)
}

log.Println("config received successfully: ", buf.String())
```
### SetConfig(ctx context.Context, config io.Reader) error
Set a new `config.json` for a MinIO server.
__Example__
``` go
config := bytes.NewReader([]byte(`config.json contents go here`))
if err := madmClnt.SetConfig(context.Background(), config); err != nil {
	log.Fatalf("failed due to: %v", err)
}
log.Println("SetConfig was successful")
```
## 7. Top operations
### TopLocks(ctx context.Context) (LockEntries, error)
Get the oldest locks from MinIO server.
__Example__
``` go
locks, err := madmClnt.TopLocks(context.Background())
if err != nil {
	log.Fatalf("failed due to: %v", err)
}

out, err := json.Marshal(locks)
if err != nil {
	log.Fatalf("Marshal failed due to: %v", err)
}

log.Println("TopLocks received successfully: ", string(out))
```
## 8. IAM operations
### AddCannedPolicy(ctx context.Context, policyName string, policy *iampolicy.Policy) error
Create a new canned policy on MinIO server.
__Example__
``` go
policy, err := iampolicy.ParseConfig(strings.NewReader(`{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Resource": ["arn:aws:s3:::my-bucketname/*"],"Sid": ""}]}`))
if err != nil {
log.Fatalln(err)
}
if err = madmClnt.AddCannedPolicy(context.Background(), "get-only", policy); err != nil {
log.Fatalln(err)
}
```
### AddUser(ctx context.Context, user string, secret string) error
Add a new user on a MinIO server.
__Example__
``` go
if err = madmClnt.AddUser(context.Background(), "newuser", "newstrongpassword"); err != nil {
log.Fatalln(err)
}
```
### SetPolicy(ctx context.Context, policyName, entityName string, isGroup bool) error
Attach a canned policy, here `get-only`, to a given user or group on the MinIO server.
__Example__
``` go
if err = madmClnt.SetPolicy(context.Background(), "get-only", "newuser", false); err != nil {
log.Fatalln(err)
}
```
### ListUsers(ctx context.Context) (map[string]UserInfo, error)
Lists all users on the MinIO server.
__Example__
``` go
users, err := madmClnt.ListUsers(context.Background())
if err != nil {
log.Fatalln(err)
}
for k, v := range users {
fmt.Printf("User %s Status %s\n", k, v.Status)
}
```
## 9. Misc operations
### ServerUpdate(ctx context.Context, updateURL string) (ServerUpdateStatus, error)
Sends an update command to the MinIO server to update it to the latest release. In a distributed setup, all servers are updated atomically.
__Example__
```go
// Updates and restarts all servers in the cluster.
// updateURL optionally points at the binary to install; an empty value lets
// the server use its default update source.
updateURL := ""
us, err := madmClnt.ServerUpdate(context.Background(), updateURL)
if err != nil {
log.Fatalln(err)
}
if us.CurrentVersion != us.UpdatedVersion {
log.Printf("Updated server version from %s to %s successfully", us.CurrentVersion, us.UpdatedVersion)
}
```
### StartProfiling(ctx context.Context, profiler string) ([]StartProfilingResult, error)
Ask all nodes to start profiling using the specified profiler mode. The per-node outcome is reported in the returned slice.
__Example__
``` go
startProfilingResults, err := madmClnt.StartProfiling(context.Background(), "cpu")
if err != nil {
log.Fatalln(err)
}
for _, result := range startProfilingResults {
if !result.Success {
log.Printf("Unable to start profiling on node `%s`, reason = `%s`\n", result.NodeName, result.Error)
} else {
log.Printf("Profiling successfully started on node `%s`\n", result.NodeName)
}
}
```
### DownloadProfilingData(ctx context.Context) (io.ReadCloser, error)
Download the profiling data of all nodes as a single zip archive.
__Example__
``` go
profilingData, err := madmClnt.DownloadProfilingData(context.Background())
if err != nil {
log.Fatalln(err)
}
profilingFile, err := os.Create("/tmp/profiling-data.zip")
if err != nil {
log.Fatal(err)
}
if _, err := io.Copy(profilingFile, profilingData); err != nil {
log.Fatal(err)
}
if err := profilingFile.Close(); err != nil {
log.Fatal(err)
}
if err := profilingData.Close(); err != nil {
log.Fatal(err)
}
log.Println("Profiling data successfully downloaded.")
```
## 11. KMS
### GetKeyStatus(ctx context.Context, keyID string) (*KMSKeyStatus, error)
Requests status information about a particular KMS master key from a
MinIO server. The keyID is optional: if it is empty, the server uses
the default master key (configured via `MINIO_KMS_VAULT_KEY_NAME`
or `MINIO_KMS_MASTER_KEY`).
__Example__
``` go
keyInfo, err := madmClnt.GetKeyStatus(context.Background(), "my-minio-key")
if err != nil {
log.Fatalln(err)
}
if keyInfo.EncryptionErr != "" {
log.Fatalf("Failed to perform encryption operation using '%s': %v\n", keyInfo.KeyID, keyInfo.EncryptionErr)
}
if keyInfo.UpdateErr != "" {
log.Fatalf("Failed to perform key re-wrap operation using '%s': %v\n", keyInfo.KeyID, keyInfo.UpdateErr)
}
if keyInfo.DecryptionErr != "" {
log.Fatalf("Failed to perform decryption operation using '%s': %v\n", keyInfo.KeyID, keyInfo.DecryptionErr)
}
```
## License
All versions of this SDK starting from [v2.0.0](https://github.com/minio/madmin-go/releases/tag/v2.0.0) are distributed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/madmin-go/blob/master/LICENSE) file.
golang-github-minio-madmin-go-3.0.104/anonymous-api.go 0000664 0000000 0000000 00000020654 14774251704 0022535 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/http/cookiejar"
"net/http/httptrace"
"net/http/httputil"
"net/url"
"os"
"strings"
"github.com/minio/minio-go/v7/pkg/s3utils"
"golang.org/x/net/publicsuffix"
)
// AnonymousClient implements an anonymous http client for MinIO
type AnonymousClient struct {
// Parsed endpoint url provided by the caller
endpointURL *url.URL
// Indicate whether we are using https or not
secure bool
// Needs allocation.
httpClient *http.Client
// Advanced functionality.
isTraceEnabled bool
traceOutput io.Writer
}
func NewAnonymousClientNoEndpoint() (*AnonymousClient, error) {
// Initialize cookies to preserve server sent cookies if any and replay
// them upon each request.
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, err
}
clnt := new(AnonymousClient)
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Jar: jar,
Transport: DefaultTransport(true),
}
return clnt, nil
}
// NewAnonymousClient can be used for anonymous APIs without credentials set
func NewAnonymousClient(endpoint string, secure bool) (*AnonymousClient, error) {
// Initialize cookies to preserve server sent cookies if any and replay
// them upon each request.
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, err
}
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
if err != nil {
return nil, err
}
clnt := new(AnonymousClient)
// Remember whether we are using https or not
clnt.secure = secure
// Save endpoint URL, user agent for future uses.
clnt.endpointURL = endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Jar: jar,
Transport: DefaultTransport(secure),
}
return clnt, nil
}
// SetCustomTransport - set new custom transport.
func (an *AnonymousClient) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport
// ``http.DefaultTransport``.
//
// This transport is usually needed for debugging OR to add your
// own custom TLS certificates on the client transport, for custom
// CA's and certs which are not part of standard certificate
// authority follow this example :-
//
// tr := &http.Transport{
// TLSClientConfig: &tls.Config{RootCAs: pool},
// DisableCompression: true,
// }
// api.SetTransport(tr)
//
if an.httpClient != nil {
an.httpClient.Transport = customHTTPTransport
}
}
// TraceOn - enable HTTP tracing.
func (an *AnonymousClient) TraceOn(outputStream io.Writer) {
// if outputStream is nil then default to os.Stdout.
if outputStream == nil {
outputStream = os.Stdout
}
// Sets a new output stream.
an.traceOutput = outputStream
// Enable tracing.
an.isTraceEnabled = true
}
// executeMethod - does a simple http request to the target with parameters provided in the request
func (an AnonymousClient) executeMethod(ctx context.Context, method string, reqData requestData, trace *httptrace.ClientTrace) (res *http.Response, err error) {
defer func() {
if err != nil {
// close idle connections before returning, upon error.
an.httpClient.CloseIdleConnections()
}
}()
// Instantiate a new request.
var req *http.Request
req, err = an.newRequest(ctx, method, reqData)
if err != nil {
return nil, err
}
if trace != nil {
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
}
// Initiate the request.
res, err = an.do(req)
if err != nil {
return nil, err
}
return res, err
}
// newRequest - instantiate a new HTTP request for a given method.
func (an AnonymousClient) newRequest(ctx context.Context, method string, reqData requestData) (req *http.Request, err error) {
// If no method is supplied default to 'POST'.
if method == "" {
method = "POST"
}
// Construct a new target URL.
targetURL, err := an.makeTargetURL(reqData)
if err != nil {
return nil, err
}
// Initialize a new HTTP request for the method.
req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil)
if err != nil {
return nil, err
}
for k, v := range reqData.customHeaders {
req.Header.Set(k, v[0])
}
if length := len(reqData.content); length > 0 {
req.ContentLength = int64(length)
}
sum := sha256.Sum256(reqData.content)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
req.Body = io.NopCloser(bytes.NewReader(reqData.content))
return req, nil
}
// makeTargetURL make a new target url.
func (an AnonymousClient) makeTargetURL(r requestData) (*url.URL, error) {
u := an.endpointURL
if r.endpointOverride != nil {
u = r.endpointOverride
} else if u == nil {
return nil, errors.New("endpoint not configured unable to use AnonymousClient")
}
host := u.Host
scheme := u.Scheme
urlStr := scheme + "://" + host + r.relPath
// If there are any query values, add them to the end.
if len(r.queryValues) > 0 {
urlStr = urlStr + "?" + s3utils.QueryEncode(r.queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
return u, nil
}
// do - execute http request.
func (an AnonymousClient) do(req *http.Request) (*http.Response, error) {
resp, err := an.httpClient.Do(req)
if err != nil {
// Handle this specifically for now until future Golang versions fix this issue properly.
if urlErr, ok := err.(*url.Error); ok {
if strings.Contains(urlErr.Err.Error(), "EOF") {
return nil, &url.Error{
Op: urlErr.Op,
URL: urlErr.URL,
Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
}
}
}
return nil, err
}
// Response should never be nil here; report it if that is the case.
if resp == nil {
msg := "Response is empty. " // + reportIssue
return nil, ErrInvalidArgument(msg)
}
// If trace is enabled, dump http request and response.
if an.isTraceEnabled {
err = an.dumpHTTP(req, resp)
if err != nil {
return nil, err
}
}
return resp, nil
}
// dumpHTTP - dump HTTP request and response.
func (an AnonymousClient) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
_, err := fmt.Fprintln(an.traceOutput, "---------START-HTTP---------")
if err != nil {
return err
}
// Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false)
if err != nil {
return err
}
// Write request to trace output.
_, err = fmt.Fprint(an.traceOutput, string(reqTrace))
if err != nil {
return err
}
// Only display response header.
var respTrace []byte
// For errors we make sure to dump response body as well.
if resp.StatusCode != http.StatusOK &&
resp.StatusCode != http.StatusPartialContent &&
resp.StatusCode != http.StatusNoContent {
respTrace, err = httputil.DumpResponse(resp, true)
if err != nil {
return err
}
} else {
// WORKAROUND for https://github.com/golang/go/issues/13942.
// httputil.DumpResponse does not print response headers for
// all successful calls which have response ContentLength set
// to zero. Keep this workaround until the above bug is fixed.
if resp.ContentLength == 0 {
var buffer bytes.Buffer
if err = resp.Header.Write(&buffer); err != nil {
return err
}
respTrace = buffer.Bytes()
respTrace = append(respTrace, []byte("\r\n")...)
} else {
respTrace, err = httputil.DumpResponse(resp, false)
if err != nil {
return err
}
}
}
// Write response to trace output.
_, err = fmt.Fprint(an.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
if err != nil {
return err
}
// Ends the http dump.
_, err = fmt.Fprintln(an.traceOutput, "---------END-HTTP---------")
return err
}
golang-github-minio-madmin-go-3.0.104/api-error-response.go 0000664 0000000 0000000 00000006770 14774251704 0023475 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/http"
"unicode/utf8"
)
/* **** SAMPLE ERROR RESPONSE ****
<?xml version="1.0" encoding="UTF-8"?>
<Error>
   <Code>AccessDenied</Code>
   <Message>Access Denied</Message>
   <BucketName>bucketName</BucketName>
   <Key>objectName</Key>
   <RequestId>F19772218238A85A</RequestId>
   <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
</Error>
*/
// ErrorResponse - Is the typed error returned by all API operations.
type ErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
BucketName string
Key string
RequestID string `xml:"RequestId"`
HostID string `xml:"HostId"`
// Region where the bucket is located. This header is returned
// only in HEAD bucket and ListObjects response.
Region string
}
// Error - Returns HTTP error string
func (e ErrorResponse) Error() string {
return e.Message
}
const (
reportIssue = "Please report this issue at https://github.com/minio/minio/issues."
)
// httpRespToErrorResponse returns a new encoded ErrorResponse
// structure as error.
func httpRespToErrorResponse(resp *http.Response) error {
if resp == nil || resp.Body == nil {
msg := "Response is empty. " + reportIssue
return ErrInvalidArgument(msg)
}
defer closeResponse(resp)
// Limit to 100K
body, err := io.ReadAll(io.LimitReader(resp.Body, 100<<10))
if err != nil {
return ErrorResponse{
Code: resp.Status,
Message: fmt.Sprintf("Failed to read server response: %s.", err),
}
}
var errResp ErrorResponse
// Decode the json error
err = json.Unmarshal(body, &errResp)
if err != nil {
// We might get errors as XML, try that.
xmlErr := xml.Unmarshal(body, &errResp)
if xmlErr != nil {
bodyString := string(body)
if !utf8.Valid(body) {
bodyString = hex.EncodeToString(body)
}
if len(bodyString) > 1024 {
bodyString = bodyString[:1021] + "..."
}
return ErrorResponse{
Code: resp.Status,
Message: fmt.Sprintf("Failed to parse server response (%s): %s", err.Error(), bodyString),
}
}
}
return errResp
}
// ToErrorResponse - Returns parsed ErrorResponse struct from body and
// http headers.
//
// For example:
//
// import admin "github.com/minio/madmin-go/v3"
// ...
// ...
// ss, err := adm.ServiceStatus(...)
// if err != nil {
// resp := admin.ToErrorResponse(err)
// }
// ...
func ToErrorResponse(err error) ErrorResponse {
switch err := err.(type) {
case ErrorResponse:
return err
default:
return ErrorResponse{}
}
}
// ErrInvalidArgument - Invalid argument response.
func ErrInvalidArgument(message string) error {
return ErrorResponse{
Code: "InvalidArgument",
Message: message,
RequestID: "minio",
}
}
golang-github-minio-madmin-go-3.0.104/api-log-entry.go 0000664 0000000 0000000 00000003737 14774251704 0022430 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
// Args - defines the arguments for the API.
type logArgs struct {
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
Metadata map[string]string `json:"metadata,omitempty"`
}
// Trace - defines the trace.
type logTrace struct {
Message string `json:"message,omitempty"`
Source []string `json:"source,omitempty"`
Variables map[string]interface{} `json:"variables,omitempty"`
}
// API - defines the api type and its args.
type logAPI struct {
Name string `json:"name,omitempty"`
Args *logArgs `json:"args,omitempty"`
}
// Entry - defines fields and values of each log entry.
type logEntry struct {
DeploymentID string `json:"deploymentid,omitempty"`
Level string `json:"level"`
LogKind LogKind `json:"errKind"`
Time string `json:"time"`
API *logAPI `json:"api,omitempty"`
RemoteHost string `json:"remotehost,omitempty"`
Host string `json:"host,omitempty"`
RequestID string `json:"requestID,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
Message string `json:"message,omitempty"`
Trace *logTrace `json:"error,omitempty"`
}
golang-github-minio-madmin-go-3.0.104/api-log.go 0000664 0000000 0000000 00000007550 14774251704 0021266 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strconv"
)
// LogMask is a bit mask for log types.
type LogMask uint64
const (
// LogMaskMinIO - mask for MinIO type log
LogMaskMinIO LogMask = 1 << iota // Deprecated Jan 2024
// LogMaskApplication - mask for application type log
LogMaskApplication // Deprecated Jan 2024
LogMaskFatal
LogMaskWarning
LogMaskError
LogMaskEvent
LogMaskInfo
// LogMaskAll must be the last.
LogMaskAll LogMask = (1 << iota) - 1
)
// Mask returns the LogMask as uint64
func (m LogMask) Mask() uint64 {
return uint64(m)
}
// Contains returns whether all flags in other is present in t.
func (m LogMask) Contains(other LogMask) bool {
return m&other == other
}
// LogKind specifies the kind of error log
type LogKind string
const (
// LogKindMinio - MinIO log type
LogKindMinio LogKind = "MINIO" // Deprecated Jan 2024
// LogKindApplication - Application log type
LogKindApplication LogKind = "APPLICATION" // Deprecated Jan 2024
// LogKindAll - all logs type
LogKindAll LogKind = "ALL" // Deprecated Jan 2024
LogKindFatal LogKind = "FATAL"
LogKindWarning LogKind = "WARNING"
LogKindError LogKind = "ERROR"
LogKindEvent LogKind = "EVENT"
LogKindInfo LogKind = "INFO"
)
// LogMask returns the mask based on the kind.
func (l LogKind) LogMask() LogMask {
switch l {
case LogKindMinio:
return LogMaskMinIO
case LogKindApplication:
return LogMaskApplication
case LogKindFatal:
return LogMaskFatal
case LogKindWarning:
return LogMaskWarning
case LogKindError:
return LogMaskError
case LogKindEvent:
return LogMaskEvent
case LogKindInfo:
return LogMaskInfo
}
return LogMaskAll
}
func (l LogKind) String() string {
return string(l)
}
// LogInfo holds console log messages
type LogInfo struct {
logEntry
ConsoleMsg string
NodeName string `json:"node"`
Err error `json:"-"`
}
// GetLogs - listen on console log messages.
func (adm AdminClient) GetLogs(ctx context.Context, node string, lineCnt int, logKind string) <-chan LogInfo {
logCh := make(chan LogInfo, 1)
// Only success, start a routine to start reading line by line.
go func(logCh chan<- LogInfo) {
defer close(logCh)
urlValues := make(url.Values)
urlValues.Set("node", node)
urlValues.Set("limit", strconv.Itoa(lineCnt))
urlValues.Set("logType", logKind)
for {
reqData := requestData{
relPath: adminAPIPrefix + "/log",
queryValues: urlValues,
}
// Execute GET to call log handler
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
if err != nil {
closeResponse(resp)
return
}
if resp.StatusCode != http.StatusOK {
logCh <- LogInfo{Err: httpRespToErrorResponse(resp)}
return
}
dec := json.NewDecoder(resp.Body)
for {
var info LogInfo
if err = dec.Decode(&info); err != nil {
break
}
select {
case <-ctx.Done():
return
case logCh <- info:
}
}
}
}(logCh)
// Returns the log info channel, for caller to start reading from.
return logCh
}
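// Usage sketch for GetLogs (illustrative only, not part of the package API):
// given a configured AdminClient named madmClnt, console logs can be streamed
// as shown below. The empty node name and the "ALL" log kind are example
// values.
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	for logInfo := range madmClnt.GetLogs(ctx, "", 10, "ALL") {
//		if logInfo.Err != nil {
//			log.Fatalln(logInfo.Err)
//		}
//		fmt.Println(logInfo.ConsoleMsg)
//	}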
// Mask returns the mask based on the error level.
func (l LogInfo) Mask() uint64 {
return l.LogKind.LogMask().Mask()
}
golang-github-minio-madmin-go-3.0.104/api.go 0000664 0000000 0000000 00000036544 14774251704 0020514 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/http/cookiejar"
"net/http/httputil"
"net/url"
"os"
"regexp"
"runtime"
"strings"
"syscall"
"time"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/signer"
"golang.org/x/net/publicsuffix"
)
// AdminClient implements Amazon S3 compatible methods.
type AdminClient struct {
/// Standard options.
// Parsed endpoint url provided by the user.
endpointURL *url.URL
// Holds various credential providers.
credsProvider *credentials.Credentials
// User supplied.
appInfo struct {
appName string
appVersion string
}
// Indicate whether we are using https or not
secure bool
// Needs allocation.
httpClient *http.Client
random *rand.Rand
// Advanced functionality.
isTraceEnabled bool
traceOutput io.Writer
}
// Global constants.
const (
libraryName = "madmin-go"
libraryVersion = "3.0.70"
libraryAdminURLPrefix = "/minio/admin"
libraryKMSURLPrefix = "/minio/kms"
)
// User Agent should always follow the below style.
// Please open an issue to discuss any new changes here.
//
// MinIO (OS; ARCH) LIB/VER APP/VER
const (
libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
// Options for New method
type Options struct {
Creds *credentials.Credentials
Secure bool
Transport http.RoundTripper
// Add future fields here
}
// New - instantiate minio admin client
//
// Deprecated: please use NewWithOptions
func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*AdminClient, error) {
creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
clnt, err := privateNew(endpoint, &Options{Creds: creds, Secure: secure})
if err != nil {
return nil, err
}
return clnt, nil
}
// NewWithOptions - instantiate minio admin client with options.
func NewWithOptions(endpoint string, opts *Options) (*AdminClient, error) {
clnt, err := privateNew(endpoint, opts)
if err != nil {
return nil, err
}
return clnt, nil
}
func privateNew(endpoint string, opts *Options) (*AdminClient, error) {
// Initialize cookies to preserve server sent cookies if any and replay
// them upon each request.
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, err
}
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, opts.Secure)
if err != nil {
return nil, err
}
clnt := new(AdminClient)
// Save the credentials.
clnt.credsProvider = opts.Creds
// Remember whether we are using https or not
clnt.secure = opts.Secure
// Save endpoint URL, user agent for future uses.
clnt.endpointURL = endpointURL
tr := opts.Transport
if tr == nil {
tr = DefaultTransport(opts.Secure)
}
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Jar: jar,
Transport: tr,
}
// Add locked pseudo-random number generator.
clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
// Return.
return clnt, nil
}
// SetAppInfo - add application details to user agent.
func (adm *AdminClient) SetAppInfo(appName string, appVersion string) {
// if app name and version are not set, we do not set a new user
// agent.
if appName != "" && appVersion != "" {
adm.appInfo.appName = appName
adm.appInfo.appVersion = appVersion
}
}
// SetCustomTransport - set new custom transport.
// Deprecated: please use Options{Transport: tr} to provide custom transport.
func (adm *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport
// ``http.DefaultTransport``.
//
// This transport is usually needed for debugging OR to add your
// own custom TLS certificates on the client transport, for custom
// CA's and certs which are not part of standard certificate
// authority follow this example :-
//
// tr := &http.Transport{
// TLSClientConfig: &tls.Config{RootCAs: pool},
// DisableCompression: true,
// }
// api.SetTransport(tr)
//
if adm.httpClient != nil {
adm.httpClient.Transport = customHTTPTransport
}
}
// TraceOn - enable HTTP tracing.
func (adm *AdminClient) TraceOn(outputStream io.Writer) {
// if outputStream is nil then default to os.Stdout.
if outputStream == nil {
outputStream = os.Stdout
}
// Sets a new output stream.
adm.traceOutput = outputStream
// Enable tracing.
adm.isTraceEnabled = true
}
// TraceOff - disable HTTP tracing.
func (adm *AdminClient) TraceOff() {
// Disable tracing.
adm.isTraceEnabled = false
}
// requestData - is a container for all the values needed to make a
// request.
type requestData struct {
customHeaders http.Header
queryValues url.Values
relPath string // URL path relative to admin API base endpoint
content []byte
contentReader io.Reader
// endpointOverride overrides target URL with anonymousClient
endpointOverride *url.URL
// isKMS replaces URL prefix with /kms
isKMS bool
}
// Filter out signature value from Authorization header.
func (adm AdminClient) filterSignature(req *http.Request) {
/// Signature V4 authorization header.
// Save the original auth.
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
regSign := regexp.MustCompile("Signature=([[0-9a-f]+)")
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
req.Header.Set("Authorization", newAuth)
}
// dumpHTTP - dump HTTP request and response.
func (adm AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
_, err := fmt.Fprintln(adm.traceOutput, "---------START-HTTP---------")
if err != nil {
return err
}
// Filter out Signature field from Authorization header.
adm.filterSignature(req)
// Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false)
if err != nil {
return err
}
// Write request to trace output.
_, err = fmt.Fprint(adm.traceOutput, string(reqTrace))
if err != nil {
return err
}
// Only display response header.
var respTrace []byte
// For errors we make sure to dump response body as well.
if resp.StatusCode != http.StatusOK &&
resp.StatusCode != http.StatusPartialContent &&
resp.StatusCode != http.StatusNoContent {
respTrace, err = httputil.DumpResponse(resp, true)
if err != nil {
return err
}
} else {
// WORKAROUND for https://github.com/golang/go/issues/13942.
// httputil.DumpResponse does not print response headers for
// all successful calls which have response ContentLength set
// to zero. Keep this workaround until the above bug is fixed.
if resp.ContentLength == 0 {
var buffer bytes.Buffer
if err = resp.Header.Write(&buffer); err != nil {
return err
}
respTrace = buffer.Bytes()
respTrace = append(respTrace, []byte("\r\n")...)
} else {
respTrace, err = httputil.DumpResponse(resp, false)
if err != nil {
return err
}
}
}
// Write response to trace output.
_, err = fmt.Fprint(adm.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
if err != nil {
return err
}
// Ends the http dump.
_, err = fmt.Fprintln(adm.traceOutput, "---------END-HTTP---------")
return err
}
// do - execute http request.
func (adm AdminClient) do(req *http.Request) (*http.Response, error) {
resp, err := adm.httpClient.Do(req)
if err != nil {
// Handle this specifically for now until future Golang versions fix this issue properly.
if urlErr, ok := err.(*url.Error); ok {
if strings.Contains(urlErr.Err.Error(), "EOF") {
return nil, &url.Error{
Op: urlErr.Op,
URL: urlErr.URL,
Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
}
}
}
return nil, err
}
// Response should never be nil here; report it if that is the case.
if resp == nil {
msg := "Response is empty. " // + reportIssue
return nil, ErrInvalidArgument(msg)
}
// If trace is enabled, dump http request and response.
if adm.isTraceEnabled {
err = adm.dumpHTTP(req, resp)
if err != nil {
return nil, err
}
}
return resp, nil
}
// List of success status.
var successStatus = []int{
http.StatusOK,
http.StatusNoContent,
http.StatusPartialContent,
}
// RequestData exposing internal data structure requestData
type RequestData struct {
CustomHeaders http.Header
QueryValues url.Values
RelPath string // URL path relative to admin API base endpoint
Content []byte
}
// ExecuteMethod - similar to internal method executeMethod() useful
// for writing custom requests.
func (adm AdminClient) ExecuteMethod(ctx context.Context, method string, reqData RequestData) (res *http.Response, err error) {
return adm.executeMethod(ctx, method, requestData{
customHeaders: reqData.CustomHeaders,
queryValues: reqData.QueryValues,
relPath: reqData.RelPath,
content: reqData.Content,
})
}
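// Usage sketch for ExecuteMethod (illustrative only): the relative path below
// is an assumed example and must point at a valid admin API route; callers own
// the returned response and must close its body.
//
//	resp, err := madmClnt.ExecuteMethod(context.Background(), http.MethodGet, madmin.RequestData{
//		RelPath: "/v3/some-admin-endpoint",
//	})
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer resp.Body.Close()
//	body, err := io.ReadAll(resp.Body)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Println(string(body))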
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
func (adm AdminClient) executeMethod(ctx context.Context, method string, reqData requestData) (res *http.Response, err error) {
reqRetry := MaxRetry // Indicates how many times we can retry the request
defer func() {
if err != nil {
// close idle connections before returning, upon error.
adm.httpClient.CloseIdleConnections()
}
}()
// Create cancel context to control 'newRetryTimer' go routine.
retryCtx, cancel := context.WithCancel(ctx)
// Indicate to our routine to exit cleanly upon return.
defer cancel()
for range adm.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
// Instantiate a new request.
var req *http.Request
req, err = adm.newRequest(ctx, method, reqData)
if err != nil {
return nil, err
}
// Initiate the request.
res, err = adm.do(req)
if err != nil {
// Give up right away if it is a connection refused problem
if errors.Is(err, syscall.ECONNREFUSED) {
return nil, err
}
// Give up if caller canceled.
if ctx.Err() != nil {
return nil, ctx.Err()
}
// retry all network errors.
continue
}
// For any known successful http status, return quickly.
for _, httpStatus := range successStatus {
if httpStatus == res.StatusCode {
return res, nil
}
}
// Read the body to be saved later.
errBodyBytes, err := io.ReadAll(res.Body)
// res.Body should be closed
closeResponse(res)
if err != nil {
return nil, err
}
// Save the body.
errBodySeeker := bytes.NewReader(errBodyBytes)
res.Body = io.NopCloser(errBodySeeker)
// For errors, verify if it's retryable; otherwise fail quickly.
errResponse := ToErrorResponse(httpRespToErrorResponse(res))
// Save the body back again.
errBodySeeker.Seek(0, 0) // Seek back to starting point.
res.Body = io.NopCloser(errBodySeeker)
// Verify if error response code is retryable.
if isAdminErrCodeRetryable(errResponse.Code) {
continue // Retry.
}
// Verify if http status code is retryable.
if isHTTPStatusRetryable(res.StatusCode) {
continue // Retry.
}
break
}
// Return an error when the retry context is canceled or its deadline is exceeded.
if e := retryCtx.Err(); e != nil {
return nil, e
}
return res, err
}
// set User agent.
func (adm AdminClient) setUserAgent(req *http.Request) {
req.Header.Set("User-Agent", libraryUserAgent)
if adm.appInfo.appName != "" && adm.appInfo.appVersion != "" {
req.Header.Set("User-Agent", libraryUserAgent+" "+adm.appInfo.appName+"/"+adm.appInfo.appVersion)
}
}
// GetAccessAndSecretKey - retrieves the access and secret keys.
func (adm AdminClient) GetAccessAndSecretKey() (string, string) {
value, err := adm.credsProvider.GetWithContext(adm.CredContext())
if err != nil {
return "", ""
}
return value.AccessKeyID, value.SecretAccessKey
}
// GetEndpointURL - returns the endpoint for the admin client.
func (adm AdminClient) GetEndpointURL() *url.URL {
return adm.endpointURL
}
func (adm AdminClient) getSecretKey() string {
value, err := adm.credsProvider.GetWithContext(adm.CredContext())
if err != nil {
// Return empty, call will fail.
return ""
}
return value.SecretAccessKey
}
// newRequest - instantiate a new HTTP request for a given method.
func (adm AdminClient) newRequest(ctx context.Context, method string, reqData requestData) (req *http.Request, err error) {
// If no method is supplied default to 'POST'.
if method == "" {
method = "POST"
}
// Default all requests to ""
location := ""
// Construct a new target URL.
targetURL, err := adm.makeTargetURL(reqData)
if err != nil {
return nil, err
}
// Initialize a new HTTP request for the method.
req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), bytes.NewReader(reqData.content))
if err != nil {
return nil, err
}
value, err := adm.credsProvider.GetWithContext(adm.CredContext())
if err != nil {
return nil, err
}
var (
accessKeyID = value.AccessKeyID
secretAccessKey = value.SecretAccessKey
sessionToken = value.SessionToken
)
adm.setUserAgent(req)
for k, v := range reqData.customHeaders {
req.Header.Set(k, v[0])
}
if length := len(reqData.content); length > 0 {
req.ContentLength = int64(length)
}
sum := sha256.Sum256(reqData.content)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
if reqData.contentReader != nil {
req.Body = io.NopCloser(reqData.contentReader)
} else {
req.Body = io.NopCloser(bytes.NewReader(reqData.content))
}
req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
return req, nil
}
// makeTargetURL make a new target url.
func (adm AdminClient) makeTargetURL(r requestData) (*url.URL, error) {
host := adm.endpointURL.Host
scheme := adm.endpointURL.Scheme
prefix := libraryAdminURLPrefix
if r.isKMS {
prefix = libraryKMSURLPrefix
}
urlStr := scheme + "://" + host + prefix + r.relPath
// If there are any query values, add them to the end.
if len(r.queryValues) > 0 {
urlStr = urlStr + "?" + s3utils.QueryEncode(r.queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
return u, nil
}
// CredContext returns the context for fetching credentials
func (adm AdminClient) CredContext() *credentials.CredContext {
return &credentials.CredContext{
Client: adm.httpClient,
}
}
golang-github-minio-madmin-go-3.0.104/api_test.go 0000664 0000000 0000000 00000001766 14774251704 0021551 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
// Package madmin_test
package madmin_test
import (
"testing"
"github.com/minio/madmin-go/v3"
)
func TestMinioAdminClient(t *testing.T) {
_, err := madmin.New("localhost:9000", "food", "food123", true)
if err != nil {
t.Fatal(err)
}
}
golang-github-minio-madmin-go-3.0.104/bandwidth.go 0000664 0000000 0000000 00000005140 14774251704 0021673 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strings"
)
// BandwidthDetails for the measured bandwidth
type BandwidthDetails struct {
LimitInBytesPerSecond int64 `json:"limitInBits"`
CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
}
// BucketBandwidthReport captures the details for all buckets.
type BucketBandwidthReport struct {
BucketStats map[string]BandwidthDetails `json:"bucketStats,omitempty"`
}
// Report includes the bandwidth report or the error encountered.
type Report struct {
Report BucketBandwidthReport `json:"report"`
Err error `json:"error,omitempty"`
}
// GetBucketBandwidth - Gets a channel reporting bandwidth measurements for replication buckets. If no buckets
// generate replication traffic an empty map is returned in the report until traffic is seen.
func (adm *AdminClient) GetBucketBandwidth(ctx context.Context, buckets ...string) <-chan Report {
queryValues := url.Values{}
ch := make(chan Report)
if len(buckets) > 0 {
queryValues.Set("buckets", strings.Join(buckets, ","))
}
reqData := requestData{
relPath: adminAPIPrefix + "/bandwidth",
queryValues: queryValues,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
if err != nil {
defer closeResponse(resp)
ch <- Report{Err: err}
return ch
}
if resp.StatusCode != http.StatusOK {
ch <- Report{Err: httpRespToErrorResponse(resp)}
return ch
}
dec := json.NewDecoder(resp.Body)
go func(ctx context.Context, ch chan<- Report, resp *http.Response) {
defer func() {
closeResponse(resp)
close(ch)
}()
for {
var report BucketBandwidthReport
if err = dec.Decode(&report); err != nil {
ch <- Report{Err: err}
return
}
select {
case <-ctx.Done():
return
case ch <- Report{Report: report}:
}
}
}(ctx, ch, resp)
return ch
}
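// Usage sketch for GetBucketBandwidth (illustrative only): the bucket names
// are example values; reports are delivered until the context is cancelled or
// an error occurs.
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//	defer cancel()
//	for report := range madmClnt.GetBucketBandwidth(ctx, "bucket-a", "bucket-b") {
//		if report.Err != nil {
//			log.Fatalln(report.Err)
//		}
//		for bucket, bw := range report.Report.BucketStats {
//			fmt.Printf("%s: %.2f B/s (limit %d B/s)\n", bucket, bw.CurrentBandwidthInBytesPerSecond, bw.LimitInBytesPerSecond)
//		}
//	}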
golang-github-minio-madmin-go-3.0.104/batch-job.go 0000664 0000000 0000000 00000036013 14774251704 0021563 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
)
// BatchJobType type to describe batch job types
type BatchJobType string
const (
BatchJobReplicate BatchJobType = "replicate"
BatchJobKeyRotate BatchJobType = "keyrotate"
BatchJobExpire BatchJobType = "expire"
BatchJobCatalog BatchJobType = "catalog"
)
// SupportedJobTypes supported job types
var SupportedJobTypes = []BatchJobType{
BatchJobReplicate,
BatchJobKeyRotate,
BatchJobExpire,
// No need to add new types here, they are added directly in server.
}
// BatchJobReplicateTemplate provides a sample template
// for batch replication
const BatchJobReplicateTemplate = `replicate:
apiVersion: v1
# source of the objects to be replicated
source:
type: TYPE # valid values are "s3" or "minio"
bucket: BUCKET
prefix: PREFIX # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'remote' *must* be the "local" deployment
endpoint: "http[s]://HOSTNAME:PORT"
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
credentials:
accessKey: ACCESS-KEY # Required
secretKey: SECRET-KEY # Required
# sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
snowball: # automatically activated if the source is local
disable: false # optionally turn-off snowball archive transfer
batch: 100 # up to this many objects per archive
inmemory: true # indicates if the archive must be staged locally or in-memory
compress: false # S2/Snappy compressed archive
smallerThan: 256KiB # create archive for all objects smaller than 256KiB
skipErrs: false # skips any source side read() errors
# target where the objects must be replicated
target:
type: TYPE # valid values are "s3" or "minio"
bucket: BUCKET
prefix: PREFIX # 'PREFIX' is optional
# If your target is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'remote' *must* be the "local" deployment
endpoint: "http[s]://HOSTNAME:PORT"
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
credentials:
accessKey: ACCESS-KEY
secretKey: SECRET-KEY
# sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
# NOTE: All flags are optional
# - filtering criteria only applies for all source objects match the criteria
# - configurable notification endpoints
# - configurable retries for the job (each retry skips successfully previously replaced objects)
flags:
filter:
newerThan: "7d" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
createdAfter: "date" # match objects created after "date"
createdBefore: "date" # match objects created before "date"
## NOTE: tags are not supported when "source" is remote.
# tags:
# - key: "name"
# value: "pick*" # match objects with tag 'name', with all values starting with 'pick'
# metadata:
# - key: "content-type"
# value: "image/*" # match objects with 'content-type', with all values starting with 'image/'
notify:
endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
token: "Bearer xxxxx" # optional authentication token for the notification endpoint
retry:
attempts: 10 # number of retries for the job before giving up
delay: "500ms" # least amount of delay between each retry
`
// BatchJobKeyRotateTemplate provides a sample template
// for batch key rotation
const BatchJobKeyRotateTemplate = `keyrotate:
apiVersion: v1
bucket: BUCKET
prefix: PREFIX
encryption:
type: sse-s3 # valid values are sse-s3 and sse-kms
key: # valid only for sse-kms
context: # valid only for sse-kms
# optional flags based filtering criteria
# for all objects
flags:
filter:
newerThan: "7d" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
createdAfter: "date" # match objects created after "date"
createdBefore: "date" # match objects created before "date"
tags:
- key: "name"
value: "pick*" # match objects with tag 'name', with all values starting with 'pick'
metadata:
- key: "content-type"
value: "image/*" # match objects with 'content-type', with all values starting with 'image/'
kmskey: "key-id" # match objects with KMS key-id (applicable only for sse-kms)
notify:
endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
token: "Bearer xxxxx" # optional authentication token for the notification endpoint
retry:
attempts: 10 # number of retries for the job before giving up
delay: "500ms" # least amount of delay between each retry
`
// BatchJobExpireTemplate provides a sample template
// for batch expiring objects
const BatchJobExpireTemplate = `expire:
apiVersion: v1
bucket: mybucket # Bucket where this job will expire matching objects from
prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
rules:
- type: object # objects with zero or more older versions
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 70h # match objects older than this value
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
tags:
- key: name
value: pick* # match objects with tag 'name', all values starting with 'pick'
metadata:
- key: content-type
value: image/* # match objects with 'content-type', all values starting with 'image/'
size:
lessThan: 10MiB # match objects with size less than this value (e.g. 10MiB)
greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object.
- type: deleted # objects with delete marker as their latest version
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
notify:
endpoint: https://notify.endpoint # notification endpoint to receive job completion status
token: Bearer xxxxx # optional authentication token for the notification endpoint
retry:
attempts: 10 # number of retries for the job before giving up
delay: 500ms # least amount of delay between each retry
`
// BatchJobResult returned by StartBatchJob
type BatchJobResult struct {
ID string `json:"id"`
Type BatchJobType `json:"type"`
User string `json:"user,omitempty"`
Started time.Time `json:"started"`
Elapsed time.Duration `json:"elapsed,omitempty"`
}
// StartBatchJob start a new batch job, input job description is in YAML.
func (adm *AdminClient) StartBatchJob(ctx context.Context, job string) (BatchJobResult, error) {
resp, err := adm.executeMethod(ctx, http.MethodPost,
requestData{
relPath: adminAPIPrefix + "/start-job",
content: []byte(job),
},
)
if err != nil {
return BatchJobResult{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return BatchJobResult{}, httpRespToErrorResponse(resp)
}
res := BatchJobResult{}
dec := json.NewDecoder(resp.Body)
if err = dec.Decode(&res); err != nil {
return res, err
}
return res, nil
}
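// Usage sketch for StartBatchJob (illustrative only): the job definition is
// obtained from GenerateBatchJob and would normally be edited before being
// started; polling via BatchJobStatus is an example follow-up, not a
// requirement.
//
//	job, err := madmClnt.GenerateBatchJob(context.Background(), madmin.GenerateBatchJobOpts{Type: madmin.BatchJobExpire})
//	if err != nil {
//		log.Fatalln(err)
//	}
//	res, err := madmClnt.StartBatchJob(context.Background(), job)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	status, err := madmClnt.BatchJobStatus(context.Background(), res.ID)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Printf("job %s (%s): %+v\n", res.ID, res.Type, status.LastMetric)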
// BatchJobStatus contains the last batch job metric
type BatchJobStatus struct {
LastMetric JobMetric
}
// BatchJobStatus returns the status of the given job.
func (adm *AdminClient) BatchJobStatus(ctx context.Context, jobID string) (BatchJobStatus, error) {
values := make(url.Values)
values.Set("jobId", jobID)
resp, err := adm.executeMethod(ctx, http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/status-job",
queryValues: values,
},
)
if err != nil {
return BatchJobStatus{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return BatchJobStatus{}, httpRespToErrorResponse(resp)
}
res := BatchJobStatus{}
dec := json.NewDecoder(resp.Body)
if err = dec.Decode(&res); err != nil {
return res, err
}
return res, nil
}
// DescribeBatchJob - describes a currently running Job.
func (adm *AdminClient) DescribeBatchJob(ctx context.Context, jobID string) (string, error) {
values := make(url.Values)
values.Set("jobId", jobID)
resp, err := adm.executeMethod(ctx, http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/describe-job",
queryValues: values,
},
)
if err != nil {
return "", err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return "", httpRespToErrorResponse(resp)
}
buf, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(buf), nil
}
// GenerateBatchJobOpts is to be implemented in future.
type GenerateBatchJobOpts struct {
Type BatchJobType
}
// GenerateBatchJob creates a new job template from standard template
// TODO: allow configuring yaml values
func (adm *AdminClient) GenerateBatchJob(_ context.Context, opts GenerateBatchJobOpts) (string, error) {
// TODO: allow configuring the template to fill values from GenerateBatchJobOpts
switch opts.Type {
case BatchJobReplicate:
return BatchJobReplicateTemplate, nil
case BatchJobKeyRotate:
return BatchJobKeyRotateTemplate, nil
case BatchJobExpire:
return BatchJobExpireTemplate, nil
}
return "", fmt.Errorf("unknown batch job requested: %s", opts.Type)
}
// GetSupportedBatchJobTypes returns the list of server supported batch job
// types.
func (adm *AdminClient) GetSupportedBatchJobTypes(ctx context.Context) (supportedTypes []BatchJobType, apiUnavailable bool, err error) {
resp, err := adm.executeMethod(ctx, http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/list-supported-job-types",
},
)
if err != nil {
return nil, false, err
}
defer closeResponse(resp)
var buf []byte
switch resp.StatusCode {
case http.StatusNotFound, http.StatusUpgradeRequired:
apiUnavailable = true
case http.StatusOK:
if buf, err = io.ReadAll(resp.Body); err == nil {
err = json.Unmarshal(buf, &supportedTypes)
}
default:
err = httpRespToErrorResponse(resp)
}
return supportedTypes, apiUnavailable, err
}
// GenerateBatchJobV2 creates a new job template by requesting the server. This
// is an EOS only API and returns apiUnavailable=true when API is unsupported.
func (adm *AdminClient) GenerateBatchJobV2(ctx context.Context, opts GenerateBatchJobOpts) (template string, apiUnavailable bool, err error) {
if opts.Type == "" {
err = fmt.Errorf("batch job type is required")
return "", false, err
}
values := make(url.Values)
values.Set("jobType", string(opts.Type))
resp, err := adm.executeMethod(ctx, http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/generate-job",
queryValues: values,
},
)
if err != nil {
return "", false, err
}
defer closeResponse(resp)
var buf []byte
switch resp.StatusCode {
case http.StatusNotFound, http.StatusUpgradeRequired:
apiUnavailable = true
case http.StatusOK:
if buf, err = io.ReadAll(resp.Body); err == nil {
template = string(buf)
}
default:
err = httpRespToErrorResponse(resp)
}
return template, apiUnavailable, err
}
// ListBatchJobsResult contains entries for all current jobs.
type ListBatchJobsResult struct {
Jobs []BatchJobResult `json:"jobs"`
}
// ListBatchJobsFilter returns list based on following
// filtering params.
type ListBatchJobsFilter struct {
ByJobType string
}
// ListBatchJobs list all the currently active batch jobs
func (adm *AdminClient) ListBatchJobs(ctx context.Context, fl *ListBatchJobsFilter) (ListBatchJobsResult, error) {
if fl == nil {
return ListBatchJobsResult{}, errors.New("ListBatchJobsFilter cannot be nil")
}
values := make(url.Values)
values.Set("jobType", fl.ByJobType)
resp, err := adm.executeMethod(ctx, http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/list-jobs",
queryValues: values,
},
)
if err != nil {
return ListBatchJobsResult{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return ListBatchJobsResult{}, httpRespToErrorResponse(resp)
}
d := json.NewDecoder(resp.Body)
result := ListBatchJobsResult{}
if err = d.Decode(&result); err != nil {
return result, err
}
return result, nil
}
// CancelBatchJob cancels ongoing batch job.
func (adm *AdminClient) CancelBatchJob(ctx context.Context, jobID string) error {
values := make(url.Values)
values.Set("id", jobID)
resp, err := adm.executeMethod(ctx, http.MethodDelete,
requestData{
relPath: adminAPIPrefix + "/cancel-job",
queryValues: values,
},
)
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// CatalogDataFile contains information about an output file from a catalog job run.
type CatalogDataFile struct {
Key string `json:"key"`
Size uint64 `json:"size"`
MD5Checksum string `json:"MD5Checksum"`
}
// CatalogManifestVersion represents the version of a catalog manifest.
type CatalogManifestVersion string
const (
CatalogManifestVersionV1 CatalogManifestVersion = "v1"
)
// CatalogManifest represents the manifest of a catalog job's result.
type CatalogManifest struct {
Version CatalogManifestVersion `json:"version"`
JobID string `json:"jobID"`
StartTimestamp string `json:"startTimestamp"`
Files []CatalogDataFile `json:"files"`
}
golang-github-minio-madmin-go-3.0.104/bucket-metadata.go 0000664 0000000 0000000 00000006013 14774251704 0022762 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"
)
// ExportBucketMetadata makes an admin call to export bucket metadata of a bucket
func (adm *AdminClient) ExportBucketMetadata(ctx context.Context, bucket string) (io.ReadCloser, error) {
path := adminAPIPrefix + "/export-bucket-metadata"
queryValues := url.Values{}
queryValues.Set("bucket", bucket)
resp, err := adm.executeMethod(ctx,
http.MethodGet, requestData{
relPath: path,
queryValues: queryValues,
},
)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
closeResponse(resp)
return nil, httpRespToErrorResponse(resp)
}
return resp.Body, nil
}
// MetaStatus status of metadata import
type MetaStatus struct {
IsSet bool `json:"isSet"`
Err string `json:"error,omitempty"`
}
// BucketStatus reflects status of bucket metadata import
type BucketStatus struct {
ObjectLock MetaStatus `json:"olock"`
Versioning MetaStatus `json:"versioning"`
Policy MetaStatus `json:"policy"`
Tagging MetaStatus `json:"tagging"`
SSEConfig MetaStatus `json:"sse"`
Lifecycle MetaStatus `json:"lifecycle"`
Notification MetaStatus `json:"notification"`
Quota MetaStatus `json:"quota"`
Cors MetaStatus `json:"cors"`
Err string `json:"error,omitempty"`
}
// BucketMetaImportErrs reports on bucket metadata import status.
type BucketMetaImportErrs struct {
Buckets map[string]BucketStatus `json:"buckets,omitempty"`
}
// ImportBucketMetadata makes an admin call to set bucket metadata of a bucket from imported content
func (adm *AdminClient) ImportBucketMetadata(ctx context.Context, bucket string, contentReader io.ReadCloser) (r BucketMetaImportErrs, err error) {
content, err := io.ReadAll(contentReader)
if err != nil {
return r, err
}
path := adminAPIPrefix + "/import-bucket-metadata"
queryValues := url.Values{}
queryValues.Set("bucket", bucket)
resp, err := adm.executeMethod(ctx,
http.MethodPut, requestData{
relPath: path,
queryValues: queryValues,
content: content,
},
)
defer closeResponse(resp)
if err != nil {
return r, err
}
if resp.StatusCode != http.StatusOK {
return r, httpRespToErrorResponse(resp)
}
err = json.NewDecoder(resp.Body).Decode(&r)
return r, err
}
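// Example (illustrative sketch): importing previously exported bucket
// metadata and collecting any per-bucket errors reported by the server. The
// contentReader would normally wrap the stream produced by an earlier export.
func exampleImportBucketMetadata(ctx context.Context, adm *AdminClient, bucket string, contentReader io.ReadCloser) (map[string]string, error) {
	res, err := adm.ImportBucketMetadata(ctx, bucket, contentReader)
	if err != nil {
		return nil, err
	}
	failed := make(map[string]string)
	for name, status := range res.Buckets {
		if status.Err != "" {
			failed[name] = status.Err
		}
	}
	return failed, nil
}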
golang-github-minio-madmin-go-3.0.104/bucket-targets.go 0000664 0000000 0000000 00000012112 14774251704 0022650 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"fmt"
"net/url"
"time"
)
//msgp:clearomitted
//msgp:tag json
//go:generate msgp
// BucketTargets represents a slice of bucket targets by type and endpoint
type BucketTargets struct {
Targets []BucketTarget `json:"targets"`
}
// Empty returns true if struct is empty.
func (t BucketTargets) Empty() bool {
if len(t.Targets) == 0 {
return true
}
empty := true
for _, t := range t.Targets {
if !t.Empty() {
return false
}
}
return empty
}
// ServiceType represents service type
type ServiceType string
const (
// ReplicationService specifies replication service
ReplicationService ServiceType = "replication"
)
// IsValid returns true if ARN type represents replication
func (t ServiceType) IsValid() bool {
return t == ReplicationService
}
// BucketTarget represents the target bucket and site association.
type BucketTarget struct {
SourceBucket string `json:"sourcebucket"`
Endpoint string `json:"endpoint"`
Credentials *Credentials `json:"credentials"`
TargetBucket string `json:"targetbucket"`
Secure bool `json:"secure"`
Path string `json:"path,omitempty"`
API string `json:"api,omitempty"`
Arn string `json:"arn,omitempty"`
Type ServiceType `json:"type"`
Region string `json:"region,omitempty"`
BandwidthLimit int64 `json:"bandwidthlimit,omitempty"`
ReplicationSync bool `json:"replicationSync"`
StorageClass string `json:"storageclass,omitempty"`
HealthCheckDuration time.Duration `json:"healthCheckDuration,omitempty"`
DisableProxy bool `json:"disableProxy"`
ResetBeforeDate time.Time `json:"resetBeforeDate,omitempty"`
ResetID string `json:"resetID,omitempty"`
TotalDowntime time.Duration `json:"totalDowntime"`
LastOnline time.Time `json:"lastOnline"`
Online bool `json:"isOnline"`
Latency LatencyStat `json:"latency"`
DeploymentID string `json:"deploymentID,omitempty"`
Edge bool `json:"edge"` // target is recipient of edge traffic
EdgeSyncBeforeExpiry bool `json:"edgeSyncBeforeExpiry"` // must replicate to edge before expiry
OfflineCount int64 `json:"offlineCount"`
}
// Credentials holds access and secret keys.
type Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
}
// Clone returns a shallow clone of BucketTarget without the secret key in credentials
func (t *BucketTarget) Clone() BucketTarget {
return BucketTarget{
SourceBucket: t.SourceBucket,
Endpoint: t.Endpoint,
TargetBucket: t.TargetBucket,
Credentials: &Credentials{AccessKey: t.Credentials.AccessKey},
Secure: t.Secure,
Path: t.Path,
API: t.API,
Arn: t.Arn,
Type: t.Type,
Region: t.Region,
BandwidthLimit: t.BandwidthLimit,
ReplicationSync: t.ReplicationSync,
StorageClass: t.StorageClass, // target storage class
HealthCheckDuration: t.HealthCheckDuration,
DisableProxy: t.DisableProxy,
ResetBeforeDate: t.ResetBeforeDate,
ResetID: t.ResetID,
TotalDowntime: t.TotalDowntime,
LastOnline: t.LastOnline,
Online: t.Online,
Latency: t.Latency,
DeploymentID: t.DeploymentID,
Edge: t.Edge,
EdgeSyncBeforeExpiry: t.EdgeSyncBeforeExpiry,
OfflineCount: t.OfflineCount,
}
}
// URL returns the target URL
func (t BucketTarget) URL() *url.URL {
scheme := "http"
if t.Secure {
scheme = "https"
}
return &url.URL{
Scheme: scheme,
Host: t.Endpoint,
}
}
// Empty returns true if struct is empty.
func (t BucketTarget) Empty() bool {
return t.String() == "" || t.Credentials == nil
}
func (t *BucketTarget) String() string {
return fmt.Sprintf("%s %s", t.Endpoint, t.TargetBucket)
}
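// Example (illustrative sketch): constructing a replication bucket target and
// reading back its URL and a redacted clone. Endpoint, bucket names and
// credentials are hypothetical placeholders.
func exampleBucketTarget() {
	t := BucketTarget{
		SourceBucket: "source-bucket",
		TargetBucket: "dest-bucket",
		Endpoint:     "replica.example.com:9000",
		Secure:       true,
		Type:         ReplicationService,
		Credentials:  &Credentials{AccessKey: "EXAMPLEACCESSKEY", SecretKey: "EXAMPLESECRET"},
	}
	fmt.Println(t.URL().String()) // https://replica.example.com:9000
	clone := t.Clone()            // Clone drops the secret key from credentials.
	fmt.Println(clone.Credentials.SecretKey == "") // true
}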
golang-github-minio-madmin-go-3.0.104/bucket-targets_gen.go 0000664 0000000 0000000 00000107714 14774251704 0023516 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"time"
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BucketTarget) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "sourcebucket":
z.SourceBucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SourceBucket")
return
}
case "endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "credentials":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Credentials")
return
}
z.Credentials = nil
} else {
if z.Credentials == nil {
z.Credentials = new(Credentials)
}
err = z.Credentials.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Credentials")
return
}
}
case "targetbucket":
z.TargetBucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "TargetBucket")
return
}
case "secure":
z.Secure, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Secure")
return
}
case "path":
z.Path, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
zb0001Mask |= 0x1
case "api":
z.API, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "API")
return
}
zb0001Mask |= 0x2
case "arn":
z.Arn, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Arn")
return
}
zb0001Mask |= 0x4
case "type":
{
var zb0002 string
zb0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = ServiceType(zb0002)
}
case "region":
z.Region, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
zb0001Mask |= 0x8
case "bandwidthlimit":
z.BandwidthLimit, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BandwidthLimit")
return
}
zb0001Mask |= 0x10
case "replicationSync":
z.ReplicationSync, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "ReplicationSync")
return
}
case "storageclass":
z.StorageClass, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
zb0001Mask |= 0x20
case "healthCheckDuration":
z.HealthCheckDuration, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "HealthCheckDuration")
return
}
zb0001Mask |= 0x40
case "disableProxy":
z.DisableProxy, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "DisableProxy")
return
}
case "resetBeforeDate":
z.ResetBeforeDate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "ResetBeforeDate")
return
}
zb0001Mask |= 0x80
case "resetID":
z.ResetID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ResetID")
return
}
zb0001Mask |= 0x100
case "totalDowntime":
z.TotalDowntime, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "TotalDowntime")
return
}
case "lastOnline":
z.LastOnline, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastOnline")
return
}
case "isOnline":
z.Online, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Online")
return
}
case "latency":
err = z.Latency.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Latency")
return
}
case "deploymentID":
z.DeploymentID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "DeploymentID")
return
}
zb0001Mask |= 0x200
case "edge":
z.Edge, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Edge")
return
}
case "edgeSyncBeforeExpiry":
z.EdgeSyncBeforeExpiry, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "EdgeSyncBeforeExpiry")
return
}
case "offlineCount":
z.OfflineCount, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "OfflineCount")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3ff {
if (zb0001Mask & 0x1) == 0 {
z.Path = ""
}
if (zb0001Mask & 0x2) == 0 {
z.API = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Arn = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Region = ""
}
if (zb0001Mask & 0x10) == 0 {
z.BandwidthLimit = 0
}
if (zb0001Mask & 0x20) == 0 {
z.StorageClass = ""
}
if (zb0001Mask & 0x40) == 0 {
z.HealthCheckDuration = 0
}
if (zb0001Mask & 0x80) == 0 {
z.ResetBeforeDate = (time.Time{})
}
if (zb0001Mask & 0x100) == 0 {
z.ResetID = ""
}
if (zb0001Mask & 0x200) == 0 {
z.DeploymentID = ""
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketTarget) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(25)
var zb0001Mask uint32 /* 25 bits */
_ = zb0001Mask
if z.Path == "" {
zb0001Len--
zb0001Mask |= 0x20
}
if z.API == "" {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Arn == "" {
zb0001Len--
zb0001Mask |= 0x80
}
if z.Region == "" {
zb0001Len--
zb0001Mask |= 0x200
}
if z.BandwidthLimit == 0 {
zb0001Len--
zb0001Mask |= 0x400
}
if z.StorageClass == "" {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.HealthCheckDuration == 0 {
zb0001Len--
zb0001Mask |= 0x2000
}
if z.ResetBeforeDate == (time.Time{}) {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.ResetID == "" {
zb0001Len--
zb0001Mask |= 0x10000
}
if z.DeploymentID == "" {
zb0001Len--
zb0001Mask |= 0x200000
}
// variable map header, size zb0001Len
err = en.WriteMapHeader(zb0001Len)
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "sourcebucket"
err = en.Append(0xac, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.SourceBucket)
if err != nil {
err = msgp.WrapError(err, "SourceBucket")
return
}
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "credentials"
err = en.Append(0xab, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73)
if err != nil {
return
}
if z.Credentials == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Credentials.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Credentials")
return
}
}
// write "targetbucket"
err = en.Append(0xac, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.TargetBucket)
if err != nil {
err = msgp.WrapError(err, "TargetBucket")
return
}
// write "secure"
err = en.Append(0xa6, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Secure)
if err != nil {
err = msgp.WrapError(err, "Secure")
return
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "path"
err = en.Append(0xa4, 0x70, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.Path)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "api"
err = en.Append(0xa3, 0x61, 0x70, 0x69)
if err != nil {
return
}
err = en.WriteString(z.API)
if err != nil {
err = msgp.WrapError(err, "API")
return
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "arn"
err = en.Append(0xa3, 0x61, 0x72, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Arn)
if err != nil {
err = msgp.WrapError(err, "Arn")
return
}
}
// write "type"
err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(string(z.Type))
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "region"
err = en.Append(0xa6, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Region)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// write "bandwidthlimit"
err = en.Append(0xae, 0x62, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x6c, 0x69, 0x6d, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteInt64(z.BandwidthLimit)
if err != nil {
err = msgp.WrapError(err, "BandwidthLimit")
return
}
}
// write "replicationSync"
err = en.Append(0xaf, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63)
if err != nil {
return
}
err = en.WriteBool(z.ReplicationSync)
if err != nil {
err = msgp.WrapError(err, "ReplicationSync")
return
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// write "storageclass"
err = en.Append(0xac, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x63, 0x6c, 0x61, 0x73, 0x73)
if err != nil {
return
}
err = en.WriteString(z.StorageClass)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// write "healthCheckDuration"
err = en.Append(0xb3, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteDuration(z.HealthCheckDuration)
if err != nil {
err = msgp.WrapError(err, "HealthCheckDuration")
return
}
}
// write "disableProxy"
err = en.Append(0xac, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79)
if err != nil {
return
}
err = en.WriteBool(z.DisableProxy)
if err != nil {
err = msgp.WrapError(err, "DisableProxy")
return
}
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// write "resetBeforeDate"
err = en.Append(0xaf, 0x72, 0x65, 0x73, 0x65, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.ResetBeforeDate)
if err != nil {
err = msgp.WrapError(err, "ResetBeforeDate")
return
}
}
if (zb0001Mask & 0x10000) == 0 { // if not omitted
// write "resetID"
err = en.Append(0xa7, 0x72, 0x65, 0x73, 0x65, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ResetID)
if err != nil {
err = msgp.WrapError(err, "ResetID")
return
}
}
// write "totalDowntime"
err = en.Append(0xad, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x6f, 0x77, 0x6e, 0x74, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteDuration(z.TotalDowntime)
if err != nil {
err = msgp.WrapError(err, "TotalDowntime")
return
}
// write "lastOnline"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastOnline)
if err != nil {
err = msgp.WrapError(err, "LastOnline")
return
}
// write "isOnline"
err = en.Append(0xa8, 0x69, 0x73, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Online)
if err != nil {
err = msgp.WrapError(err, "Online")
return
}
// write "latency"
err = en.Append(0xa7, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
if err != nil {
return
}
err = z.Latency.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Latency")
return
}
if (zb0001Mask & 0x200000) == 0 { // if not omitted
// write "deploymentID"
err = en.Append(0xac, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.DeploymentID)
if err != nil {
err = msgp.WrapError(err, "DeploymentID")
return
}
}
// write "edge"
err = en.Append(0xa4, 0x65, 0x64, 0x67, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Edge)
if err != nil {
err = msgp.WrapError(err, "Edge")
return
}
// write "edgeSyncBeforeExpiry"
err = en.Append(0xb4, 0x65, 0x64, 0x67, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x79)
if err != nil {
return
}
err = en.WriteBool(z.EdgeSyncBeforeExpiry)
if err != nil {
err = msgp.WrapError(err, "EdgeSyncBeforeExpiry")
return
}
// write "offlineCount"
err = en.Append(0xac, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt64(z.OfflineCount)
if err != nil {
err = msgp.WrapError(err, "OfflineCount")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketTarget) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(25)
var zb0001Mask uint32 /* 25 bits */
_ = zb0001Mask
if z.Path == "" {
zb0001Len--
zb0001Mask |= 0x20
}
if z.API == "" {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Arn == "" {
zb0001Len--
zb0001Mask |= 0x80
}
if z.Region == "" {
zb0001Len--
zb0001Mask |= 0x200
}
if z.BandwidthLimit == 0 {
zb0001Len--
zb0001Mask |= 0x400
}
if z.StorageClass == "" {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.HealthCheckDuration == 0 {
zb0001Len--
zb0001Mask |= 0x2000
}
if z.ResetBeforeDate == (time.Time{}) {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.ResetID == "" {
zb0001Len--
zb0001Mask |= 0x10000
}
if z.DeploymentID == "" {
zb0001Len--
zb0001Mask |= 0x200000
}
// variable map header, size zb0001Len
o = msgp.AppendMapHeader(o, zb0001Len)
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "sourcebucket"
o = append(o, 0xac, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.SourceBucket)
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "credentials"
o = append(o, 0xab, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73)
if z.Credentials == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Credentials.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Credentials")
return
}
}
// string "targetbucket"
o = append(o, 0xac, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.TargetBucket)
// string "secure"
o = append(o, 0xa6, 0x73, 0x65, 0x63, 0x75, 0x72, 0x65)
o = msgp.AppendBool(o, z.Secure)
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "path"
o = append(o, 0xa4, 0x70, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.Path)
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "api"
o = append(o, 0xa3, 0x61, 0x70, 0x69)
o = msgp.AppendString(o, z.API)
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "arn"
o = append(o, 0xa3, 0x61, 0x72, 0x6e)
o = msgp.AppendString(o, z.Arn)
}
// string "type"
o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, string(z.Type))
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "region"
o = append(o, 0xa6, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Region)
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// string "bandwidthlimit"
o = append(o, 0xae, 0x62, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x6c, 0x69, 0x6d, 0x69, 0x74)
o = msgp.AppendInt64(o, z.BandwidthLimit)
}
// string "replicationSync"
o = append(o, 0xaf, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63)
o = msgp.AppendBool(o, z.ReplicationSync)
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// string "storageclass"
o = append(o, 0xac, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x63, 0x6c, 0x61, 0x73, 0x73)
o = msgp.AppendString(o, z.StorageClass)
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// string "healthCheckDuration"
o = append(o, 0xb3, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
o = msgp.AppendDuration(o, z.HealthCheckDuration)
}
// string "disableProxy"
o = append(o, 0xac, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79)
o = msgp.AppendBool(o, z.DisableProxy)
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// string "resetBeforeDate"
o = append(o, 0xaf, 0x72, 0x65, 0x73, 0x65, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.ResetBeforeDate)
}
if (zb0001Mask & 0x10000) == 0 { // if not omitted
// string "resetID"
o = append(o, 0xa7, 0x72, 0x65, 0x73, 0x65, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.ResetID)
}
// string "totalDowntime"
o = append(o, 0xad, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x6f, 0x77, 0x6e, 0x74, 0x69, 0x6d, 0x65)
o = msgp.AppendDuration(o, z.TotalDowntime)
// string "lastOnline"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
o = msgp.AppendTime(o, z.LastOnline)
// string "isOnline"
o = append(o, 0xa8, 0x69, 0x73, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
o = msgp.AppendBool(o, z.Online)
// string "latency"
o = append(o, 0xa7, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
o, err = z.Latency.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Latency")
return
}
if (zb0001Mask & 0x200000) == 0 { // if not omitted
// string "deploymentID"
o = append(o, 0xac, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.DeploymentID)
}
// string "edge"
o = append(o, 0xa4, 0x65, 0x64, 0x67, 0x65)
o = msgp.AppendBool(o, z.Edge)
// string "edgeSyncBeforeExpiry"
o = append(o, 0xb4, 0x65, 0x64, 0x67, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x79)
o = msgp.AppendBool(o, z.EdgeSyncBeforeExpiry)
// string "offlineCount"
o = append(o, 0xac, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt64(o, z.OfflineCount)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketTarget) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "sourcebucket":
z.SourceBucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SourceBucket")
return
}
case "endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "credentials":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Credentials = nil
} else {
if z.Credentials == nil {
z.Credentials = new(Credentials)
}
bts, err = z.Credentials.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Credentials")
return
}
}
case "targetbucket":
z.TargetBucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TargetBucket")
return
}
case "secure":
z.Secure, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Secure")
return
}
case "path":
z.Path, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
zb0001Mask |= 0x1
case "api":
z.API, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "API")
return
}
zb0001Mask |= 0x2
case "arn":
z.Arn, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Arn")
return
}
zb0001Mask |= 0x4
case "type":
{
var zb0002 string
zb0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = ServiceType(zb0002)
}
case "region":
z.Region, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
zb0001Mask |= 0x8
case "bandwidthlimit":
z.BandwidthLimit, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BandwidthLimit")
return
}
zb0001Mask |= 0x10
case "replicationSync":
z.ReplicationSync, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationSync")
return
}
case "storageclass":
z.StorageClass, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
zb0001Mask |= 0x20
case "healthCheckDuration":
z.HealthCheckDuration, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealthCheckDuration")
return
}
zb0001Mask |= 0x40
case "disableProxy":
z.DisableProxy, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DisableProxy")
return
}
case "resetBeforeDate":
z.ResetBeforeDate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetBeforeDate")
return
}
zb0001Mask |= 0x80
case "resetID":
z.ResetID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetID")
return
}
zb0001Mask |= 0x100
case "totalDowntime":
z.TotalDowntime, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalDowntime")
return
}
case "lastOnline":
z.LastOnline, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastOnline")
return
}
case "isOnline":
z.Online, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Online")
return
}
case "latency":
bts, err = z.Latency.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Latency")
return
}
case "deploymentID":
z.DeploymentID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeploymentID")
return
}
zb0001Mask |= 0x200
case "edge":
z.Edge, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Edge")
return
}
case "edgeSyncBeforeExpiry":
z.EdgeSyncBeforeExpiry, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "EdgeSyncBeforeExpiry")
return
}
case "offlineCount":
z.OfflineCount, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OfflineCount")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3ff {
if (zb0001Mask & 0x1) == 0 {
z.Path = ""
}
if (zb0001Mask & 0x2) == 0 {
z.API = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Arn = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Region = ""
}
if (zb0001Mask & 0x10) == 0 {
z.BandwidthLimit = 0
}
if (zb0001Mask & 0x20) == 0 {
z.StorageClass = ""
}
if (zb0001Mask & 0x40) == 0 {
z.HealthCheckDuration = 0
}
if (zb0001Mask & 0x80) == 0 {
z.ResetBeforeDate = (time.Time{})
}
if (zb0001Mask & 0x100) == 0 {
z.ResetID = ""
}
if (zb0001Mask & 0x200) == 0 {
z.DeploymentID = ""
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketTarget) Msgsize() (s int) {
s = 3 + 13 + msgp.StringPrefixSize + len(z.SourceBucket) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 12
if z.Credentials == nil {
s += msgp.NilSize
} else {
s += z.Credentials.Msgsize()
}
s += 13 + msgp.StringPrefixSize + len(z.TargetBucket) + 7 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Path) + 4 + msgp.StringPrefixSize + len(z.API) + 4 + msgp.StringPrefixSize + len(z.Arn) + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Region) + 15 + msgp.Int64Size + 16 + msgp.BoolSize + 13 + msgp.StringPrefixSize + len(z.StorageClass) + 20 + msgp.DurationSize + 13 + msgp.BoolSize + 16 + msgp.TimeSize + 8 + msgp.StringPrefixSize + len(z.ResetID) + 14 + msgp.DurationSize + 11 + msgp.TimeSize + 9 + msgp.BoolSize + 8 + z.Latency.Msgsize() + 13 + msgp.StringPrefixSize + len(z.DeploymentID) + 5 + msgp.BoolSize + 21 + msgp.BoolSize + 13 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *BucketTargets) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "targets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Targets")
return
}
if cap(z.Targets) >= int(zb0002) {
z.Targets = (z.Targets)[:zb0002]
} else {
z.Targets = make([]BucketTarget, zb0002)
}
for za0001 := range z.Targets {
err = z.Targets[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Targets", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketTargets) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "targets"
err = en.Append(0x81, 0xa7, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Targets)))
if err != nil {
err = msgp.WrapError(err, "Targets")
return
}
for za0001 := range z.Targets {
err = z.Targets[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Targets", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketTargets) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "targets"
o = append(o, 0x81, 0xa7, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Targets)))
for za0001 := range z.Targets {
o, err = z.Targets[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Targets", za0001)
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketTargets) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "targets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Targets")
return
}
if cap(z.Targets) >= int(zb0002) {
z.Targets = (z.Targets)[:zb0002]
} else {
z.Targets = make([]BucketTarget, zb0002)
}
for za0001 := range z.Targets {
bts, err = z.Targets[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Targets", za0001)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketTargets) Msgsize() (s int) {
s = 1 + 8 + msgp.ArrayHeaderSize
for za0001 := range z.Targets {
s += z.Targets[za0001].Msgsize()
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *Credentials) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "accessKey":
z.AccessKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
zb0001Mask |= 0x1
case "secretKey":
z.SecretKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
zb0001Mask |= 0x2
case "sessionToken":
z.SessionToken, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SessionToken")
return
}
zb0001Mask |= 0x4
case "expiration":
z.Expiration, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Expiration")
return
}
zb0001Mask |= 0x8
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.AccessKey = ""
}
if (zb0001Mask & 0x2) == 0 {
z.SecretKey = ""
}
if (zb0001Mask & 0x4) == 0 {
z.SessionToken = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Expiration = (time.Time{})
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Credentials) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(4)
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
if z.AccessKey == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.SecretKey == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.SessionToken == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Expiration == (time.Time{}) {
zb0001Len--
zb0001Mask |= 0x8
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "accessKey"
err = en.Append(0xa9, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.AccessKey)
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "secretKey"
err = en.Append(0xa9, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.SecretKey)
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "sessionToken"
err = en.Append(0xac, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.SessionToken)
if err != nil {
err = msgp.WrapError(err, "SessionToken")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "expiration"
err = en.Append(0xaa, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteTime(z.Expiration)
if err != nil {
err = msgp.WrapError(err, "Expiration")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Credentials) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(4)
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
if z.AccessKey == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.SecretKey == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.SessionToken == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Expiration == (time.Time{}) {
zb0001Len--
zb0001Mask |= 0x8
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "accessKey"
o = append(o, 0xa9, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.AccessKey)
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "secretKey"
o = append(o, 0xa9, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.SecretKey)
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "sessionToken"
o = append(o, 0xac, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
o = msgp.AppendString(o, z.SessionToken)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "expiration"
o = append(o, 0xaa, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
o = msgp.AppendTime(o, z.Expiration)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Credentials) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "accessKey":
z.AccessKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
zb0001Mask |= 0x1
case "secretKey":
z.SecretKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
zb0001Mask |= 0x2
case "sessionToken":
z.SessionToken, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SessionToken")
return
}
zb0001Mask |= 0x4
case "expiration":
z.Expiration, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Expiration")
return
}
zb0001Mask |= 0x8
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.AccessKey = ""
}
if (zb0001Mask & 0x2) == 0 {
z.SecretKey = ""
}
if (zb0001Mask & 0x4) == 0 {
z.SessionToken = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Expiration = (time.Time{})
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Credentials) Msgsize() (s int) {
s = 1 + 10 + msgp.StringPrefixSize + len(z.AccessKey) + 10 + msgp.StringPrefixSize + len(z.SecretKey) + 13 + msgp.StringPrefixSize + len(z.SessionToken) + 11 + msgp.TimeSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *ServiceType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 string
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = ServiceType(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z ServiceType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteString(string(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z ServiceType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ServiceType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = ServiceType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z ServiceType) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
golang-github-minio-madmin-go-3.0.104/bucket-targets_gen_test.go 0000664 0000000 0000000 00000015241 14774251704 0024546 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBucketTarget(t *testing.T) {
v := BucketTarget{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketTarget(b *testing.B) {
v := BucketTarget{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketTarget(b *testing.B) {
v := BucketTarget{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketTarget(b *testing.B) {
v := BucketTarget{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketTarget(t *testing.T) {
v := BucketTarget{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketTarget Msgsize() is inaccurate")
}
vn := BucketTarget{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketTarget(b *testing.B) {
v := BucketTarget{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketTarget(b *testing.B) {
v := BucketTarget{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBucketTargets(t *testing.T) {
v := BucketTargets{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketTargets(b *testing.B) {
v := BucketTargets{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketTargets(b *testing.B) {
v := BucketTargets{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketTargets(b *testing.B) {
v := BucketTargets{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketTargets(t *testing.T) {
v := BucketTargets{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketTargets Msgsize() is inaccurate")
}
vn := BucketTargets{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketTargets(b *testing.B) {
v := BucketTargets{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketTargets(b *testing.B) {
v := BucketTargets{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalCredentials(t *testing.T) {
v := Credentials{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgCredentials(b *testing.B) {
v := Credentials{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgCredentials(b *testing.B) {
v := Credentials{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalCredentials(b *testing.B) {
v := Credentials{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeCredentials(t *testing.T) {
v := Credentials{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeCredentials Msgsize() is inaccurate")
}
vn := Credentials{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeCredentials(b *testing.B) {
v := Credentials{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeCredentials(b *testing.B) {
v := Credentials{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/cgroup/ 0000775 0000000 0000000 00000000000 14774251704 0020677 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/cgroup/linux.go 0000664 0000000 0000000 00000012050 14774251704 0022363 0 ustar 00root root 0000000 0000000 //go:build linux
// +build linux
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// Package cgroup implements parsing for all the cgroup
// categories and functionality in a simple way.
package cgroup
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
)
// DO NOT EDIT following constants are chosen defaults for any kernel
// after 3.x, please open a GitHub issue https://github.com/minio/madmin-go/v2/issues
// and discuss first if you wish to change this.
const (
// Default string for looking for kernel memory param.
memoryLimitKernelParam = "memory.limit_in_bytes"
// Points to the sysfs memory cgroup path.
cgroupMemSysPath = "/sys/fs/cgroup/memory"
// Default docker prefix.
dockerPrefixName = "/docker/"
// Proc controller group path.
cgroupFileTemplate = "/proc/%d/cgroup"
)
// CGEntries - represents all the entries in a process cgroup file
// at /proc/<pid>/cgroup as key-value pairs.
type CGEntries map[string]string
// GetEntries reads and parses all the cgroup entries for a given process.
func GetEntries(pid int) (CGEntries, error) {
r, err := os.Open(fmt.Sprintf(cgroupFileTemplate, pid))
if err != nil {
return nil, err
}
defer r.Close()
return parseProcCGroup(r)
}
// parseProcCGroup - cgroups are always in the following
// format once enabled; you need to know the pid of the
// application you are looking for, so that the
// following parsing logic only parses the file located
// at /proc/<pid>/cgroup.
//
// CGROUP entries (id, component and path) are always in
// the following format: `ID:COMPONENT:PATH`
//
// The following code block parses this information and
// returns CGEntries, a parsed map of all
// the line-by-line entries from /proc/<pid>/cgroup.
func parseProcCGroup(r io.Reader) (CGEntries, error) {
cgEntries := CGEntries{}
// Start reading cgroup categories line by line
// and process them into the CGEntries map.
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
tokens := strings.SplitN(line, ":", 3)
if len(tokens) < 3 {
continue
}
name, path := tokens[1], tokens[2]
for _, token := range strings.Split(name, ",") {
name = strings.TrimPrefix(token, "name=")
cgEntries[name] = path
}
}
// Return upon any error while reading the cgroup categories.
if err := scanner.Err(); err != nil {
return nil, err
}
return cgEntries, nil
}
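// Example (illustrative sketch): parsing a small, hypothetical cgroup file
// body and looking up the memory controller path from the parsed entries.
func exampleParseProcCGroup() (string, error) {
	sample := "11:memory:/user.slice\n1:name=systemd:/user.slice/user-1000.slice/session-1.scope\n"
	entries, err := parseProcCGroup(strings.NewReader(sample))
	if err != nil {
		return "", err
	}
	return entries["memory"], nil // "/user.slice"
}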
// Fetch the value of a cgroup kernel param from the cgroup manager.
// If a cgroup manager is configured, we simply rely on the `cgm` CLI
// to fetch the value for us.
func getManagerKernValue(cname, path, kernParam string) (limit uint64, err error) {
cmd := exec.Command("cgm", "getvalue", cname, path, kernParam)
var out bytes.Buffer
cmd.Stdout = &out
if err = cmd.Run(); err != nil {
return 0, err
}
// Parse the cgm output.
limit, err = strconv.ParseUint(strings.TrimSpace(out.String()), 10, 64)
return limit, err
}
// Get cgroup memory limit file path.
func getMemoryLimitFilePath(cgPath string) string {
path := cgroupMemSysPath
// Docker generates weird cgroup paths that don't
// really exist on the file system.
//
// For example on regular Linux OS :
// `/user.slice/user-1000.slice/session-1.scope`
//
// But they exist as a bind mount on Docker and
// are not accessible : `/docker/`
//
// If the path starts with `/docker` we will just ignore
// it and fall back to :
// `/sys/fs/cgroup/memory/memory.limit_in_bytes`
if !strings.HasPrefix(cgPath, dockerPrefixName) {
path = filepath.Join(path, cgPath)
}
// Final path.
return filepath.Join(path, memoryLimitKernelParam)
}
// GetMemoryLimit - fetches the cgroup memory limit, first by querying the
// cgroup manager (`cgm`) and, if that fails, by falling back to reading the
// limit file under '/sys/fs/cgroup/memory'.
func GetMemoryLimit(pid int) (limit uint64, err error) {
var cg CGEntries
cg, err = GetEntries(pid)
if err != nil {
return 0, err
}
path := cg["memory"]
limit, err = getManagerKernValue("memory", path, memoryLimitKernelParam)
if err != nil {
// Upon any failure returned from `cgm` (on some systems cgm
// might not be installed), we fall back to the sysfs
// path instead to look up the memory limit.
var b []byte
b, err = os.ReadFile(getMemoryLimitFilePath(path))
if err != nil {
return 0, err
}
limit, err = strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
}
return limit, err
}
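// Example (illustrative sketch): looking up the cgroup memory limit of the
// current process. Whether the `cgm` manager or the sysfs fallback is used
// depends on the host this runs on.
func exampleGetMemoryLimit() {
	limit, err := GetMemoryLimit(os.Getpid())
	if err != nil {
		fmt.Println("unable to determine cgroup memory limit:", err)
		return
	}
	fmt.Println("cgroup memory limit (bytes):", limit)
}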
golang-github-minio-madmin-go-3.0.104/cgroup/linux_test.go 0000664 0000000 0000000 00000006345 14774251704 0023434 0 ustar 00root root 0000000 0000000 //go:build linux
// +build linux
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package cgroup
import (
"os"
"testing"
)
// Testing parsing correctness for various process cgroup files.
func TestProcCGroup(t *testing.T) {
tmpPath, err := os.CreateTemp("", "cgroup")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpPath.Name())
cgroup := `
11:memory:/user.slice
10:blkio:/user.slice
9:hugetlb:/
8:net_cls,net_prio:/
7:perf_event:/
6:pids:/user.slice/user-1000.slice
5:devices:/user.slice
4:cpuset:/
3:cpu,cpuacct:/user.slice
2:freezer:/
1:name=systemd:/user.slice/user-1000.slice/session-1.scope
`
_, err = tmpPath.WriteString(cgroup)
if err != nil {
t.Fatal(err)
}
// Seek back to read from the beginning.
tmpPath.Seek(0, 0)
cg, err := parseProcCGroup(tmpPath)
if err != nil {
t.Fatal(err)
}
path := cg["memory"]
if len(path) == 0 {
t.Fatal("Path component cannot be empty")
}
if path != "/user.slice" {
t.Fatal("Path component cannot be empty")
}
path = cg["systemd"]
if path != "/user.slice/user-1000.slice/session-1.scope" {
t.Fatal("Path component cannot be empty")
}
// Mixed cgroups with different group names.
cgroup = `
11:memory:/newtest/newtest
10:blkio:/user.slice
9:hugetlb:/
8:net_cls,net_prio:/
7:perf_event:/
6:pids:/user.slice/user-1000.slice
5:devices:/user.slice
4:cpuset:/
3:cpu,cpuacct:/newtest/newtest
2:freezer:/
1:name=systemd:/user.slice/user-1000.slice/session-1.scope
`
// Seek back to read from the beginning.
tmpPath.Seek(0, 0)
_, err = tmpPath.WriteString(cgroup)
if err != nil {
t.Fatal(err)
}
// Seek back to read from the beginning.
tmpPath.Seek(0, 0)
cg, err = parseProcCGroup(tmpPath)
if err != nil {
t.Fatal(err)
}
path = cg["memory"]
if path != "/newtest/newtest" {
t.Fatal("Path component cannot be empty")
}
path = cg["systemd"]
if path != "/user.slice/user-1000.slice/session-1.scope" {
t.Fatal("Path component cannot be empty")
}
}
// Tests cgroup memory limit path construction.
func TestMemoryLimitPath(t *testing.T) {
testCases := []struct {
cgroupPath string
expectedPath string
}{
{
cgroupPath: "/user.slice",
expectedPath: "/sys/fs/cgroup/memory/user.slice/memory.limit_in_bytes",
},
{
cgroupPath: "/docker/testing",
expectedPath: "/sys/fs/cgroup/memory/memory.limit_in_bytes",
},
}
for i, testCase := range testCases {
actualPath := getMemoryLimitFilePath(testCase.cgroupPath)
if actualPath != testCase.expectedPath {
t.Fatalf("Test: %d: Expected: %s, got %s", i+1, testCase.expectedPath, actualPath)
}
}
}
golang-github-minio-madmin-go-3.0.104/cgroup/nolinux.go 0000664 0000000 0000000 00000001777 14774251704 0022736 0 ustar 00root root 0000000 0000000 //go:build !linux
// +build !linux
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package cgroup
import "errors"
// GetMemoryLimit - Not implemented in non-linux platforms
func GetMemoryLimit(_ int) (limit uint64, err error) {
return limit, errors.New("Not implemented for non-linux platforms")
}
golang-github-minio-madmin-go-3.0.104/cluster-commands.go 0000664 0000000 0000000 00000125761 14774251704 0023223 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"net/url"
"strconv"
"time"
"github.com/minio/minio-go/v7/pkg/replication"
)
// SiteReplAPIVersion holds the supported version of the server Replication API
const SiteReplAPIVersion = "1"
// PeerSite - represents a cluster/site to be added to the set of replicated
// sites.
type PeerSite struct {
Name string `json:"name"`
Endpoint string `json:"endpoints"`
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
}
// Meaningful values for ReplicateAddStatus.Status
const (
ReplicateAddStatusSuccess = "Requested sites were configured for replication successfully."
ReplicateAddStatusPartial = "Some sites could not be configured for replication."
)
// ReplicateAddStatus - returns status of add request.
type ReplicateAddStatus struct {
Success bool `json:"success"`
Status string `json:"status"`
ErrDetail string `json:"errorDetail,omitempty"`
InitialSyncErrorMessage string `json:"initialSyncErrorMessage,omitempty"`
}
// SRAddOptions holds SR Add options
type SRAddOptions struct {
ReplicateILMExpiry bool
Force bool
}
func (o *SRAddOptions) getURLValues() url.Values {
urlValues := make(url.Values)
urlValues.Set("replicateILMExpiry", strconv.FormatBool(o.ReplicateILMExpiry))
urlValues.Set("force", strconv.FormatBool(o.Force))
return urlValues
}
// SiteReplicationAdd - sends the SR add API call.
func (adm *AdminClient) SiteReplicationAdd(ctx context.Context, sites []PeerSite, opts SRAddOptions) (ReplicateAddStatus, error) {
sitesBytes, err := json.Marshal(sites)
if err != nil {
return ReplicateAddStatus{}, err
}
encBytes, err := EncryptData(adm.getSecretKey(), sitesBytes)
if err != nil {
return ReplicateAddStatus{}, err
}
q := opts.getURLValues()
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/add",
content: encBytes,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return ReplicateAddStatus{}, err
}
if resp.StatusCode != http.StatusOK {
return ReplicateAddStatus{}, httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return ReplicateAddStatus{}, err
}
var res ReplicateAddStatus
if err = json.Unmarshal(b, &res); err != nil {
return ReplicateAddStatus{}, err
}
return res, nil
}
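// Hypothetical usage sketch for SiteReplicationAdd (not part of the package).
// It assumes the package's New constructor for building an AdminClient; the
// endpoints and credentials below are placeholders.
//
//	adm, err := madmin.New("minio-site1:9000", "ACCESS", "SECRET", true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	sites := []madmin.PeerSite{
//		{Name: "site1", Endpoint: "https://minio-site1:9000", AccessKey: "ACCESS", SecretKey: "SECRET"},
//		{Name: "site2", Endpoint: "https://minio-site2:9000", AccessKey: "ACCESS", SecretKey: "SECRET"},
//	}
//	status, err := adm.SiteReplicationAdd(context.Background(), sites, madmin.SRAddOptions{ReplicateILMExpiry: true})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Println(status.Status)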
// SiteReplicationInfo - contains cluster replication information.
type SiteReplicationInfo struct {
Enabled bool `json:"enabled"`
Name string `json:"name,omitempty"`
Sites []PeerInfo `json:"sites,omitempty"`
ServiceAccountAccessKey string `json:"serviceAccountAccessKey,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SiteReplicationInfo - returns cluster replication information.
func (adm *AdminClient) SiteReplicationInfo(ctx context.Context) (info SiteReplicationInfo, err error) {
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/info",
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return info, err
}
if resp.StatusCode != http.StatusOK {
return info, httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return info, err
}
err = json.Unmarshal(b, &info)
return info, err
}
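// Hypothetical usage sketch (not part of the package): querying the current
// site replication configuration with an AdminClient `adm` built elsewhere.
//
//	info, err := adm.SiteReplicationInfo(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	if !info.Enabled {
//		log.Println("site replication is not configured")
//		return
//	}
//	for _, peer := range info.Sites {
//		log.Printf("peer %s -> %s (deployment %s)", peer.Name, peer.Endpoint, peer.DeploymentID)
//	}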
// SRPeerJoinReq - arg body for SRPeerJoin
type SRPeerJoinReq struct {
SvcAcctAccessKey string `json:"svcAcctAccessKey"`
SvcAcctSecretKey string `json:"svcAcctSecretKey"`
SvcAcctParent string `json:"svcAcctParent"`
Peers map[string]PeerInfo `json:"peers"`
UpdatedAt time.Time `json:"updatedAt"`
}
// PeerInfo - contains some properties of a cluster peer.
type PeerInfo struct {
Endpoint string `json:"endpoint"`
Name string `json:"name"`
// Deployment ID is useful as it is immutable - though endpoint may
// change.
DeploymentID string `json:"deploymentID"`
SyncState SyncStatus `json:"sync"` // whether synchronous replication is enabled or disabled
DefaultBandwidth BucketBandwidth `json:"defaultbandwidth"` // bandwidth limit per bucket in bytes/sec
ReplicateILMExpiry bool `json:"replicate-ilm-expiry"`
APIVersion string `json:"apiVersion,omitempty"`
}
// BucketBandwidth has default bandwidth limit per bucket in bytes/sec
type BucketBandwidth struct {
Limit uint64 `json:"bandwidthLimitPerBucket"`
IsSet bool `json:"set"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
type SyncStatus string // change in sync state
const (
SyncEnabled SyncStatus = "enable"
SyncDisabled SyncStatus = "disable"
)
func (s SyncStatus) Empty() bool {
return s != SyncDisabled && s != SyncEnabled
}
// SRPeerJoin - used only by minio server to send SR join requests to peer
// servers.
func (adm *AdminClient) SRPeerJoin(ctx context.Context, r SRPeerJoinReq) error {
b, err := json.Marshal(r)
if err != nil {
return err
}
encBuf, err := EncryptData(adm.getSecretKey(), b)
if err != nil {
return err
}
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/peer/join",
content: encBuf,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// BktOp represents the bucket operation being requested.
type BktOp string
// BktOp value constants.
const (
// make bucket and enable versioning
MakeWithVersioningBktOp BktOp = "make-with-versioning"
// add replication configuration
ConfigureReplBktOp BktOp = "configure-replication"
// delete bucket (forceDelete = off)
DeleteBucketBktOp BktOp = "delete-bucket"
// delete bucket (forceDelete = on)
ForceDeleteBucketBktOp BktOp = "force-delete-bucket"
// purge bucket
PurgeDeletedBucketOp BktOp = "purge-deleted-bucket"
)
// SRPeerBucketOps - tells peers to create bucket and setup replication.
func (adm *AdminClient) SRPeerBucketOps(ctx context.Context, bucket string, op BktOp, opts map[string]string) error {
v := url.Values{}
v.Add("bucket", bucket)
v.Add("operation", string(op))
// For make-bucket, bucket options may be sent via `opts`
if op == MakeWithVersioningBktOp || op == DeleteBucketBktOp {
for k, val := range opts {
v.Add(k, val)
}
}
v.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
queryValues: v,
relPath: adminAPIPrefix + "/site-replication/peer/bucket-ops",
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// SRIAMItem.Type constants.
const (
SRIAMItemPolicy = "policy"
SRIAMItemPolicyMapping = "policy-mapping"
SRIAMItemGroupInfo = "group-info"
SRIAMItemCredential = "credential"
SRIAMItemSvcAcc = "service-account"
SRIAMItemSTSAcc = "sts-account"
SRIAMItemIAMUser = "iam-user"
SRIAMItemExternalUser = "external-user"
)
// SRSessionPolicy - represents a session policy to be replicated.
type SRSessionPolicy json.RawMessage
func (s SRSessionPolicy) MarshalJSON() ([]byte, error) {
return json.RawMessage(s).MarshalJSON()
}
func (s *SRSessionPolicy) UnmarshalJSON(data []byte) error {
if s == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
if bytes.Equal(data, []byte("null")) {
*s = nil
} else {
*s = append((*s)[0:0], data...)
}
return nil
}
// SRSvcAccCreate - create operation
type SRSvcAccCreate struct {
Parent string `json:"parent"`
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
Groups []string `json:"groups"`
Claims map[string]interface{} `json:"claims"`
SessionPolicy SRSessionPolicy `json:"sessionPolicy"`
Status string `json:"status"`
Name string `json:"name"`
Description string `json:"description"`
Expiration *time.Time `json:"expiration,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRSvcAccUpdate - update operation
type SRSvcAccUpdate struct {
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
Status string `json:"status"`
Name string `json:"name"`
Description string `json:"description"`
SessionPolicy SRSessionPolicy `json:"sessionPolicy"`
Expiration *time.Time `json:"expiration,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRSvcAccDelete - delete operation
type SRSvcAccDelete struct {
AccessKey string `json:"accessKey"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRSvcAccChange - sum-type to represent an svc account change.
type SRSvcAccChange struct {
Create *SRSvcAccCreate `json:"crSvcAccCreate"`
Update *SRSvcAccUpdate `json:"crSvcAccUpdate"`
Delete *SRSvcAccDelete `json:"crSvcAccDelete"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRPolicyMapping - represents mapping of a policy to a user or group.
type SRPolicyMapping struct {
UserOrGroup string `json:"userOrGroup"`
UserType int `json:"userType"`
IsGroup bool `json:"isGroup"`
Policy string `json:"policy"`
CreatedAt time.Time `json:"createdAt,omitempty"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRSTSCredential - represents an STS credential to be replicated.
type SRSTSCredential struct {
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
SessionToken string `json:"sessionToken"`
ParentUser string `json:"parentUser"`
ParentPolicyMapping string `json:"parentPolicyMapping,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// OpenIDUserAccessInfo contains information to access and refresh the token
// that is used to access to UserInfo OpenID endpoint.
type OpenIDUserAccessInfo struct {
RefreshToken string `json:"refreshToken,omitempty"`
AccessToken string `json:"accessToken,omitempty"`
}
// OpenIDUser holds information to maintain a virtual user in OpenID
type OpenIDUser struct {
AccessInfo OpenIDUserAccessInfo `json:"accessInfo,omitempty"`
}
// SRExternalUser - represents an external user information to be replicated.
type SRExternalUser struct {
APIVersion string `json:"apiVersion,omitempty"`
Name string `json:"name"`
IsDeleteReq bool `json:"isDeleteReq"`
OpenIDUser *OpenIDUser `json:"openIDUser,omitempty"`
}
// SRIAMUser - represents a regular (IAM) user to be replicated. A nil UserReq
// implies that a user delete operation should be replicated on the peer cluster.
type SRIAMUser struct {
AccessKey string `json:"accessKey"`
IsDeleteReq bool `json:"isDeleteReq"`
UserReq *AddOrUpdateUserReq `json:"userReq"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRGroupInfo - represents a group add/remove update to be replicated.
type SRGroupInfo struct {
UpdateReq GroupAddRemove `json:"updateReq"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRCredInfo - represents a credential change (create/update/delete) to be
// replicated. This replaces `SvcAccChange`, `STSCredential` and `IAMUser` and
// will DEPRECATE them.
type SRCredInfo struct {
AccessKey string `json:"accessKey"`
// This type corresponds to github.com/minio/minio/cmd.IAMUserType
IAMUserType int `json:"iamUserType"`
IsDeleteReq bool `json:"isDeleteReq,omitempty"`
// This is the JSON encoded value of github.com/minio/minio/cmd.UserIdentity
UserIdentityJSON json.RawMessage `json:"userIdentityJSON"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRIAMItem - represents an IAM object that will be copied to a peer.
type SRIAMItem struct {
Type string `json:"type"`
// Name and Policy below are used when Type == SRIAMItemPolicy
Name string `json:"name"`
Policy json.RawMessage `json:"policy"`
// Used when Type == SRIAMItemPolicyMapping
PolicyMapping *SRPolicyMapping `json:"policyMapping"`
// Used when Type = SRIAMItemGroupInfo
GroupInfo *SRGroupInfo `json:"groupInfo"`
// Used when Type = SRIAMItemCredential
CredentialInfo *SRCredInfo `json:"credentialChange"`
// Used when Type == SRIAMItemSvcAcc
SvcAccChange *SRSvcAccChange `json:"serviceAccountChange"`
// Used when Type = SRIAMItemSTSAcc
STSCredential *SRSTSCredential `json:"stsCredential"`
// Used when Type = SRIAMItemIAMUser
IAMUser *SRIAMUser `json:"iamUser"`
// Used when Type = SRIAMItemExternalUser
ExternalUser *SRExternalUser `json:"externalUser"`
// UpdatedAt - timestamp of last update
UpdatedAt time.Time `json:"updatedAt,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRPeerReplicateIAMItem - copies an IAM object to a peer cluster.
func (adm *AdminClient) SRPeerReplicateIAMItem(ctx context.Context, item SRIAMItem) error {
b, err := json.Marshal(item)
if err != nil {
return err
}
q := make(url.Values)
q.Add("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/peer/iam-item",
content: b,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// SRBucketMeta.Type constants
const (
SRBucketMetaTypePolicy = "policy"
SRBucketMetaTypeTags = "tags"
SRBucketMetaTypeVersionConfig = "version-config"
SRBucketMetaTypeObjectLockConfig = "object-lock-config"
SRBucketMetaTypeSSEConfig = "sse-config"
SRBucketMetaTypeQuotaConfig = "quota-config"
SRBucketMetaLCConfig = "lc-config"
SRBucketMetaTypeCorsConfig = "cors-config"
)
// SRBucketMeta - represents a bucket metadata change that will be copied to a peer.
type SRBucketMeta struct {
Type string `json:"type"`
Bucket string `json:"bucket"`
Policy json.RawMessage `json:"policy,omitempty"`
// Since Versioning config does not have a json representation, we use
// xml byte representation directly.
Versioning *string `json:"versioningConfig,omitempty"`
// Since tags does not have a json representation, we use its xml byte
// representation directly.
Tags *string `json:"tags,omitempty"`
// Since object lock does not have a json representation, we use its xml
// byte representation.
ObjectLockConfig *string `json:"objectLockConfig,omitempty"`
// Since SSE config does not have a json representation, we use its xml
// byte representation.
SSEConfig *string `json:"sseConfig,omitempty"`
// Quota has a json representation use it as is.
Quota json.RawMessage `json:"quota,omitempty"`
// Since Expiry Lifecycle config does not have a json representation, we use its xml
// byte representation.
ExpiryLCConfig *string `json:"expLCConfig,omitempty"`
// UpdatedAt - timestamp of last update
UpdatedAt time.Time `json:"updatedAt,omitempty"`
// ExpiryUpdatedAt - timestamp of last update of expiry rule
ExpiryUpdatedAt time.Time `json:"expiryUpdatedAt,omitempty"`
// Cors is base64 XML representation of CORS config
Cors *string `json:"cors,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRPeerReplicateBucketMeta - copies a bucket metadata change to a peer cluster.
func (adm *AdminClient) SRPeerReplicateBucketMeta(ctx context.Context, item SRBucketMeta) error {
b, err := json.Marshal(item)
if err != nil {
return err
}
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/peer/bucket-meta",
content: b,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// SRBucketInfo - returns all the bucket metadata available for bucket
type SRBucketInfo struct {
Bucket string `json:"bucket"`
Policy json.RawMessage `json:"policy,omitempty"`
// Since Versioning config does not have a json representation, we use
// xml byte representation directly.
Versioning *string `json:"versioningConfig,omitempty"`
// Since tags does not have a json representation, we use its xml byte
// representation directly.
Tags *string `json:"tags,omitempty"`
// Since object lock does not have a json representation, we use its xml
// byte representation.
ObjectLockConfig *string `json:"objectLockConfig,omitempty"`
// Since SSE config does not have a json representation, we use its xml
// byte representation.
SSEConfig *string `json:"sseConfig,omitempty"`
// replication config in json representation
ReplicationConfig *string `json:"replicationConfig,omitempty"`
// quota config in json representation
QuotaConfig *string `json:"quotaConfig,omitempty"`
// Since Expiry Lifecycle config does not have a json representation, we use its xml
// byte representation
ExpiryLCConfig *string `json:"expLCConfig,omitempty"`
CorsConfig *string `json:"corsConfig,omitempty"`
// time stamps of bucket metadata updates
PolicyUpdatedAt time.Time `json:"policyTimestamp,omitempty"`
TagConfigUpdatedAt time.Time `json:"tagTimestamp,omitempty"`
ObjectLockConfigUpdatedAt time.Time `json:"olockTimestamp,omitempty"`
SSEConfigUpdatedAt time.Time `json:"sseTimestamp,omitempty"`
VersioningConfigUpdatedAt time.Time `json:"versioningTimestamp,omitempty"`
ReplicationConfigUpdatedAt time.Time `json:"replicationConfigTimestamp,omitempty"`
QuotaConfigUpdatedAt time.Time `json:"quotaTimestamp,omitempty"`
ExpiryLCConfigUpdatedAt time.Time `json:"expLCTimestamp,omitempty"`
CreatedAt time.Time `json:"bucketTimestamp,omitempty"`
DeletedAt time.Time `json:"bucketDeletedTimestamp,omitempty"`
CorsConfigUpdatedAt time.Time `json:"corsTimestamp,omitempty"`
Location string `json:"location,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// OpenIDProviderSettings contains info on a particular OIDC based provider.
type OpenIDProviderSettings struct {
ClaimName string
ClaimUserinfoEnabled bool
RolePolicy string
ClientID string
HashedClientSecret string
}
// OpenIDSettings contains OpenID configuration info of a cluster.
type OpenIDSettings struct {
// Enabled is true iff there is at least one OpenID provider configured.
Enabled bool
Region string
// Map of role ARN to provider info
Roles map[string]OpenIDProviderSettings
// Info on the claim based provider (all fields are empty if not
// present)
ClaimProvider OpenIDProviderSettings
}
// IDPSettings contains key IDentity Provider settings to validate that all
// peers have the same configuration.
type IDPSettings struct {
LDAP LDAPSettings
OpenID OpenIDSettings
}
// LDAPSettings contains LDAP configuration info of a cluster.
type LDAPSettings struct {
IsLDAPEnabled bool
LDAPUserDNSearchBase string
LDAPUserDNSearchFilter string
LDAPGroupSearchBase string
LDAPGroupSearchFilter string
}
// SRPeerGetIDPSettings - fetches IDP settings from the server.
func (adm *AdminClient) SRPeerGetIDPSettings(ctx context.Context) (info IDPSettings, err error) {
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/peer/idp-settings",
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return info, err
}
if resp.StatusCode != http.StatusOK {
return info, httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return info, err
}
err = json.Unmarshal(b, &info)
if err != nil {
// If the server is an older version, IDPSettings used to be just
// LDAPSettings, so we try that.
err2 := json.Unmarshal(b, &info.LDAP)
if err2 == nil {
err = nil
}
}
return info, err
}
// SRIAMPolicy - represents an IAM policy.
type SRIAMPolicy struct {
Policy json.RawMessage `json:"policy"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// ILMExpiryRule - represents an ILM expiry rule
type ILMExpiryRule struct {
ILMRule string `json:"ilm-rule"`
Bucket string `json:"bucket"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRInfo gets replication metadata for a site
type SRInfo struct {
Enabled bool
Name string
DeploymentID string
Buckets map[string]SRBucketInfo // map of bucket metadata info
Policies map[string]SRIAMPolicy // map of IAM policy name to content
UserPolicies map[string]SRPolicyMapping // map of username -> user policy mapping
UserInfoMap map[string]UserInfo // map of user name to UserInfo
GroupDescMap map[string]GroupDesc // map of group name to GroupDesc
GroupPolicies map[string]SRPolicyMapping // map of groupname -> group policy mapping
ReplicationCfg map[string]replication.Config // map of bucket -> replication config
ILMExpiryRules map[string]ILMExpiryRule // map of ILM Expiry rule to content
State SRStateInfo // peer state
APIVersion string `json:"apiVersion,omitempty"`
}
// SRMetaInfo - returns replication metadata info for a site.
func (adm *AdminClient) SRMetaInfo(ctx context.Context, opts SRStatusOptions) (info SRInfo, err error) {
q := opts.getURLValues()
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/metainfo",
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return info, err
}
if resp.StatusCode != http.StatusOK {
return info, httpRespToErrorResponse(resp)
}
err = json.NewDecoder(resp.Body).Decode(&info)
return info, err
}
// SRStatusInfo returns detailed status on site replication status
type SRStatusInfo struct {
Enabled bool
MaxBuckets int // maximum buckets seen across sites
MaxUsers int // maximum users seen across sites
MaxGroups int // maximum groups seen across sites
MaxPolicies int // maximum policies across sites
MaxILMExpiryRules int // maximum ILM Expiry rules across sites
Sites map[string]PeerInfo // map of deployment ID -> peer info
StatsSummary map[string]SRSiteSummary // map of deployment id -> site stat
// BucketStats map of bucket -> deployment ID -> stats summary. This is populated only if there are
// mismatches or if a specific bucket's stats are requested
BucketStats map[string]map[string]SRBucketStatsSummary
// PolicyStats map of policy -> deployment ID -> stats summary. This is populated only if there are
// mismatches or if a specific policy's stats are requested
PolicyStats map[string]map[string]SRPolicyStatsSummary
// UserStats map of user -> deployment ID -> stats summary. This is populated only if there are
// mismatches or if a specific user's stats are requested
UserStats map[string]map[string]SRUserStatsSummary
// GroupStats map of group -> deployment ID -> stats summary. This is populated only if there are
// mismatches or if a specific group's stats are requested
GroupStats map[string]map[string]SRGroupStatsSummary
// Metrics summary of SRMetrics
Metrics SRMetricsSummary // metrics summary. This is populated if buckets/bucket entity requested
// ILMExpiryStats map of ILM expiry rule -> deployment ID -> stats summary. This is populated if there
// are mismatches or if a specific ILM expiry rule's stats are requested
ILMExpiryStats map[string]map[string]SRILMExpiryStatsSummary
APIVersion string `json:"apiVersion,omitempty"`
}
// SRPolicyStatsSummary has status of policy replication misses
type SRPolicyStatsSummary struct {
DeploymentID string
PolicyMismatch bool
HasPolicy bool
APIVersion string
}
// SRUserStatsSummary has status of user replication misses
type SRUserStatsSummary struct {
DeploymentID string
PolicyMismatch bool
UserInfoMismatch bool
HasUser bool
HasPolicyMapping bool
APIVersion string
}
// SRGroupStatsSummary has status of group replication misses
type SRGroupStatsSummary struct {
DeploymentID string
PolicyMismatch bool
HasGroup bool
GroupDescMismatch bool
HasPolicyMapping bool
APIVersion string
}
// SRBucketStatsSummary has status of bucket metadata replication misses
type SRBucketStatsSummary struct {
DeploymentID string
HasBucket bool
BucketMarkedDeleted bool
TagMismatch bool
VersioningConfigMismatch bool
OLockConfigMismatch bool
PolicyMismatch bool
SSEConfigMismatch bool
ReplicationCfgMismatch bool
QuotaCfgMismatch bool
CorsCfgMismatch bool
HasTagsSet bool
HasOLockConfigSet bool
HasPolicySet bool
HasSSECfgSet bool
HasReplicationCfg bool
HasQuotaCfgSet bool
HasCorsCfgSet bool
APIVersion string
}
// SRILMExpiryStatsSummary has status of ILM Expiry rules metadata replication misses
type SRILMExpiryStatsSummary struct {
DeploymentID string
ILMExpiryRuleMismatch bool
HasILMExpiryRules bool
APIVersion string
}
// SRSiteSummary holds the count of replicated items in site replication
type SRSiteSummary struct {
ReplicatedBuckets int // count of buckets replicated across sites
ReplicatedTags int // count of buckets with tags replicated across sites
ReplicatedBucketPolicies int // count of bucket policies replicated across sites
ReplicatedIAMPolicies int // count of IAM policies replicated across sites
ReplicatedUsers int // count of users replicated across sites
ReplicatedGroups int // count of groups replicated across sites
ReplicatedLockConfig int // count of object lock config replicated across sites
ReplicatedSSEConfig int // count of SSE config replicated across sites
ReplicatedVersioningConfig int // count of versioning config replicated across sites
ReplicatedQuotaConfig int // count of bucket with quota config replicated across sites
ReplicatedUserPolicyMappings int // count of user policy mappings replicated across sites
ReplicatedGroupPolicyMappings int // count of group policy mappings replicated across sites
ReplicatedILMExpiryRules int // count of ILM expiry rules replicated across sites
ReplicatedCorsConfig int // count of CORS config replicated across sites
TotalBucketsCount int // total buckets on this site
TotalTagsCount int // total count of buckets with tags on this site
TotalBucketPoliciesCount int // total count of buckets with bucket policies for this site
TotalIAMPoliciesCount int // total count of IAM policies for this site
TotalLockConfigCount int // total count of buckets with object lock config for this site
TotalSSEConfigCount int // total count of buckets with SSE config
TotalVersioningConfigCount int // total count of buckets with versioning config
TotalQuotaConfigCount int // total count of buckets with quota config
TotalUsersCount int // total number of users seen on this site
TotalGroupsCount int // total number of groups seen on this site
TotalUserPolicyMappingCount int // total number of user policy mappings seen on this site
TotalGroupPolicyMappingCount int // total number of group policy mappings seen on this site
TotalILMExpiryRulesCount int // total number of ILM expiry rules seen on the site
TotalCorsConfigCount int // total number of CORS config seen on the site
APIVersion string
}
// SREntityType specifies type of entity
type SREntityType int
const (
// Unspecified entity
Unspecified SREntityType = iota
// SRBucketEntity Bucket entity type
SRBucketEntity
// SRPolicyEntity Policy entity type
SRPolicyEntity
// SRUserEntity User entity type
SRUserEntity
// SRGroupEntity Group entity type
SRGroupEntity
// SRILMExpiryRuleEntity ILM expiry rule entity type
SRILMExpiryRuleEntity
)
// SRStatusOptions holds SR status options
type SRStatusOptions struct {
Buckets bool
Policies bool
Users bool
Groups bool
Metrics bool
ILMExpiryRules bool
PeerState bool
Entity SREntityType
EntityValue string
ShowDeleted bool
APIVersion string
}
// IsEntitySet returns true if entity option is set
func (o *SRStatusOptions) IsEntitySet() bool {
switch o.Entity {
case SRBucketEntity, SRPolicyEntity, SRUserEntity, SRGroupEntity, SRILMExpiryRuleEntity:
return true
default:
return false
}
}
// GetSREntityType returns the SREntityType for a key
func GetSREntityType(name string) SREntityType {
switch name {
case "bucket":
return SRBucketEntity
case "user":
return SRUserEntity
case "group":
return SRGroupEntity
case "policy":
return SRPolicyEntity
case "ilm-expiry-rule":
return SRILMExpiryRuleEntity
default:
return Unspecified
}
}
func (o *SRStatusOptions) getURLValues() url.Values {
urlValues := make(url.Values)
urlValues.Set("buckets", strconv.FormatBool(o.Buckets))
urlValues.Set("policies", strconv.FormatBool(o.Policies))
urlValues.Set("users", strconv.FormatBool(o.Users))
urlValues.Set("groups", strconv.FormatBool(o.Groups))
urlValues.Set("showDeleted", strconv.FormatBool(o.ShowDeleted))
urlValues.Set("metrics", strconv.FormatBool(o.Metrics))
urlValues.Set("ilm-expiry-rules", strconv.FormatBool(o.ILMExpiryRules))
urlValues.Set("peer-state", strconv.FormatBool(o.PeerState))
if o.IsEntitySet() {
urlValues.Set("entityvalue", o.EntityValue)
switch o.Entity {
case SRBucketEntity:
urlValues.Set("entity", "bucket")
case SRPolicyEntity:
urlValues.Set("entity", "policy")
case SRUserEntity:
urlValues.Set("entity", "user")
case SRGroupEntity:
urlValues.Set("entity", "group")
case SRILMExpiryRuleEntity:
urlValues.Set("entity", "ilm-expiry-rule")
}
}
return urlValues
}
// SRStatusInfo - returns site replication status
func (adm *AdminClient) SRStatusInfo(ctx context.Context, opts SRStatusOptions) (info SRStatusInfo, err error) {
q := opts.getURLValues()
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/status",
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return info, err
}
if resp.StatusCode != http.StatusOK {
return info, httpRespToErrorResponse(resp)
}
err = json.NewDecoder(resp.Body).Decode(&info)
return info, err
}
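// Hypothetical usage sketch (not part of the package): fetching replication
// status for a single bucket entity with an AdminClient `adm` built elsewhere;
// the bucket name is a placeholder.
//
//	opts := madmin.SRStatusOptions{
//		Entity:      madmin.GetSREntityType("bucket"),
//		EntityValue: "mybucket",
//	}
//	st, err := adm.SRStatusInfo(context.Background(), opts)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for depID, summary := range st.BucketStats["mybucket"] {
//		log.Printf("%s: hasBucket=%v policyMismatch=%v", depID, summary.HasBucket, summary.PolicyMismatch)
//	}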
// ReplicateEditStatus - returns status of edit request.
type ReplicateEditStatus struct {
Success bool `json:"success"`
Status string `json:"status"`
ErrDetail string `json:"errorDetail,omitempty"`
}
// SREditOptions holds SR Edit options
type SREditOptions struct {
DisableILMExpiryReplication bool
EnableILMExpiryReplication bool
}
func (o *SREditOptions) getURLValues() url.Values {
urlValues := make(url.Values)
urlValues.Set("disableILMExpiryReplication", strconv.FormatBool(o.DisableILMExpiryReplication))
urlValues.Set("enableILMExpiryReplication", strconv.FormatBool(o.EnableILMExpiryReplication))
return urlValues
}
// SiteReplicationEdit - sends the SR edit API call.
func (adm *AdminClient) SiteReplicationEdit(ctx context.Context, site PeerInfo, opts SREditOptions) (ReplicateEditStatus, error) {
sitesBytes, err := json.Marshal(site)
if err != nil {
return ReplicateEditStatus{}, err
}
encBytes, err := EncryptData(adm.getSecretKey(), sitesBytes)
if err != nil {
return ReplicateEditStatus{}, err
}
q := opts.getURLValues()
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/edit",
content: encBytes,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return ReplicateEditStatus{}, err
}
if resp.StatusCode != http.StatusOK {
return ReplicateEditStatus{}, httpRespToErrorResponse(resp)
}
var res ReplicateEditStatus
err = json.NewDecoder(resp.Body).Decode(&res)
return res, err
}
// SRPeerEdit - used only by minio server to update peer endpoint
// for a server already in the site replication setup
func (adm *AdminClient) SRPeerEdit(ctx context.Context, pi PeerInfo) error {
b, err := json.Marshal(pi)
if err != nil {
return err
}
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/peer/edit",
content: b,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// SRStateEdit - used only by minio server to update peer state
// for a server already in the site replication setup
func (adm *AdminClient) SRStateEdit(ctx context.Context, state SRStateEditReq) error {
b, err := json.Marshal(state)
if err != nil {
return err
}
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/state/edit",
content: b,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// SiteReplicationRemove - unlinks a site from site replication
func (adm *AdminClient) SiteReplicationRemove(ctx context.Context, removeReq SRRemoveReq) (st ReplicateRemoveStatus, err error) {
rmvBytes, err := json.Marshal(removeReq)
if err != nil {
return st, err
}
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/remove",
content: rmvBytes,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return st, err
}
if resp.StatusCode != http.StatusOK {
return st, httpRespToErrorResponse(resp)
}
var res ReplicateRemoveStatus
err = json.NewDecoder(resp.Body).Decode(&res)
return res, err
}
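// Hypothetical usage sketch (not part of the package): removing one site from
// an existing replication setup with an AdminClient `adm` built elsewhere; the
// site name is a placeholder.
//
//	st, err := adm.SiteReplicationRemove(context.Background(), madmin.SRRemoveReq{
//		SiteNames: []string{"site2"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Println(st.Status)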
// SRPeerRemove - used only by minio server to unlink cluster replication
// for a server already in the site replication setup
func (adm *AdminClient) SRPeerRemove(ctx context.Context, removeReq SRRemoveReq) (st ReplicateRemoveStatus, err error) {
reqBytes, err := json.Marshal(removeReq)
if err != nil {
return st, err
}
q := make(url.Values)
q.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/peer/remove",
content: reqBytes,
queryValues: q,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return st, err
}
if resp.StatusCode != http.StatusOK {
return st, httpRespToErrorResponse(resp)
}
return ReplicateRemoveStatus{}, nil
}
// ReplicateRemoveStatus - returns status of unlink request.
type ReplicateRemoveStatus struct {
Status string `json:"status"`
ErrDetail string `json:"errorDetail,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
// SRRemoveReq - arg body for SRRemoveReq
type SRRemoveReq struct {
RequestingDepID string `json:"requestingDepID"`
SiteNames []string `json:"sites"`
RemoveAll bool `json:"all"` // true if all sites are to be removed.
}
// SRStateEditReq - arg body for SRStateEditReq
type SRStateEditReq struct {
Peers map[string]PeerInfo `json:"peers"`
UpdatedAt time.Time `json:"updatedAt"`
}
// SRStateInfo - site replication state information
type SRStateInfo struct {
Name string `json:"name"`
Peers map[string]PeerInfo `json:"peers"`
UpdatedAt time.Time `json:"updatedAt"`
APIVersion string `json:"apiVersion,omitempty"`
}
const (
ReplicateRemoveStatusSuccess = "Requested site(s) were removed from cluster replication successfully."
ReplicateRemoveStatusPartial = "Some site(s) could not be removed from cluster replication configuration."
)
type ResyncBucketStatus struct {
Bucket string `json:"bucket"`
Status string `json:"status"`
ErrDetail string `json:"errorDetail,omitempty"`
}
// SRResyncOpStatus - returns status of resync start request.
type SRResyncOpStatus struct {
OpType string `json:"op"` // one of "start" or "cancel"
ResyncID string `json:"id"`
Status string `json:"status"`
Buckets []ResyncBucketStatus `json:"buckets"`
ErrDetail string `json:"errorDetail,omitempty"`
}
// SiteResyncOp type of resync operation
type SiteResyncOp string
const (
// SiteResyncStart starts a site resync operation
SiteResyncStart SiteResyncOp = "start"
// SiteResyncCancel cancels ongoing site resync
SiteResyncCancel SiteResyncOp = "cancel"
)
// SiteReplicationResyncOp - perform a site replication resync operation
func (adm *AdminClient) SiteReplicationResyncOp(ctx context.Context, site PeerInfo, op SiteResyncOp) (SRResyncOpStatus, error) {
reqBytes, err := json.Marshal(site)
if err != nil {
return SRResyncOpStatus{}, err
}
v := url.Values{}
v.Set("operation", string(op))
v.Set("api-version", SiteReplAPIVersion)
reqData := requestData{
relPath: adminAPIPrefix + "/site-replication/resync/op",
content: reqBytes,
queryValues: v,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return SRResyncOpStatus{}, err
}
if resp.StatusCode != http.StatusOK {
return SRResyncOpStatus{}, httpRespToErrorResponse(resp)
}
var res SRResyncOpStatus
err = json.NewDecoder(resp.Body).Decode(&res)
return res, err
}
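// Hypothetical usage sketch (not part of the package): starting a resync to a
// peer previously obtained from SiteReplicationInfo, using an AdminClient
// `adm` built elsewhere.
//
//	res, err := adm.SiteReplicationResyncOp(context.Background(), peer, madmin.SiteResyncStart)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, b := range res.Buckets {
//		log.Printf("bucket %s: %s", b.Bucket, b.Status)
//	}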
// SRMetric - captures replication metrics for a site replication peer
type SRMetric struct {
DeploymentID string `json:"deploymentID"`
Endpoint string `json:"endpoint"`
TotalDowntime time.Duration `json:"totalDowntime"`
LastOnline time.Time `json:"lastOnline"`
Online bool `json:"isOnline"`
Latency LatencyStat `json:"latency"`
// replication metrics across buckets roll up
ReplicatedSize int64 `json:"replicatedSize"`
// Total number of completed operations
ReplicatedCount int64 `json:"replicatedCount"`
// ReplicationErrorStats captures replication errors
Failed TimedErrStats `json:"failed,omitempty"`
// XferStats captures transfer stats
XferStats map[replication.MetricName]replication.XferStats `json:"transferSummary"`
// MRFStats captures current backlog entries in the last 5 minutes
MRFStats replication.ReplMRFStats `json:"mrfStats"`
// DowntimeInfo captures the link information
DowntimeInfo DowntimeInfo `json:"downtimeInfo"`
}
// WorkerStat captures number of replication workers
type WorkerStat struct {
Curr int `json:"curr"`
Avg float32 `json:"avg"`
Max int `json:"max"`
}
// InQueueMetric holds stats for objects in replication queue
type InQueueMetric struct {
Curr QStat `json:"curr" msg:"cq"`
Avg QStat `json:"avg" msg:"aq"`
Max QStat `json:"max" msg:"pq"`
}
// QStat represents number of objects and bytes in queue
type QStat struct {
Count float64 `json:"count"`
Bytes float64 `json:"bytes"`
}
// Add two QStat
func (q *QStat) Add(o QStat) QStat {
return QStat{Bytes: q.Bytes + o.Bytes, Count: q.Count + o.Count}
}
// SRMetricsSummary captures summary of replication counts across buckets on site
// along with op metrics rollup.
type SRMetricsSummary struct {
// op metrics roll up
ActiveWorkers WorkerStat `json:"activeWorkers"`
// Total Replica size in bytes
ReplicaSize int64 `json:"replicaSize"`
// Total count of replica received
ReplicaCount int64 `json:"replicaCount"`
// queue metrics
Queued InQueueMetric `json:"queued"`
// proxied metrics
Proxied ReplProxyMetric `json:"proxied"`
// replication metrics summary for each site replication peer
Metrics map[string]SRMetric `json:"replMetrics"`
// uptime of node being queried for site replication metrics
Uptime int64 `json:"uptime"`
// represents the retry count
Retries Counter `json:"retries"`
// represents the error count
Errors Counter `json:"errors"`
}
// Counter denotes the counts
type Counter struct {
// Counted last 1hr
Last1hr uint64 `json:"last1hr"`
// Counted last 1m
Last1m uint64 `json:"last1m"`
// Total count
Total uint64 `json:"total"`
}
// ReplProxyMetric holds stats for replication proxying
type ReplProxyMetric struct {
PutTagTotal uint64 `json:"putTaggingProxyTotal" msg:"ptc"`
GetTagTotal uint64 `json:"getTaggingProxyTotal" msg:"gtc"`
RmvTagTotal uint64 `json:"removeTaggingProxyTotal" msg:"rtc"`
GetTotal uint64 `json:"getProxyTotal" msg:"gc"`
HeadTotal uint64 `json:"headProxyTotal" msg:"hc"`
PutTagFailedTotal uint64 `json:"putTaggingProxyFailed" msg:"ptc"`
GetTagFailedTotal uint64 `json:"getTaggingProxyFailed" msg:"gtc"`
RmvTagFailedTotal uint64 `json:"removeTaggingProxyFailed" msg:"rtc"`
GetFailedTotal uint64 `json:"getProxyFailed" msg:"gc"`
HeadFailedTotal uint64 `json:"headProxyFailed" msg:"hc"`
}
// Add updates proxy metrics
func (p *ReplProxyMetric) Add(p2 ReplProxyMetric) {
p.GetTagTotal += p2.GetTagTotal
p.PutTagTotal += p2.PutTagTotal
p.RmvTagTotal += p2.RmvTagTotal
p.GetTotal += p2.GetTotal
p.HeadTotal += p2.HeadTotal
p.PutTagFailedTotal += p2.PutTagFailedTotal
p.GetTagFailedTotal += p2.GetTagFailedTotal
p.RmvTagFailedTotal += p2.RmvTagFailedTotal
p.GetFailedTotal += p2.GetFailedTotal
p.HeadFailedTotal += p2.HeadFailedTotal
}
golang-github-minio-madmin-go-3.0.104/cluster-commands_test.go 0000664 0000000 0000000 00000003654 14774251704 0024256 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"bytes"
"testing"
)
func TestSRSessionPolicy_MarshalUnmarshalJSON(t *testing.T) {
tests := []struct {
name string
policyBytes []byte
}{
{
name: "ValidPolicy",
policyBytes: []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`),
},
{
name: "EmptyPolicy",
policyBytes: []byte(``),
},
{
name: "NullPolicy",
policyBytes: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Convert policy bytes to SRSessionPolicy
policy := SRSessionPolicy(tt.policyBytes)
// Test MarshalJSON
data, err := policy.MarshalJSON()
if err != nil {
t.Errorf("SRSessionPolicy.MarshalJSON() error = %v", err)
return
}
// Test UnmarshalJSON
var got SRSessionPolicy
if err := got.UnmarshalJSON(data); err != nil {
t.Errorf("SRSessionPolicy.UnmarshalJSON() error = %v", err)
return
}
// Check if the result matches the original policy
if !bytes.Equal(got, tt.policyBytes) {
t.Errorf("SRSessionPolicy.UnmarshalJSON() = %s, want %s", got, tt.policyBytes)
}
})
}
}
golang-github-minio-madmin-go-3.0.104/cluster-health.go 0000664 0000000 0000000 00000014207 14774251704 0022657 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"net/http"
"net/http/httptrace"
"net/url"
"strconv"
"sync"
"time"
)
const (
minioWriteQuorumHeader = "x-minio-write-quorum"
minIOHealingDrives = "x-minio-healing-drives"
clusterCheckEndpoint = "/minio/health/cluster"
clusterReadCheckEndpoint = "/minio/health/cluster/read"
maintenanceURLParameterKey = "maintenance"
)
// HealthResult represents the cluster health result
type HealthResult struct {
Healthy bool
MaintenanceMode bool
WriteQuorum int
HealingDrives int
}
// HealthOpts represents the input options for the health check
type HealthOpts struct {
ClusterRead bool
Maintenance bool
}
// Healthy will hit `/minio/health/cluster` and `/minio/health/cluster/read` anonymous APIs to check the cluster health
func (an *AnonymousClient) Healthy(ctx context.Context, opts HealthOpts) (result HealthResult, err error) {
if opts.ClusterRead {
return an.clusterReadCheck(ctx)
}
return an.clusterCheck(ctx, opts.Maintenance)
}
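// Hypothetical usage sketch (not part of the package). It assumes the
// package's NewAnonymousClient constructor; the endpoint is a placeholder.
//
//	an, err := madmin.NewAnonymousClient("minio.example.com:9000", true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	res, err := an.Healthy(context.Background(), madmin.HealthOpts{Maintenance: false})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("healthy=%v writeQuorum=%d healingDrives=%d", res.Healthy, res.WriteQuorum, res.HealingDrives)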
func (an *AnonymousClient) clusterCheck(ctx context.Context, maintenance bool) (result HealthResult, err error) {
urlValues := make(url.Values)
if maintenance {
urlValues.Set(maintenanceURLParameterKey, "true")
}
resp, err := an.executeMethod(ctx, http.MethodGet, requestData{
relPath: clusterCheckEndpoint,
queryValues: urlValues,
}, nil)
defer closeResponse(resp)
if err != nil {
return result, err
}
if resp != nil {
writeQuorumStr := resp.Header.Get(minioWriteQuorumHeader)
if writeQuorumStr != "" {
result.WriteQuorum, err = strconv.Atoi(writeQuorumStr)
if err != nil {
return result, err
}
}
healingDrivesStr := resp.Header.Get(minIOHealingDrives)
if healingDrivesStr != "" {
result.HealingDrives, err = strconv.Atoi(healingDrivesStr)
if err != nil {
return result, err
}
}
switch resp.StatusCode {
case http.StatusOK:
result.Healthy = true
case http.StatusPreconditionFailed:
result.MaintenanceMode = true
default:
// Not Healthy
}
}
return result, nil
}
func (an *AnonymousClient) clusterReadCheck(ctx context.Context) (result HealthResult, err error) {
resp, err := an.executeMethod(ctx, http.MethodGet, requestData{
relPath: clusterReadCheckEndpoint,
}, nil)
defer closeResponse(resp)
if err != nil {
return result, err
}
if resp != nil {
switch resp.StatusCode {
case http.StatusOK:
result.Healthy = true
default:
// Not Healthy
}
}
return result, nil
}
// AliveOpts customizing liveness check.
type AliveOpts struct {
Readiness bool // send request to /minio/health/ready
}
// AliveResult returns the time spent getting a response
// back from the server on /minio/health/live endpoint
type AliveResult struct {
Endpoint *url.URL `json:"endpoint"`
ResponseTime time.Duration `json:"responseTime"`
DNSResolveTime time.Duration `json:"dnsResolveTime"`
Online bool `json:"online"` // captures x-minio-server-status
Error error `json:"error"`
}
// Alive will hit `/minio/health/live` to check if server is reachable, optionally returns
// the amount of time spent getting a response back from the server.
func (an *AnonymousClient) Alive(ctx context.Context, opts AliveOpts, servers ...ServerProperties) (resultsCh chan AliveResult) {
resource := "/minio/health/live"
if opts.Readiness {
resource = "/minio/health/ready"
}
scheme := "http"
if an.endpointURL != nil {
scheme = an.endpointURL.Scheme
}
resultsCh = make(chan AliveResult)
go func() {
defer close(resultsCh)
if len(servers) == 0 {
an.alive(ctx, an.endpointURL, resource, resultsCh)
} else {
var wg sync.WaitGroup
wg.Add(len(servers))
for _, server := range servers {
server := server
go func() {
defer wg.Done()
sscheme := server.Scheme
if sscheme == "" {
sscheme = scheme
}
u, err := url.Parse(sscheme + "://" + server.Endpoint)
if err != nil {
resultsCh <- AliveResult{
Error: err,
}
return
}
an.alive(ctx, u, resource, resultsCh)
}()
}
wg.Wait()
}
}()
return resultsCh
}
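// Hypothetical usage sketch (not part of the package): probing the configured
// endpoint's liveness with an AnonymousClient `an` built elsewhere. The
// results channel is closed once all probes have completed.
//
//	for r := range an.Alive(context.Background(), madmin.AliveOpts{}) {
//		if r.Error != nil {
//			log.Println("probe failed:", r.Error)
//			continue
//		}
//		log.Printf("%s online=%v rtt=%s", r.Endpoint, r.Online, r.ResponseTime)
//	}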
func (an *AnonymousClient) alive(ctx context.Context, u *url.URL, resource string, resultsCh chan AliveResult) {
var (
dnsStartTime, dnsDoneTime time.Time
reqStartTime, firstByteTime time.Time
)
trace := &httptrace.ClientTrace{
DNSStart: func(_ httptrace.DNSStartInfo) {
dnsStartTime = time.Now()
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
dnsDoneTime = time.Now()
},
GetConn: func(_ string) {
// GetConn is called again when trace is ON
// https://github.com/golang/go/issues/44281
if reqStartTime.IsZero() {
reqStartTime = time.Now()
}
},
GotFirstResponseByte: func() {
firstByteTime = time.Now()
},
}
resp, err := an.executeMethod(ctx, http.MethodGet, requestData{
relPath: resource,
endpointOverride: u,
}, trace)
closeResponse(resp)
var respTime time.Duration
if firstByteTime.IsZero() {
respTime = time.Since(reqStartTime)
} else {
respTime = firstByteTime.Sub(reqStartTime) - dnsDoneTime.Sub(dnsStartTime)
}
result := AliveResult{
Endpoint: u,
ResponseTime: respTime,
DNSResolveTime: dnsDoneTime.Sub(dnsStartTime),
}
if err != nil {
result.Error = err
} else {
result.Online = resp.StatusCode == http.StatusOK && resp.Header.Get("x-minio-server-status") != "offline"
}
select {
case <-ctx.Done():
return
case resultsCh <- result:
}
}
golang-github-minio-madmin-go-3.0.104/code_of_conduct.md 0000664 0000000 0000000 00000006762 14774251704 0023052 0 ustar 00root root 0000000 0000000 # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior, in compliance with the
licensing terms applying to the Project developments.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful. However, these actions shall respect the
licensing terms of the Project Developments that will always supersede such
Code of Conduct.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at dev@min.io. The project team
will review and investigate all complaints, and will respond in a way that it deems
appropriate to the circumstances. The project team is obligated to maintain
confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]
This version includes a clarification to ensure that the code of conduct is in
compliance with the free software licensing terms of the project.
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
golang-github-minio-madmin-go-3.0.104/config-commands.go 0000664 0000000 0000000 00000004364 14774251704 0023002 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"bytes"
"context"
"io"
"net/http"
)
// GetConfig - returns the config.json of a minio setup, incoming data is encrypted.
func (adm *AdminClient) GetConfig(ctx context.Context) ([]byte, error) {
// Execute GET on /minio/admin/v3/config to get config of a setup.
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{relPath: adminAPIPrefix + "/config"})
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
return DecryptData(adm.getSecretKey(), resp.Body)
}
// SetConfig - set config supplied as config.json for the setup.
func (adm *AdminClient) SetConfig(ctx context.Context, config io.Reader) (err error) {
const maxConfigJSONSize = 256 * 1024 // 256KiB
// Read configuration bytes
configBuf := make([]byte, maxConfigJSONSize+1)
n, err := io.ReadFull(config, configBuf)
if err == nil {
return bytes.ErrTooLarge
}
if err != io.ErrUnexpectedEOF {
return err
}
configBytes := configBuf[:n]
econfigBytes, err := EncryptData(adm.getSecretKey(), configBytes)
if err != nil {
return err
}
reqData := requestData{
relPath: adminAPIPrefix + "/config",
content: econfigBytes,
}
// Execute PUT on /minio/admin/v3/config to set config.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
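// Hypothetical usage sketch (not part of the package): round-tripping the
// server configuration with an AdminClient `adm` built elsewhere.
//
//	cfg, err := adm.GetConfig(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	// ... modify cfg as needed, then push it back (must stay under 256KiB) ...
//	if err := adm.SetConfig(context.Background(), bytes.NewReader(cfg)); err != nil {
//		log.Fatal(err)
//	}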
golang-github-minio-madmin-go-3.0.104/config-help-commands.go 0000664 0000000 0000000 00000004635 14774251704 0023731 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
)
// Help - return sub-system level help
type Help struct {
SubSys string `json:"subSys"`
Description string `json:"description"`
MultipleTargets bool `json:"multipleTargets"`
KeysHelp HelpKVS `json:"keysHelp"`
}
// HelpKV - implements help messages for keys
// with value as description of the keys.
type HelpKV struct {
Key string `json:"key"`
Description string `json:"description"`
Optional bool `json:"optional"`
Type string `json:"type"`
MultipleTargets bool `json:"multipleTargets"`
}
// HelpKVS - implement order of keys help messages.
type HelpKVS []HelpKV
// Keys returns help keys
func (h Help) Keys() []string {
keys := make([]string, 0, len(h.KeysHelp))
for _, kh := range h.KeysHelp {
keys = append(keys, kh.Key)
}
return keys
}
// HelpConfigKV - return help for a given sub-system.
func (adm *AdminClient) HelpConfigKV(ctx context.Context, subSys, key string, envOnly bool) (Help, error) {
v := url.Values{}
v.Set("subSys", subSys)
v.Set("key", key)
if envOnly {
v.Set("env", "")
}
reqData := requestData{
relPath: adminAPIPrefix + "/help-config-kv",
queryValues: v,
}
// Execute GET on /minio/admin/v3/help-config-kv
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
if err != nil {
return Help{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return Help{}, httpRespToErrorResponse(resp)
}
help := Help{}
d := json.NewDecoder(resp.Body)
d.DisallowUnknownFields()
if err = d.Decode(&help); err != nil {
return help, err
}
return help, nil
}
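// Hypothetical usage sketch (not part of the package): listing the known keys
// of a config sub-system with an AdminClient `adm` built elsewhere. The
// sub-system name below is a placeholder.
//
//	help, err := adm.HelpConfigKV(context.Background(), "notify_webhook", "", false)
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Println(help.Description, help.Keys())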
golang-github-minio-madmin-go-3.0.104/config-history-commands.go 0000664 0000000 0000000 00000007021 14774251704 0024472 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strconv"
"time"
)
// ClearConfigHistoryKV - clears the config entry represented by restoreID.
// optionally allows setting `all` as a special keyword to automatically
// erase all config set history entries.
func (adm *AdminClient) ClearConfigHistoryKV(ctx context.Context, restoreID string) (err error) {
v := url.Values{}
v.Set("restoreId", restoreID)
reqData := requestData{
relPath: adminAPIPrefix + "/clear-config-history-kv",
queryValues: v,
}
// Execute DELETE on /minio/admin/v3/clear-config-history-kv
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// RestoreConfigHistoryKV - Restore a previous config set history.
// Input is a unique id which represents the previous setting.
func (adm *AdminClient) RestoreConfigHistoryKV(ctx context.Context, restoreID string) (err error) {
v := url.Values{}
v.Set("restoreId", restoreID)
reqData := requestData{
relPath: adminAPIPrefix + "/restore-config-history-kv",
queryValues: v,
}
// Execute PUT on /minio/admin/v3/restore-config-history-kv to restore a previous config.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// ConfigHistoryEntry - captures config set history with a unique
// restore ID and createTime
type ConfigHistoryEntry struct {
RestoreID string `json:"restoreId"`
CreateTime time.Time `json:"createTime"`
Data string `json:"data"`
}
// CreateTimeFormatted is used to print formatted time for CreateTime.
func (ch ConfigHistoryEntry) CreateTimeFormatted() string {
return ch.CreateTime.Format(http.TimeFormat)
}
// ListConfigHistoryKV - lists a slice of ConfigHistoryEntries sorted by createTime.
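//
// A minimal usage sketch (not part of the upstream documentation; assumes an
// initialized *AdminClient named adm):
//
//	entries, err := adm.ListConfigHistoryKV(ctx, 10)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	for _, e := range entries {
//		fmt.Println(e.RestoreID, e.CreateTimeFormatted())
//	}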
func (adm *AdminClient) ListConfigHistoryKV(ctx context.Context, count int) ([]ConfigHistoryEntry, error) {
if count == 0 {
count = 10
}
v := url.Values{}
v.Set("count", strconv.Itoa(count))
// Execute GET on /minio/admin/v3/list-config-history-kv
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/list-config-history-kv",
queryValues: v,
})
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return nil, err
}
var chEntries []ConfigHistoryEntry
if err = json.Unmarshal(data, &chEntries); err != nil {
return chEntries, err
}
return chEntries, nil
}
golang-github-minio-madmin-go-3.0.104/config-kv-commands.go 0000664 0000000 0000000 00000007677 14774251704 0023432 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"net/http"
"net/url"
)
// DelConfigKV - delete key from server config.
func (adm *AdminClient) DelConfigKV(ctx context.Context, k string) (restart bool, err error) {
econfigBytes, err := EncryptData(adm.getSecretKey(), []byte(k))
if err != nil {
return false, err
}
reqData := requestData{
relPath: adminAPIPrefix + "/del-config-kv",
content: econfigBytes,
}
// Execute DELETE on /minio/admin/v3/del-config-kv to delete config key.
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return false, err
}
if resp.StatusCode != http.StatusOK {
return false, httpRespToErrorResponse(resp)
}
return resp.Header.Get(ConfigAppliedHeader) != ConfigAppliedTrue, nil
}
const (
// ConfigAppliedHeader is the header indicating whether the config was applied without requiring a restart.
ConfigAppliedHeader = "x-minio-config-applied"
// ConfigAppliedTrue is the value set in header if the config was applied.
ConfigAppliedTrue = "true"
)
// SetConfigKV - sets a key/value configuration on the server.
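//
// A minimal usage sketch (not part of the upstream documentation; assumes an
// initialized *AdminClient named adm, with an illustrative key/value string):
//
//	restart, err := adm.SetConfigKV(ctx, "api requests_max=1600")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	if restart {
//		fmt.Println("restart the cluster for the change to take effect")
//	}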
func (adm *AdminClient) SetConfigKV(ctx context.Context, kv string) (restart bool, err error) {
econfigBytes, err := EncryptData(adm.getSecretKey(), []byte(kv))
if err != nil {
return false, err
}
reqData := requestData{
relPath: adminAPIPrefix + "/set-config-kv",
content: econfigBytes,
}
// Execute PUT on /minio/admin/v3/set-config-kv to set config key/value.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return false, err
}
if resp.StatusCode != http.StatusOK {
return false, httpRespToErrorResponse(resp)
}
return resp.Header.Get(ConfigAppliedHeader) != ConfigAppliedTrue, nil
}
// GetConfigKV - returns the key/value of the requested key; the response payload is encrypted.
func (adm *AdminClient) GetConfigKV(ctx context.Context, key string) ([]byte, error) {
v := url.Values{}
v.Set("key", key)
// Execute GET on /minio/admin/v3/get-config-kv?key={key} to get value of key.
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/get-config-kv",
queryValues: v,
})
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
return DecryptData(adm.getSecretKey(), resp.Body)
}
// KVOptions takes specific inputs for KV functions
type KVOptions struct {
Env bool
}
// GetConfigKVWithOptions - returns the key/value of the requested key; the response payload is encrypted.
func (adm *AdminClient) GetConfigKVWithOptions(ctx context.Context, key string, opts KVOptions) ([]byte, error) {
v := url.Values{}
v.Set("key", key)
if opts.Env {
v.Set("env", "")
}
// Execute GET on /minio/admin/v3/get-config-kv?key={key} to get value of key.
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/get-config-kv",
queryValues: v,
})
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
return DecryptData(adm.getSecretKey(), resp.Body)
}
golang-github-minio-madmin-go-3.0.104/cpu_linux.go 0000664 0000000 0000000 00000003532 14774251704 0021740 0 ustar 00root root 0000000 0000000 //go:build linux
// +build linux
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"github.com/prometheus/procfs/sysfs"
)
func getCPUFreqStats() ([]CPUFreqStats, error) {
fs, err := sysfs.NewFS("/sys")
if err != nil {
return nil, err
}
stats, err := fs.SystemCpufreq()
if err != nil {
return nil, err
}
out := make([]CPUFreqStats, 0, len(stats))
for _, stat := range stats {
out = append(out, CPUFreqStats{
Name: stat.Name,
CpuinfoCurrentFrequency: stat.CpuinfoCurrentFrequency,
CpuinfoMinimumFrequency: stat.CpuinfoMinimumFrequency,
CpuinfoMaximumFrequency: stat.CpuinfoMaximumFrequency,
CpuinfoTransitionLatency: stat.CpuinfoTransitionLatency,
ScalingCurrentFrequency: stat.ScalingCurrentFrequency,
ScalingMinimumFrequency: stat.ScalingMinimumFrequency,
ScalingMaximumFrequency: stat.ScalingMaximumFrequency,
AvailableGovernors: stat.AvailableGovernors,
Driver: stat.Driver,
Governor: stat.Governor,
RelatedCpus: stat.RelatedCpus,
SetSpeed: stat.SetSpeed,
})
}
return out, nil
}
golang-github-minio-madmin-go-3.0.104/cpu_nolinux.go 0000664 0000000 0000000 00000001701 14774251704 0022271 0 ustar 00root root 0000000 0000000 //go:build !linux
// +build !linux
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"errors"
)
func getCPUFreqStats() ([]CPUFreqStats, error) {
return nil, errors.New("Not implemented for non-linux platforms")
}
golang-github-minio-madmin-go-3.0.104/decommission-commands.go 0000664 0000000 0000000 00000011024 14774251704 0024215 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"time"
)
// PoolDecommissionInfo captures the current draining information for a pool
type PoolDecommissionInfo struct {
StartTime time.Time `json:"startTime"`
StartSize int64 `json:"startSize"`
TotalSize int64 `json:"totalSize"`
CurrentSize int64 `json:"currentSize"`
Complete bool `json:"complete"`
Failed bool `json:"failed"`
Canceled bool `json:"canceled"`
ObjectsDecommissioned int64 `json:"objectsDecommissioned"`
ObjectsDecommissionFailed int64 `json:"objectsDecommissionedFailed"`
BytesDone int64 `json:"bytesDecommissioned"`
BytesFailed int64 `json:"bytesDecommissionedFailed"`
}
// PoolStatus captures current pool status
type PoolStatus struct {
ID int `json:"id"`
CmdLine string `json:"cmdline"`
LastUpdate time.Time `json:"lastUpdate"`
Decommission *PoolDecommissionInfo `json:"decommissionInfo,omitempty"`
}
// DecommissionPool - starts moving data from specified pool to all other existing pools.
// If decommissioning is started successfully this function returns `nil`; to check
// the progress of the on-going draining cycle use StatusPool.
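//
// A minimal usage sketch (not part of the upstream documentation; assumes an
// initialized *AdminClient named adm and a pool argument matching the server
// command line):
//
//	pool := "http://server{1...4}/disk{1...4}"
//	if err := adm.DecommissionPool(ctx, pool); err != nil {
//		log.Fatalln(err)
//	}
//	status, err := adm.StatusPool(ctx, pool)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	if status.Decommission != nil {
//		fmt.Println("draining complete:", status.Decommission.Complete)
//	}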
func (adm *AdminClient) DecommissionPool(ctx context.Context, pool string) error {
values := url.Values{}
values.Set("pool", pool)
resp, err := adm.executeMethod(ctx, http.MethodPost, requestData{
// POST //pools/decommission?pool=http://server{1...4}/disk{1...4}
relPath: adminAPIPrefix + "/pools/decommission",
queryValues: values,
})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// CancelDecommissionPool - cancels an on-going decommissioning process;
// this automatically makes the pool available for writing once canceled.
func (adm *AdminClient) CancelDecommissionPool(ctx context.Context, pool string) error {
values := url.Values{}
values.Set("pool", pool)
resp, err := adm.executeMethod(ctx, http.MethodPost, requestData{
// POST //pools/cancel?pool=http://server{1...4}/disk{1...4}
relPath: adminAPIPrefix + "/pools/cancel",
queryValues: values,
})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// StatusPool returns the current status of a pool, reporting any draining activity
// in progress and the elapsed time.
func (adm *AdminClient) StatusPool(ctx context.Context, pool string) (PoolStatus, error) {
values := url.Values{}
values.Set("pool", pool)
resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{
// GET //pools/status?pool=http://server{1...4}/disk{1...4}
relPath: adminAPIPrefix + "/pools/status",
queryValues: values,
})
if err != nil {
return PoolStatus{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return PoolStatus{}, httpRespToErrorResponse(resp)
}
var info PoolStatus
if err = json.NewDecoder(resp.Body).Decode(&info); err != nil {
return PoolStatus{}, err
}
return info, nil
}
// ListPoolsStatus returns the list of pools currently configured and in use
// on the cluster.
func (adm *AdminClient) ListPoolsStatus(ctx context.Context) ([]PoolStatus, error) {
resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{
relPath: adminAPIPrefix + "/pools/list", // GET //pools/list
})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var pools []PoolStatus
if err = json.NewDecoder(resp.Body).Decode(&pools); err != nil {
return nil, err
}
return pools, nil
}
golang-github-minio-madmin-go-3.0.104/encrypt.go 0000664 0000000 0000000 00000011642 14774251704 0021417 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"bytes"
"crypto/sha256"
"errors"
"io"
"sync"
"github.com/secure-io/sio-go"
"github.com/secure-io/sio-go/sioutil"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/pbkdf2"
)
// IsEncrypted reports whether data is encrypted.
func IsEncrypted(data []byte) bool {
if len(data) <= 32 {
return false
}
b := data[32]
return b == pbkdf2AESGCM || b == argon2idAESGCM || b == argon2idChaCHa20Poly1305
}
// EncryptData encrypts the data with a unique key
// derived from password using the Argon2id PBKDF.
//
// The returned ciphertext data consists of:
//
// salt | AEAD ID | nonce | encrypted data
// 32 1 8 ~ len(data)
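//
// A minimal round-trip sketch (not part of the upstream documentation):
//
//	ciphertext, err := EncryptData("my-password", []byte("secret"))
//	if err != nil {
//		log.Fatalln(err)
//	}
//	plaintext, err := DecryptData("my-password", bytes.NewReader(ciphertext))
//	if err != nil {
//		log.Fatalln(err)
//	}
//	// plaintext now equals []byte("secret")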
func EncryptData(password string, data []byte) ([]byte, error) {
salt := sioutil.MustRandom(32)
var (
id byte
err error
stream *sio.Stream
)
if FIPSEnabled() {
key := pbkdf2.Key([]byte(password), salt, pbkdf2Cost, 32, sha256.New)
stream, err = sio.AES_256_GCM.Stream(key)
if err != nil {
return nil, err
}
id = pbkdf2AESGCM
} else {
argon2Mu.Lock()
key := argon2.IDKey([]byte(password), salt, argon2idTime, argon2idMemory, argon2idThreads, 32)
argon2Mu.Unlock()
if sioutil.NativeAES() {
stream, err = sio.AES_256_GCM.Stream(key)
if err != nil {
return nil, err
}
id = argon2idAESGCM
} else {
stream, err = sio.ChaCha20Poly1305.Stream(key)
if err != nil {
return nil, err
}
id = argon2idChaCHa20Poly1305
}
}
nonce := sioutil.MustRandom(stream.NonceSize())
// ciphertext = salt || AEAD ID | nonce | encrypted data
cLen := int64(len(salt)+1+len(nonce)+len(data)) + stream.Overhead(int64(len(data)))
ciphertext := bytes.NewBuffer(make([]byte, 0, cLen)) // pre-alloc correct length
// Prefix the ciphertext with salt, AEAD ID and nonce
ciphertext.Write(salt)
ciphertext.WriteByte(id)
ciphertext.Write(nonce)
w := stream.EncryptWriter(ciphertext, nonce, nil)
if _, err = w.Write(data); err != nil {
return nil, err
}
if err = w.Close(); err != nil {
return nil, err
}
return ciphertext.Bytes(), nil
}
// argon2Mu is used to control concurrent use of argon2,
// which is very cpu/ram intensive.
// Running concurrent operations most often provides no benefit anyway,
// since it already uses 32 threads.
var argon2Mu sync.Mutex
// ErrMaliciousData indicates that the stream cannot be
// decrypted by provided credentials.
var ErrMaliciousData = sio.NotAuthentic
// ErrUnexpectedHeader indicates that the data stream returned unexpected header
var ErrUnexpectedHeader = errors.New("unexpected header")
// DecryptData decrypts the data with the key derived
// from the salt (part of data) and the password using
// the PBKDF used in EncryptData. DecryptData returns
// the decrypted plaintext on success.
//
// The data must be a valid ciphertext produced by
// EncryptData. Otherwise, the decryption will fail.
func DecryptData(password string, data io.Reader) ([]byte, error) {
// Parse the stream header
var hdr [32 + 1 + 8]byte
if _, err := io.ReadFull(data, hdr[:]); err != nil {
if errors.Is(err, io.EOF) {
// Incomplete header, return unexpected header
return nil, ErrUnexpectedHeader
}
return nil, err
}
salt, id, nonce := hdr[0:32], hdr[32:33], hdr[33:41]
var (
err error
stream *sio.Stream
)
switch id[0] {
case argon2idAESGCM:
argon2Mu.Lock()
key := argon2.IDKey([]byte(password), salt, argon2idTime, argon2idMemory, argon2idThreads, 32)
argon2Mu.Unlock()
stream, err = sio.AES_256_GCM.Stream(key)
case argon2idChaCHa20Poly1305:
argon2Mu.Lock()
key := argon2.IDKey([]byte(password), salt, argon2idTime, argon2idMemory, argon2idThreads, 32)
argon2Mu.Unlock()
stream, err = sio.ChaCha20Poly1305.Stream(key)
case pbkdf2AESGCM:
key := pbkdf2.Key([]byte(password), salt, pbkdf2Cost, 32, sha256.New)
stream, err = sio.AES_256_GCM.Stream(key)
default:
err = errors.New("madmin: invalid encryption algorithm ID")
}
if err != nil {
return nil, err
}
return io.ReadAll(stream.DecryptReader(data, nonce, nil))
}
const (
argon2idAESGCM = 0x00
argon2idChaCHa20Poly1305 = 0x01
pbkdf2AESGCM = 0x02
)
const (
argon2idTime = 1
argon2idMemory = 64 * 1024
argon2idThreads = 4
pbkdf2Cost = 8192
)
golang-github-minio-madmin-go-3.0.104/encrypt_test.go 0000664 0000000 0000000 00000010443 14774251704 0022454 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"bytes"
"encoding/hex"
"fmt"
"testing"
)
var encryptDataTests = []struct {
Password string
Data []byte
}{
{Password: "", Data: nil},
{Password: "", Data: make([]byte, 256)},
{Password: `xPl.8/rhR"Q_1xLt`, Data: make([]byte, 32)},
{Password: "m69?yz4W-!k+7p0", Data: make([]byte, 1024*1024)},
{Password: `7h5oU4$te{;K}fgqlI^]`, Data: make([]byte, 256)},
}
func TestEncryptData(t *testing.T) {
for i, test := range encryptDataTests {
i, test := i, test
t.Run(fmt.Sprintf("Test-%d", i), func(t *testing.T) {
ciphertext, err := EncryptData(test.Password, test.Data)
if err != nil {
t.Fatalf("Failed to encrypt data: %v", err)
}
plaintext, err := DecryptData(test.Password, bytes.NewReader(ciphertext))
if err != nil {
t.Fatalf("Failed to decrypt data: %v", err)
}
if !bytes.Equal(plaintext, test.Data) {
t.Fatal("Decrypt plaintext does not match origin data")
}
})
}
}
var decryptDataTests = []struct {
Password string
Data string
}{
{Password: "", Data: "828aa81599df0651c0461adb82283e8b89956baee9f6e719947ef9cddc849028001dc9d3ac0938f66b07bacc9751437e1985f8a9763c240e81"},
{Password: "", Data: "1793c71df6647860437134073c15688cbb15961dc0758c7ee1225e66e79c724c00d790dba9c671eae89da2c736d858286ac9bd027abacc6443" +
"0375cd41b63b67c070c7fba475a8dd66ae65ba905176c48cbe6f734fc74df87343d8ccff54bada4aeb0a04bd021633ebe6c4768e23f5dea142" +
"561d4fe3f90ed59d13dc5fb3a585dadec1742325291b9c81692bdd3420b2428127f8195e0ecd9a1c9237712ed67af7339fbbf7ff3ee1c516e1" +
"f81e69d933e057b30997e7274a2c9698e07c39f0e8d6818858f34c8191871b5a52bea9061806bd029024bfc1d9c1f230904968d6c9e10fddcb" +
"c006ba97356ff243570fd96df07dd6894e215a6b24c4ed730369519289ebd877aff6ccbd2265985e4ab1a2b7930bab9cfb767b97348a639ddf" +
"8db81bf5151da7e8f3d9638a1b86eb1dd78cc6a526f10a414c78638f"},
{Password: `xPl.8/rhR"Q_1xLt`, Data: "b5c016e93b84b473fc8a37af94936563630c36d6df1841d23a86ee51ca161f9e00ac19116b32f643ff6a56a212b265d8c56" +
"195bb0d12ce199e13dfdc5272f80c1564da2c6fc2fa18da91d8062de02af5cdafea491c6f3cae1f"},
{Password: `7h5oU4$te{;K}fgqlI^]`, Data: "c58edf7cfd557b6b655de6f48b1a3049d8d049dadb3a7bfa9ac9ccbb5baf37ec00f83086a26f43b7d6bc9075ad0" +
"38bf5741f118d502ebe94165e4072ba7f98535d6b1e3b6ae67a98115d146d9b4d90e4df4ae82df9cfa17ed7cd42" +
"465181559f7ddf09c98beec521bb4478e0cb73c4e0827af8688ff4e7a07327a10d5a180035e6ddb16d974a85257" +
"981cd9e0360a20f7b4d653190267dfb241148f018ae180568042e864b9e1b5bc05425a3abc2b0324f50c72d5679" +
"8f924405dfc0f8523f4bb564ed65af8e1b1c82a7a0640552ecf81985d95d0993d99172592ddc1393dfa63e8f0b3" +
"d744b2cc4b73384ca4693f0c1aec0e9b00e85f2937e891105d67da8f59c14ca96608e0425c42f9c1e7c2a8b3413" +
"e1381784f9cfe01de7c47cea1f8d7a7d88f5d4aca783cf55332b47f957a6b9a65269d7eb606b877b"},
}
func TestDecryptData(t *testing.T) {
for i, test := range decryptDataTests {
i, test := i, test
t.Run(fmt.Sprintf("Test-%d", i), func(t *testing.T) {
ciphertext, err := hex.DecodeString(test.Data)
if err != nil {
t.Fatalf("Failed to decode ciphertext data: %v", err)
}
_, err = DecryptData(test.Password, bytes.NewReader(ciphertext))
if err != nil {
t.Fatalf("Failed to decrypt data: %v", err)
}
})
}
}
func TestIsDecrypted(t *testing.T) {
for i, test := range decryptDataTests {
i, test := i, test
t.Run(fmt.Sprintf("Test-%d", i), func(t *testing.T) {
ciphertext, err := hex.DecodeString(test.Data)
if err != nil {
t.Fatalf("Failed to decode ciphertext data: %v", err)
}
if !IsEncrypted(ciphertext) {
t.Fatal("Ciphertext is not encrypted")
}
})
}
}
golang-github-minio-madmin-go-3.0.104/estream/ 0000775 0000000 0000000 00000000000 14774251704 0021040 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/estream/README.md 0000664 0000000 0000000 00000020523 14774251704 0022321 0 ustar 00root root 0000000 0000000 # Encrypted Stream
This package provides a flexible way to merge multiple streams with controlled encryption.
The stream is stateful and allows individually encrypted streams to be sent.
## Features
* Allows encrypted and unencrypted streams.
* Any number of keys can be used on streams.
* Each key can be encrypted by a (different) public key.
* Each stream is identified by a string "name".
* A stream has optional (unencrypted) metadata slice.
* Given the private key, keys can be re-encrypted with another public key without re-encrypting the stream data.
* Streams are checksummed.
* Streams cannot be truncated by early EOF.
* Format is extensible with skippable blocks.
* Allows signaling errors while writing streams.
* Nonce per stream (of course).
* Messagepack for platform independent type safety.
# Usage
Create a writer that will write the stream.
You must provide an `io.Writer` to which the output is written.
Once all streams have been written it should be closed to indicate end of payload.
```Go
w := estream.NewWriter(output)
defer w.Close()
```
It is possible to signal an error to the receiver using `w.AddError(msg string)`.
The receiver will stop parsing the stream and return this message as an error.
## Adding keys
Keys for streams must be added. The keys themselves are 32 bytes of random data,
but it must be specified how they are stored.
They can be added as plain text, which isn't secure,
but allows later encryption using a public key.
To add a key without encryption use `w.AddKeyPlain()`,
which will add the key to the stream.
To add an encrypted key provide a 2048-bit RSA public key
and use `w.AddKeyEncrypted(publicKey)` to add the key to the stream.
Once a key has been sent on the stream it will be used for all subsequent encrypted streams.
This means that different keys with different private/public keys can be sent for different streams.
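A minimal sketch of both variants (error handling shortened; `w` is the writer created above, and `crypto/rand`, `crypto/rsa` and `log` are assumed to be imported):

```Go
// Plaintext key: stored unencrypted, can be encrypted later via ReplaceKeys.
if err := w.AddKeyPlain(); err != nil {
	log.Fatalln(err)
}

// Encrypted key: generate an RSA key pair and wrap the stream key with the public key.
priv, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
	log.Fatalln(err)
}
if err := w.AddKeyEncrypted(&priv.PublicKey); err != nil {
	log.Fatalln(err)
}
```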
## Sending streams
Streams are added using either `w.AddEncryptedStream` or `w.AddUnencryptedStream`.
A string identifier can be used to identify each stream when reading.
An optional byte block can also be sent.
Note that neither the name nor the byte block is encrypted,
so they should not contain sensitive data.
The functions above return an `io.WriteCloser`.
Data for this stream should be written to this interface
and `Close()` should be called before another stream can be added.
Note that unencrypted streams are unbuffered, so it may be beneficial to insert a `bufio.Writer`
to avoid very small packets. Encrypted streams are buffered, since sio collects blocks before sending.
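A minimal sketch that sends one encrypted and one unencrypted stream (`w` is the writer created above; `secretData` and `reportData` are placeholder `io.Reader`s):

```Go
// Encrypted stream: data is encrypted with the most recently added key.
enc, err := w.AddEncryptedStream("secrets", []byte("optional metadata"))
if err != nil {
	log.Fatalln(err)
}
if _, err := io.Copy(enc, secretData); err != nil {
	log.Fatalln(err)
}
enc.Close() // must be closed before adding another stream

// Unencrypted stream: consider wrapping in a bufio.Writer for many small writes.
plain, err := w.AddUnencryptedStream("report", nil)
if err != nil {
	log.Fatalln(err)
}
if _, err := io.Copy(plain, reportData); err != nil {
	log.Fatalln(err)
}
plain.Close()
```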
# Reading Streams
To read back data, `r, err := estream.NewReader(input)` can be used to create a Reader.
To set a private key, use `r.SetPrivateKey(key)` to set a single private key.
For multiple keys, a key provider function can be set to return the appropriate key:
```Go
var key1, key2 *rsa.PrivateKey
// (read keys)
r.PrivateKeyProvider(func(key *rsa.PublicKey) *rsa.PrivateKey {
if key.Equal(&key1.PublicKey) {
return key1
}
if key.Equal(&key2.PublicKey) {
return key2
}
// Unknown key :(
return nil
})
```
It is possible to skip streams that cannot be decrypted using `r.SkipEncrypted(true)`.
A simple for loop can be used to get all streams:
```Go
for {
stream, err := r.NextStream()
if err == io.EOF {
// All streams read
break
}
// Metadata:
fmt.Println(stream.Name)
fmt.Println(stream.Extra)
// Stream content is a standard io.Reader
io.Copy(os.StdOut, stream)
}
```
## Replacing keys
It is possible to replace the public keys needed for decryption using `estream.ReplaceKeys()`.
For encrypted keys the matching private key must be provided; optionally, unencrypted keys
can also be encrypted using a public key, as shown in the sketch below.
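A minimal sketch that re-encrypts all keys with a new key pair (assuming `in`/`out` are the source and destination, `oldPriv` is the currently used private key and `newPriv` is its replacement):

```Go
err := estream.ReplaceKeys(out, in,
	func(key *rsa.PublicKey) (*rsa.PrivateKey, *rsa.PublicKey) {
		if key == nil {
			// Unencrypted key on stream: encrypt it with the new public key.
			return nil, &newPriv.PublicKey
		}
		if key.Equal(&oldPriv.PublicKey) {
			// Decrypt with the old private key, re-encrypt with the new public key.
			return oldPriv, &newPriv.PublicKey
		}
		// Unknown key: keep it as-is.
		return nil, key
	},
	estream.ReplaceKeysOptions{EncryptAll: true},
)
if err != nil {
	log.Fatalln(err)
}
```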
# Format
## Header
Format starts with 2 version bytes.
| Field | Type |
|---------------|--------|
| Major Version | 1 byte |
| Minor Version | 1 byte |
Unknown major versions should be rejected by the decoder.
Minor versions are assumed to be compatible,
but may contain data that will be ignored by older versions.
## Blocks
| Field | Type | Contents |
|--------|--------------|--------------------------|
| id | integer | Block ID |
| length | unsigned int | Length of block in bytes |
Each block is preceded by a messagepack encoded int8 indicating the block type.
Positive types must be parsed by the decoder. Negative types are *skippable* blocks,
so unknown skippable blocks can be ignored.
Blocks have their length encoded as a messagepack unsigned integer following the block ID.
This indicates the number of bytes to skip after the length to reach the next block ID.
Maximum block size is 2^32-1 (4294967295) bytes.
All block content is messagepack encoded.
### id 1: Plain Key
This block contains an unencrypted key that is used for all following streams.
Multiple keys can be sent, but only the latest key should be used to decrypt a stream.
| Field | Type | Contents |
|---------------|-----------|---------------|
| Stream Key | bin array | 32 byte key |
### id 2: RSA Encrypted Key
This block contains an RSA encrypted key that is used for all following streams.
Multiple keys can be sent, but only the latest key should be used to decrypt a stream.
| Field | Type | Contents |
|------------|-----------|---------------------------------------------|
| Public Key | bin array | RSA public key to PKCS #1 in ASN.1 DER form |
| Cipher Key | bin array | 32 byte key encrypted with public key above |
The cipher key is encrypted with RSA-OAEP using SHA-512.
### id 3: SIO Encrypted Stream
Start of stream encrypted using [sio-go](https://github.com/secure-io/sio-go).
Stream will be encrypted using `AES_256_GCM` using the last key provided on stream.
| Field | Type | Contents |
|----------|-----------|-------------------------------|
| Name | string | Identifier of the stream |
| Extra | bin array | Optional extra data |
| Checksum | uint8 | Checksum type used for stream |
| Nonce | bin array | 8 byte nonce used for stream |
The stream consists of all data blocks following until "End Of Stream" block is sent.
Checksum is of encrypted data.
There is no checksum for decrypted data.
### id 4: Plain Stream
Start of unencrypted stream.
| Field | Type | Contents |
|----------|-----------|-------------------------------|
| Name | string | Identifier of the stream |
| Extra | bin array | Optional extra data |
| Checksum | uint8 | Checksum type used for stream |
The stream consists of all data blocks following until "End Of Stream" block is sent.
### id 5: Data Block
Data contains a data block.
| Field | Type | Contents |
|-------|-----------|--------------------------|
| Data | bin array | Data to append to stream |
If block is part of an encrypted stream it should be sent to the stream decrypter as is.
### id 6: End Of Stream
Indicates successful end of individual stream.
| Field | Type | Contents |
|---------------|-----------|-----------|
| Checksum | bin array | Checksum |
No more data blocks should be expected before new stream information is sent.
### id 7: EOF
Indicates successful end of all streams.
### id 8: Error
An error block can be sent to indicate an error occurred while generating the stream.
It is expected that the parser returns the message and stops processing.
| Field | Type | Contents |
|---------|--------|-------------------------------------|
| Message | string | Error message that will be returned |
## Checksum types
| ID | Type | Bytes |
|-----|------------------------------------|-----------|
| 0 | No checksum | (ignored) |
| 1 | 64 bit xxhash (XXH64) (Big Endian) | 8 |
# Version History
| Major | Minor | Changes |
|-------|-------|-----------------|
| 2 | 1 | Initial Version |
golang-github-minio-madmin-go-3.0.104/estream/blockid_string.go 0000664 0000000 0000000 00000001533 14774251704 0024366 0 ustar 00root root 0000000 0000000 // Code generated by "stringer -type=blockID -trimprefix=block"; DO NOT EDIT.
package estream
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[blockPlainKey-1]
_ = x[blockEncryptedKey-2]
_ = x[blockEncStream-3]
_ = x[blockPlainStream-4]
_ = x[blockDatablock-5]
_ = x[blockEOS-6]
_ = x[blockEOF-7]
_ = x[blockError-8]
}
const _blockID_name = "PlainKeyEncryptedKeyEncStreamPlainStreamDatablockEOSEOFError"
var _blockID_index = [...]uint8{0, 8, 20, 29, 40, 49, 52, 55, 60}
func (i blockID) String() string {
i -= 1
if i < 0 || i >= blockID(len(_blockID_index)-1) {
return "blockID(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _blockID_name[_blockID_index[i]:_blockID_index[i+1]]
}
golang-github-minio-madmin-go-3.0.104/estream/checksumtype_string.go 0000664 0000000 0000000 00000001334 14774251704 0025462 0 ustar 00root root 0000000 0000000 // Code generated by "stringer -type=checksumType -trimprefix=checksumType"; DO NOT EDIT.
package estream
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[checksumTypeNone-0]
_ = x[checksumTypeXxhash-1]
_ = x[checksumTypeUnknown-2]
}
const _checksumType_name = "NoneXxhashUnknown"
var _checksumType_index = [...]uint8{0, 4, 10, 17}
func (i checksumType) String() string {
if i >= checksumType(len(_checksumType_index)-1) {
return "checksumType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _checksumType_name[_checksumType_index[i]:_checksumType_index[i+1]]
}
golang-github-minio-madmin-go-3.0.104/estream/reader.go 0000664 0000000 0000000 00000025452 14774251704 0022641 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package estream
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
"fmt"
"io"
"runtime"
"github.com/cespare/xxhash/v2"
"github.com/secure-io/sio-go"
"github.com/tinylib/msgp/msgp"
)
type Reader struct {
mr *msgp.Reader
majorV uint8
minorV uint8
err error
inStream bool
key *[32]byte
private *rsa.PrivateKey
privateFn func(key *rsa.PublicKey) *rsa.PrivateKey
skipEncrypted bool
returnNonDec bool
}
// ErrNoKey is returned when a stream cannot be decrypted.
// The Skip function on the stream can be called to skip to the next.
var ErrNoKey = errors.New("no valid private key found")
// NewReader will return a Reader that will split streams.
func NewReader(r io.Reader) (*Reader, error) {
var ver [2]byte
if _, err := io.ReadFull(r, ver[:]); err != nil {
return nil, err
}
switch ver[0] {
case 2:
default:
return nil, fmt.Errorf("unknown stream version: 0x%x", ver[0])
}
return &Reader{mr: msgp.NewReader(r), majorV: ver[0], minorV: ver[1]}, nil
}
// SetPrivateKey will set the private key to allow stream decryption.
// This overrides any function set by PrivateKeyProvider.
func (r *Reader) SetPrivateKey(k *rsa.PrivateKey) {
r.privateFn = nil
r.private = k
}
// PrivateKeyProvider will ask for a private key matching the public key.
// If the function returns a nil private key the stream key will not be decrypted
// and if SkipEncrypted has been set any streams with this key will be silently skipped.
// This overrides any key set by SetPrivateKey.
func (r *Reader) PrivateKeyProvider(fn func(key *rsa.PublicKey) *rsa.PrivateKey) {
r.privateFn = fn
r.private = nil
}
// SkipEncrypted will skip encrypted streams if no private key has been set.
func (r *Reader) SkipEncrypted(b bool) {
r.skipEncrypted = b
}
// ReturnNonDecryptable will return non-decryptable stream headers.
// Streams are returned with ErrNoKey error.
// Streams with this error cannot be read, but the Skip function can be invoked.
// SkipEncrypted overrides this.
func (r *Reader) ReturnNonDecryptable(b bool) {
r.returnNonDec = b
}
// Stream is a single named stream within the payload, as returned by NextStream.
type Stream struct {
io.Reader
Name string
Extra []byte
SentEncrypted bool
parent *Reader
}
// NextStream will return the next stream.
// Before calling this the previous stream must be read until EOF,
// or Skip() should have been called.
// Will return nil, io.EOF when there are no more streams.
func (r *Reader) NextStream() (*Stream, error) {
if r.err != nil {
return nil, r.err
}
if r.inStream {
return nil, errors.New("previous stream not read until EOF")
}
// Temp storage for blocks.
block := make([]byte, 1024)
for {
// Read block ID.
n, err := r.mr.ReadInt8()
if err != nil {
return nil, r.setErr(err)
}
id := blockID(n)
// Read block size
sz, err := r.mr.ReadUint32()
if err != nil {
return nil, r.setErr(err)
}
// Read block data
if cap(block) < int(sz) {
block = make([]byte, sz)
}
block = block[:sz]
_, err = io.ReadFull(r.mr, block)
if err != nil {
return nil, r.setErr(err)
}
// Parse block
switch id {
case blockPlainKey:
// Read plaintext key.
key, _, err := msgp.ReadBytesBytes(block, make([]byte, 0, 32))
if err != nil {
return nil, r.setErr(err)
}
if len(key) != 32 {
return nil, r.setErr(fmt.Errorf("unexpected key length: %d", len(key)))
}
// Set key for following streams.
r.key = (*[32]byte)(key)
case blockEncryptedKey:
// Read public key
publicKey, block, err := msgp.ReadBytesZC(block)
if err != nil {
return nil, r.setErr(err)
}
// Request private key if we have a custom function.
if r.privateFn != nil {
pk, err := x509.ParsePKCS1PublicKey(publicKey)
if err != nil {
return nil, r.setErr(err)
}
r.private = r.privateFn(pk)
if r.private == nil {
if r.skipEncrypted || r.returnNonDec {
r.key = nil
continue
}
return nil, r.setErr(errors.New("nil private key returned"))
}
}
// Read cipher key
cipherKey, _, err := msgp.ReadBytesZC(block)
if err != nil {
return nil, r.setErr(err)
}
if r.private == nil {
if r.skipEncrypted || r.returnNonDec {
r.key = nil
continue
}
return nil, r.setErr(errors.New("private key has not been set"))
}
// Decrypt stream key
key, err := rsa.DecryptOAEP(sha512.New(), rand.Reader, r.private, cipherKey, nil)
if err != nil {
if r.returnNonDec {
r.key = nil
continue
}
return nil, err
}
if len(key) != 32 {
return nil, r.setErr(fmt.Errorf("unexpected key length: %d", len(key)))
}
r.key = (*[32]byte)(key)
case blockPlainStream, blockEncStream:
// Read metadata
name, block, err := msgp.ReadStringBytes(block)
if err != nil {
return nil, r.setErr(err)
}
extra, block, err := msgp.ReadBytesBytes(block, nil)
if err != nil {
return nil, r.setErr(err)
}
c, block, err := msgp.ReadUint8Bytes(block)
if err != nil {
return nil, r.setErr(err)
}
checksum := checksumType(c)
if !checksum.valid() {
return nil, r.setErr(fmt.Errorf("unknown checksum type %d", checksum))
}
// Return plaintext stream
if id == blockPlainStream {
return &Stream{
Reader: r.newStreamReader(checksum),
Name: name,
Extra: extra,
parent: r,
}, nil
}
// Handle encrypted streams.
if r.key == nil {
if r.skipEncrypted {
if err := r.skipDataBlocks(); err != nil {
return nil, r.setErr(err)
}
continue
}
return &Stream{
SentEncrypted: true,
Reader: nil,
Name: name,
Extra: extra,
parent: r,
}, ErrNoKey
}
// Read stream nonce
nonce, _, err := msgp.ReadBytesZC(block)
if err != nil {
return nil, r.setErr(err)
}
stream, err := sio.AES_256_GCM.Stream(r.key[:])
if err != nil {
return nil, r.setErr(err)
}
// Check if nonce is expected length.
if len(nonce) != stream.NonceSize() {
return nil, r.setErr(fmt.Errorf("unexpected nonce length: %d", len(nonce)))
}
encr := stream.DecryptReader(r.newStreamReader(checksum), nonce, nil)
return &Stream{
SentEncrypted: true,
Reader: encr,
Name: name,
Extra: extra,
parent: r,
}, nil
case blockEOS:
return nil, errors.New("end-of-stream without being in stream")
case blockEOF:
return nil, io.EOF
case blockError:
msg, _, err := msgp.ReadStringBytes(block)
if err != nil {
return nil, r.setErr(err)
}
return nil, r.setErr(errors.New(msg))
default:
if id >= 0 {
return nil, fmt.Errorf("unknown block type: %d", id)
}
}
}
}
// skipDataBlocks reads data blocks until end.
func (r *Reader) skipDataBlocks() error {
for {
// Read block ID.
n, err := r.mr.ReadInt8()
if err != nil {
return err
}
id := blockID(n)
sz, err := r.mr.ReadUint32()
if err != nil {
return err
}
if id == blockError {
msg, err := r.mr.ReadString()
if err != nil {
return err
}
return errors.New(msg)
}
// Discard data
_, err = io.CopyN(io.Discard, r.mr, int64(sz))
if err != nil {
return err
}
switch id {
case blockDatablock:
// Skip data
case blockEOS:
// Done
r.inStream = false
return nil
default:
if id >= 0 {
return fmt.Errorf("unknown block type: %d", id)
}
}
}
}
// setErr sets a stateful error.
func (r *Reader) setErr(err error) error {
if r.err != nil {
return r.err
}
if err == nil {
return err
}
if errors.Is(err, io.EOF) {
r.err = io.ErrUnexpectedEOF
}
if false {
_, file, line, ok := runtime.Caller(1)
if ok {
err = fmt.Errorf("%s:%d: %w", file, line, err)
}
}
r.err = err
return err
}
type streamReader struct {
up *Reader
h xxhash.Digest
buf bytes.Buffer
tmp []byte
isEOF bool
check checksumType
}
// newStreamReader creates a stream reader that can be read to get all data blocks.
func (r *Reader) newStreamReader(ct checksumType) *streamReader {
sr := &streamReader{up: r, check: ct}
sr.h.Reset()
r.inStream = true
return sr
}
// Skip the remainder of the stream.
func (s *Stream) Skip() error {
if sr, ok := s.Reader.(*streamReader); ok {
sr.isEOF = true
sr.buf.Reset()
}
return s.parent.skipDataBlocks()
}
// Read returns data blocks as they appear on the stream.
func (r *streamReader) Read(b []byte) (int, error) {
if r.isEOF {
return 0, io.EOF
}
if r.up.err != nil {
return 0, r.up.err
}
for {
// If we have anything in the buffer return that first.
if r.buf.Len() > 0 {
n, err := r.buf.Read(b)
if err == io.EOF {
err = nil
}
return n, r.up.setErr(err)
}
// Read block
n, err := r.up.mr.ReadInt8()
if err != nil {
return 0, r.up.setErr(err)
}
id := blockID(n)
// Read size...
sz, err := r.up.mr.ReadUint32()
if err != nil {
return 0, r.up.setErr(err)
}
switch id {
case blockDatablock:
// Read block
buf, err := r.up.mr.ReadBytes(r.tmp[:0])
if err != nil {
return 0, r.up.setErr(err)
}
// Write to buffer and checksum
if r.check == checksumTypeXxhash {
r.h.Write(buf)
}
r.tmp = buf
r.buf.Write(buf)
case blockEOS:
// Verify stream checksum if any.
hash, err := r.up.mr.ReadBytes(nil)
if err != nil {
return 0, r.up.setErr(err)
}
switch r.check {
case checksumTypeXxhash:
got := r.h.Sum(nil)
if !bytes.Equal(hash, got) {
return 0, r.up.setErr(fmt.Errorf("checksum mismatch, want %s, got %s", hex.EncodeToString(hash), hex.EncodeToString(got)))
}
case checksumTypeNone:
default:
return 0, r.up.setErr(fmt.Errorf("unknown checksum id %d", r.check))
}
r.isEOF = true
r.up.inStream = false
return 0, io.EOF
case blockError:
msg, err := r.up.mr.ReadString()
if err != nil {
return 0, r.up.setErr(err)
}
return 0, r.up.setErr(errors.New(msg))
default:
if id >= 0 {
return 0, fmt.Errorf("unexpected block type: %d", id)
}
// Skip block...
_, err := io.CopyN(io.Discard, r.up.mr, int64(sz))
if err != nil {
return 0, r.up.setErr(err)
}
}
}
}
golang-github-minio-madmin-go-3.0.104/estream/stream.go 0000664 0000000 0000000 00000027272 14774251704 0022674 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package estream
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"github.com/cespare/xxhash/v2"
"github.com/secure-io/sio-go"
"github.com/tinylib/msgp/msgp"
)
// ReplaceFn provides key replacement.
//
// When a key is found on stream, the function is called with the public key.
// The function must then return a private key to decrypt matching the key sent.
// The public key must then be specified that should be used to re-encrypt the stream.
//
// If no private key is sent and the public key matches the one sent to the function
// the key will be kept as is. Other returned values will cause an error.
//
// For encrypting unencrypted keys on stream a nil key will be sent.
// If a public key is returned the key will be encrypted with the public key.
// No private key should be returned for this.
type ReplaceFn func(key *rsa.PublicKey) (*rsa.PrivateKey, *rsa.PublicKey)
// ReplaceKeysOptions allows passing additional options to ReplaceKeys.
type ReplaceKeysOptions struct {
// If EncryptAll set all unencrypted keys will be encrypted.
EncryptAll bool
// PassErrors will pass through error an error packet,
// and not return an error.
PassErrors bool
}
// ReplaceKeys will replace the keys in a stream.
//
// A replace function must be provided. See ReplaceFn for functionality.
// If ReplaceKeysOptions.EncryptAll is set, unencrypted keys will also be encrypted.
func ReplaceKeys(w io.Writer, r io.Reader, replace ReplaceFn, o ReplaceKeysOptions) error {
var ver [2]byte
if _, err := io.ReadFull(r, ver[:]); err != nil {
return err
}
switch ver[0] {
case 2:
default:
return fmt.Errorf("unknown stream version: 0x%x", ver[0])
}
if _, err := w.Write(ver[:]); err != nil {
return err
}
// Input
mr := msgp.NewReader(r)
mw := msgp.NewWriter(w)
// Temporary block storage.
block := make([]byte, 1024)
// Write a block.
writeBlock := func(id blockID, sz uint32, content []byte) error {
if err := mw.WriteInt8(int8(id)); err != nil {
return err
}
if err := mw.WriteUint32(sz); err != nil {
return err
}
_, err := mw.Write(content)
return err
}
for {
// Read block ID.
n, err := mr.ReadInt8()
if err != nil {
return err
}
id := blockID(n)
// Read size
sz, err := mr.ReadUint32()
if err != nil {
return err
}
if cap(block) < int(sz) {
block = make([]byte, sz)
}
block = block[:sz]
_, err = io.ReadFull(mr, block)
if err != nil {
return err
}
switch id {
case blockEncryptedKey:
ogBlock := block
// Read public key
publicKey, block, err := msgp.ReadBytesZC(block)
if err != nil {
return err
}
pk, err := x509.ParsePKCS1PublicKey(publicKey)
if err != nil {
return err
}
private, public := replace(pk)
if private == nil && public == pk {
	// Key is kept as-is; copy the original block and continue with the next one.
	if err := writeBlock(id, sz, ogBlock); err != nil {
		return err
	}
	continue
}
if private == nil {
return errors.New("no private key provided, unable to re-encrypt")
}
// Read cipher key
cipherKey, _, err := msgp.ReadBytesZC(block)
if err != nil {
return err
}
// Decrypt stream key
key, err := rsa.DecryptOAEP(sha512.New(), rand.Reader, private, cipherKey, nil)
if err != nil {
return err
}
if len(key) != 32 {
return fmt.Errorf("unexpected key length: %d", len(key))
}
cipherKey, err = rsa.EncryptOAEP(sha512.New(), rand.Reader, public, key[:], nil)
if err != nil {
return err
}
// Write Public key
tmp := msgp.AppendBytes(nil, x509.MarshalPKCS1PublicKey(public))
// Write encrypted cipher key
tmp = msgp.AppendBytes(tmp, cipherKey)
if err := writeBlock(blockEncryptedKey, uint32(len(tmp)), tmp); err != nil {
return err
}
case blockPlainKey:
if !o.EncryptAll {
if err := writeBlock(id, sz, block); err != nil {
return err
}
continue
}
_, public := replace(nil)
if public == nil {
if err := writeBlock(id, sz, block); err != nil {
return err
}
continue
}
key, _, err := msgp.ReadBytesZC(block)
if err != nil {
return err
}
if len(key) != 32 {
return fmt.Errorf("unexpected key length: %d", len(key))
}
cipherKey, err := rsa.EncryptOAEP(sha512.New(), rand.Reader, public, key[:], nil)
if err != nil {
return err
}
// Write Public key
tmp := msgp.AppendBytes(nil, x509.MarshalPKCS1PublicKey(public))
// Write encrypted cipher key
tmp = msgp.AppendBytes(tmp, cipherKey)
if err := writeBlock(blockEncryptedKey, uint32(len(tmp)), tmp); err != nil {
return err
}
case blockEOF:
if err := writeBlock(id, sz, block); err != nil {
return err
}
return mw.Flush()
case blockError:
if o.PassErrors {
if err := writeBlock(id, sz, block); err != nil {
return err
}
return mw.Flush()
}
// Return error
msg, _, err := msgp.ReadStringBytes(block)
if err != nil {
return err
}
return errors.New(msg)
default:
if err := writeBlock(id, sz, block); err != nil {
return err
}
}
}
}
// DebugStream will print stream block information to w.
func (r *Reader) DebugStream(w io.Writer) error {
if r.err != nil {
return r.err
}
if r.inStream {
return errors.New("previous stream not read until EOF")
}
fmt.Fprintf(w, "stream major: %v, minor: %v\n", r.majorV, r.minorV)
// Temp storage for blocks.
block := make([]byte, 1024)
hashers := []hash.Hash{nil, xxhash.New()}
for {
// Read block ID.
n, err := r.mr.ReadInt8()
if err != nil {
return r.setErr(fmt.Errorf("reading block id: %w", err))
}
id := blockID(n)
// Read block size
sz, err := r.mr.ReadUint32()
if err != nil {
return r.setErr(fmt.Errorf("reading block size: %w", err))
}
fmt.Fprintf(w, "block type: %v, size: %d bytes, in stream: %v\n", id, sz, r.inStream)
// Read block data
if cap(block) < int(sz) {
block = make([]byte, sz)
}
block = block[:sz]
_, err = io.ReadFull(r.mr, block)
if err != nil {
return r.setErr(fmt.Errorf("reading block data: %w", err))
}
// Parse block
switch id {
case blockPlainKey:
// Read plaintext key.
key, _, err := msgp.ReadBytesBytes(block, make([]byte, 0, 32))
if err != nil {
return r.setErr(fmt.Errorf("reading key: %w", err))
}
if len(key) != 32 {
return r.setErr(fmt.Errorf("unexpected key length: %d", len(key)))
}
// Set key for following streams.
r.key = (*[32]byte)(key)
fmt.Fprintf(w, "plain key read\n")
case blockEncryptedKey:
// Read public key
publicKey, block, err := msgp.ReadBytesZC(block)
if err != nil {
return r.setErr(fmt.Errorf("reading public key: %w", err))
}
// Request private key if we have a custom function.
if r.privateFn != nil {
fmt.Fprintf(w, "requesting private key from privateFn\n")
pk, err := x509.ParsePKCS1PublicKey(publicKey)
if err != nil {
return r.setErr(fmt.Errorf("parse public key: %w", err))
}
r.private = r.privateFn(pk)
if r.private == nil {
fmt.Fprintf(w, "privateFn did not provide private key\n")
if r.skipEncrypted || r.returnNonDec {
fmt.Fprintf(w, "continuing. skipEncrypted: %v, returnNonDec: %v\n", r.skipEncrypted, r.returnNonDec)
r.key = nil
continue
}
return r.setErr(errors.New("nil private key returned"))
}
}
// Read cipher key
cipherKey, _, err := msgp.ReadBytesZC(block)
if err != nil {
return r.setErr(fmt.Errorf("reading cipherkey: %w", err))
}
if r.private == nil {
if r.skipEncrypted || r.returnNonDec {
fmt.Fprintf(w, "no private key, continuing due to skipEncrypted: %v, returnNonDec: %v\n", r.skipEncrypted, r.returnNonDec)
r.key = nil
continue
}
return r.setErr(errors.New("private key has not been set"))
}
// Decrypt stream key
key, err := rsa.DecryptOAEP(sha512.New(), rand.Reader, r.private, cipherKey, nil)
if err != nil {
if r.returnNonDec {
fmt.Fprintf(w, "no private key, continuing due to returnNonDec: %v\n", r.returnNonDec)
r.key = nil
continue
}
return fmt.Errorf("decrypting key: %w", err)
}
if len(key) != 32 {
return r.setErr(fmt.Errorf("unexpected key length: %d", len(key)))
}
r.key = (*[32]byte)(key)
fmt.Fprintf(w, "stream key decoded\n")
case blockPlainStream, blockEncStream:
// Read metadata
name, block, err := msgp.ReadStringBytes(block)
if err != nil {
return r.setErr(fmt.Errorf("reading name: %w", err))
}
extra, block, err := msgp.ReadBytesBytes(block, nil)
if err != nil {
return r.setErr(fmt.Errorf("reading extra: %w", err))
}
c, block, err := msgp.ReadUint8Bytes(block)
if err != nil {
return r.setErr(fmt.Errorf("reading checksum: %w", err))
}
checksum := checksumType(c)
if !checksum.valid() {
return r.setErr(fmt.Errorf("unknown checksum type %d", checksum))
}
fmt.Fprintf(w, "new stream. name: %v, extra size: %v, checksum type: %v\n", name, len(extra), checksum)
for _, h := range hashers {
if h != nil {
h.Reset()
}
}
// Return plaintext stream
if id == blockPlainStream {
r.inStream = true
continue
}
// Handle encrypted streams.
if r.key == nil {
if r.skipEncrypted {
fmt.Fprintf(w, "nil key, skipEncrypted: %v\n", r.skipEncrypted)
r.inStream = true
continue
}
return ErrNoKey
}
// Read stream nonce
nonce, _, err := msgp.ReadBytesZC(block)
if err != nil {
return r.setErr(fmt.Errorf("reading nonce: %w", err))
}
stream, err := sio.AES_256_GCM.Stream(r.key[:])
if err != nil {
return r.setErr(fmt.Errorf("initializing sio: %w", err))
}
// Check if nonce is expected length.
if len(nonce) != stream.NonceSize() {
return r.setErr(fmt.Errorf("unexpected nonce length: %d", len(nonce)))
}
fmt.Fprintf(w, "nonce: %v\n", nonce)
r.inStream = true
case blockEOS:
if !r.inStream {
return errors.New("end-of-stream without being in stream")
}
h, _, err := msgp.ReadBytesZC(block)
if err != nil {
return r.setErr(fmt.Errorf("reading block data: %w", err))
}
fmt.Fprintf(w, "end-of-stream. stream hash: %s. data hashes: ", hex.EncodeToString(h))
for i, h := range hashers {
if h != nil {
fmt.Fprintf(w, "%s:%s. ", checksumType(i), hex.EncodeToString(h.Sum(nil)))
}
}
fmt.Fprint(w, "\n")
r.inStream = false
case blockEOF:
if r.inStream {
return errors.New("end-of-file without finishing stream")
}
fmt.Fprintf(w, "end-of-file\n")
return nil
case blockError:
msg, _, err := msgp.ReadStringBytes(block)
if err != nil {
return r.setErr(fmt.Errorf("reading error string: %w", err))
}
fmt.Fprintf(w, "error recorded on stream: %v\n", msg)
return nil
case blockDatablock:
buf, _, err := msgp.ReadBytesZC(block)
if err != nil {
return r.setErr(fmt.Errorf("reading block data: %w", err))
}
for _, h := range hashers {
if h != nil {
h.Write(buf)
}
}
fmt.Fprintf(w, "data block, length: %v\n", len(buf))
default:
fmt.Fprintf(w, "skipping block\n")
if id >= 0 {
return fmt.Errorf("unknown block type: %d", id)
}
}
}
}
golang-github-minio-madmin-go-3.0.104/estream/stream_test.go 0000664 0000000 0000000 00000022727 14774251704 0023733 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package estream
import (
"bytes"
crand "crypto/rand"
"crypto/rsa"
"io"
"os"
"testing"
)
var testStreams = map[string][]byte{
"stream1": bytes.Repeat([]byte("a"), 2000),
"stream2": bytes.Repeat([]byte("b"), 1<<20),
"stream3": bytes.Repeat([]byte("b"), 5),
"empty": {},
}
func TestStreamRoundtrip(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
if err := w.AddKeyPlain(); err != nil {
t.Fatal(err)
}
wantStreams := 0
wantDecStreams := 0
for name, value := range testStreams {
st, err := w.AddEncryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
st, err = w.AddUnencryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
wantStreams += 2
wantDecStreams += 2
}
priv, err := rsa.GenerateKey(crand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
err = w.AddKeyEncrypted(&priv.PublicKey)
if err != nil {
t.Fatal(err)
}
for name, value := range testStreams {
st, err := w.AddEncryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
st, err = w.AddUnencryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
wantStreams += 2
wantDecStreams++
}
err = w.Close()
if err != nil {
t.Fatal(err)
}
// Read back...
b := buf.Bytes()
r, err := NewReader(bytes.NewBuffer(b))
if err != nil {
t.Fatal(err)
}
r.SetPrivateKey(priv)
var gotStreams int
for {
st, err := r.NextStream()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
want, ok := testStreams[st.Name]
if !ok {
t.Fatal("unexpected stream name", st.Name)
}
if !bytes.Equal(st.Extra, []byte(st.Name)) {
t.Fatal("unexpected stream extra:", st.Extra)
}
got, err := io.ReadAll(st)
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
if !bytes.Equal(got, want) {
t.Errorf("stream %d: content mismatch (len %d,%d)", gotStreams, len(got), len(want))
}
gotStreams++
}
if gotStreams != wantStreams {
t.Errorf("want %d streams, got %d", wantStreams, gotStreams)
}
// Read back, but skip encrypted streams.
r, err = NewReader(bytes.NewBuffer(b))
if err != nil {
t.Fatal(err)
}
r.SkipEncrypted(true)
gotStreams = 0
for {
st, err := r.NextStream()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
want, ok := testStreams[st.Name]
if !ok {
t.Fatal("unexpected stream name", st.Name)
}
if !bytes.Equal(st.Extra, []byte(st.Name)) {
t.Fatal("unexpected stream extra:", st.Extra)
}
got, err := io.ReadAll(st)
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
if !bytes.Equal(got, want) {
t.Errorf("stream %d: content mismatch (len %d,%d)", gotStreams, len(got), len(want))
}
gotStreams++
}
if gotStreams != wantDecStreams {
t.Errorf("want %d streams, got %d", wantStreams, gotStreams)
}
gotStreams = 0
r, err = NewReader(bytes.NewBuffer(b))
if err != nil {
t.Fatal(err)
}
r.SkipEncrypted(true)
for {
st, err := r.NextStream()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
_, ok := testStreams[st.Name]
if !ok {
t.Fatal("unexpected stream name", st.Name)
}
if !bytes.Equal(st.Extra, []byte(st.Name)) {
t.Fatal("unexpected stream extra:", st.Extra)
}
err = st.Skip()
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
gotStreams++
}
if gotStreams != wantDecStreams {
t.Errorf("want %d streams, got %d", wantDecStreams, gotStreams)
}
if false {
r, err = NewReader(bytes.NewBuffer(b))
if err != nil {
t.Fatal(err)
}
r.SkipEncrypted(true)
err = r.DebugStream(os.Stdout)
if err != nil {
t.Fatal(err)
}
}
}
func TestReplaceKeys(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
if err := w.AddKeyPlain(); err != nil {
t.Fatal(err)
}
wantStreams := 0
for name, value := range testStreams {
st, err := w.AddEncryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
st, err = w.AddUnencryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
wantStreams += 2
}
priv, err := rsa.GenerateKey(crand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
err = w.AddKeyEncrypted(&priv.PublicKey)
if err != nil {
t.Fatal(err)
}
for name, value := range testStreams {
st, err := w.AddEncryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
st, err = w.AddUnencryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
wantStreams += 2
}
err = w.Close()
if err != nil {
t.Fatal(err)
}
priv2, err := rsa.GenerateKey(crand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
var replaced bytes.Buffer
err = ReplaceKeys(&replaced, &buf, func(key *rsa.PublicKey) (*rsa.PrivateKey, *rsa.PublicKey) {
if key == nil {
return nil, &priv2.PublicKey
}
if key.Equal(&priv.PublicKey) {
return priv, &priv2.PublicKey
}
t.Fatal("unknown key\n", *key, "\nwant\n", priv.PublicKey)
return nil, nil
}, ReplaceKeysOptions{EncryptAll: true})
if err != nil {
t.Fatal(err)
}
// Read back...
r, err := NewReader(&replaced)
if err != nil {
t.Fatal(err)
}
// Use key provider.
r.PrivateKeyProvider(func(key *rsa.PublicKey) *rsa.PrivateKey {
if key.Equal(&priv2.PublicKey) {
return priv2
}
t.Fatal("unexpected public key")
return nil
})
var gotStreams int
for {
st, err := r.NextStream()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
want, ok := testStreams[st.Name]
if !ok {
t.Fatal("unexpected stream name", st.Name)
}
if st.SentEncrypted != (gotStreams&1 == 0) {
t.Errorf("stream %d was sent with unexpected encryption %v", gotStreams, st.SentEncrypted)
}
if !bytes.Equal(st.Extra, []byte(st.Name)) {
t.Fatal("unexpected stream extra:", st.Extra)
}
got, err := io.ReadAll(st)
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
if !bytes.Equal(got, want) {
t.Errorf("stream %d: content mismatch (len %d,%d)", gotStreams, len(got), len(want))
}
gotStreams++
}
if gotStreams != wantStreams {
t.Errorf("want %d streams, got %d", wantStreams, gotStreams)
}
}
func TestError(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
if err := w.AddKeyPlain(); err != nil {
t.Fatal(err)
}
want := "an error message!"
if err := w.AddError(want); err != nil {
t.Fatal(err)
}
w.Close()
// Read back...
r, err := NewReader(&buf)
if err != nil {
t.Fatal(err)
}
st, err := r.NextStream()
if err == nil {
t.Fatalf("did not receive error, got %v, err: %v", st, err)
}
if err.Error() != want {
t.Errorf("Expected %q, got %q", want, err.Error())
}
}
func TestStreamReturnNonDecryptable(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
if err := w.AddKeyPlain(); err != nil {
t.Fatal(err)
}
priv, err := rsa.GenerateKey(crand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
err = w.AddKeyEncrypted(&priv.PublicKey)
if err != nil {
t.Fatal(err)
}
wantStreams := len(testStreams)
for name, value := range testStreams {
st, err := w.AddEncryptedStream(name, []byte(name))
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(st, bytes.NewBuffer(value))
if err != nil {
t.Fatal(err)
}
st.Close()
}
err = w.Close()
if err != nil {
t.Fatal(err)
}
// Read back...
b := buf.Bytes()
r, err := NewReader(bytes.NewBuffer(b))
if err != nil {
t.Fatal(err)
}
r.ReturnNonDecryptable(true)
gotStreams := 0
for {
st, err := r.NextStream()
if err == io.EOF {
break
}
if err != ErrNoKey {
t.Fatalf("stream %d: %v", gotStreams, err)
}
_, ok := testStreams[st.Name]
if !ok {
t.Fatal("unexpected stream name", st.Name)
}
if !bytes.Equal(st.Extra, []byte(st.Name)) {
t.Fatal("unexpected stream extra:", st.Extra)
}
if !st.SentEncrypted {
t.Fatal("stream not marked as encrypted:", st.SentEncrypted)
}
err = st.Skip()
if err != nil {
t.Fatalf("stream %d: %v", gotStreams, err)
}
gotStreams++
}
if gotStreams != wantStreams {
t.Errorf("want %d streams, got %d", wantStreams, gotStreams)
}
}
golang-github-minio-madmin-go-3.0.104/estream/types.go 0000664 0000000 0000000 00000002407 14774251704 0022536 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package estream
//go:generate stringer -type=blockID -trimprefix=block
type blockID int8
const (
blockPlainKey blockID = iota + 1
blockEncryptedKey
blockEncStream
blockPlainStream
blockDatablock
blockEOS
blockEOF
blockError
)
type checksumType uint8
//go:generate stringer -type=checksumType -trimprefix=checksumType
const (
checksumTypeNone checksumType = iota
checksumTypeXxhash
checksumTypeUnknown
)
func (c checksumType) valid() bool {
return c >= checksumTypeNone && c < checksumTypeUnknown
}
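// For orientation (a summary derived from writer.go in this package, not a
// normative spec): a serialized stream begins with two version bytes followed
// by a sequence of blocks. A key block (blockPlainKey or blockEncryptedKey)
// precedes any encrypted stream; each stream consists of a blockEncStream or
// blockPlainStream header, zero or more blockDatablock payloads and a final
// blockEOS checksum block; blockEOF terminates the whole stream and
// blockError signals that the reader should abort.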
golang-github-minio-madmin-go-3.0.104/estream/writer.go 0000664 0000000 0000000 00000020214 14774251704 0022702 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package estream
import (
"bytes"
crand "crypto/rand"
"crypto/rsa"
"crypto/sha512"
"crypto/x509"
"encoding/binary"
"errors"
"io"
"github.com/cespare/xxhash/v2"
"github.com/secure-io/sio-go"
"github.com/tinylib/msgp/msgp"
)
// Writer provides a stream writer.
// Streams can optionally be encrypted.
// All streams have checksum verification.
type Writer struct {
up io.Writer
err error
key *[32]byte
bw blockWriter
nonce uint64
}
const (
writerMajorVersion = 2
writerMinorVersion = 1
)
// NewWriter will return a writer that allows adding encrypted and non-encrypted data streams.
func NewWriter(w io.Writer) *Writer {
_, err := w.Write([]byte{writerMajorVersion, writerMinorVersion})
writer := &Writer{err: err, up: w}
writer.bw.init(w)
return writer
}
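// A minimal usage sketch (mirroring TestStreamRoundtrip in stream_test.go;
// variable names and payloads here are illustrative only): a key must be
// added before any encrypted stream, and Close emits the trailing EOF block.
//
//	var buf bytes.Buffer
//	w := NewWriter(&buf)
//	if err := w.AddKeyPlain(); err != nil {
//		// handle error
//	}
//	st, err := w.AddEncryptedStream("stream1", []byte("extra"))
//	if err != nil {
//		// handle error
//	}
//	_, _ = st.Write([]byte("payload"))
//	_ = st.Close()
//	_ = w.Close()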
// Close will flush and close the output stream.
func (w *Writer) Close() error {
if w.err != nil {
return w.err
}
w.addBlock(blockEOF)
return w.sendBlock()
}
// AddKeyEncrypted will create a new encryption key and add it to the stream.
// The key will be encrypted with the public key provided.
// All following files will be encrypted with this key.
func (w *Writer) AddKeyEncrypted(publicKey *rsa.PublicKey) error {
if w.err != nil {
return w.err
}
var key [32]byte
_, err := io.ReadFull(crand.Reader, key[:])
if err != nil {
return w.setErr(err)
}
w.key = &key
cipherKey, err := rsa.EncryptOAEP(sha512.New(), crand.Reader, publicKey, key[:], nil)
if err != nil {
return w.setErr(err)
}
mw := w.addBlock(blockEncryptedKey)
// Write public key...
if err := mw.WriteBytes(x509.MarshalPKCS1PublicKey(publicKey)); err != nil {
return w.setErr(err)
}
// Write encrypted cipher key
w.setErr(mw.WriteBytes(cipherKey))
return w.sendBlock()
}
// AddKeyPlain will create a new encryption key and add it to the stream.
// The key will be stored without any encryption.
// All calls to AddEncryptedStream will use this key
func (w *Writer) AddKeyPlain() error {
if w.err != nil {
return w.err
}
var key [32]byte
_, err := io.ReadFull(crand.Reader, key[:])
if err != nil {
return w.setErr(err)
}
w.key = &key
mw := w.addBlock(blockPlainKey)
w.setErr(mw.WriteBytes(key[:]))
return w.sendBlock()
}
// AddError will indicate the writer encountered an error
// and the reader should abort the stream.
// The message will be returned as an error.
func (w *Writer) AddError(msg string) error {
if w.err != nil {
return w.err
}
mw := w.addBlock(blockError)
w.setErr(mw.WriteString(msg))
return w.sendBlock()
}
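// Note (derived from TestError in stream_test.go): a reader that encounters
// this block surfaces the message as the error returned by NextStream.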
// AddUnencryptedStream adds a named stream.
// Extra data can be added, which is added without encryption or checksums.
func (w *Writer) AddUnencryptedStream(name string, extra []byte) (io.WriteCloser, error) {
if w.err != nil {
return nil, w.err
}
mw := w.addBlock(blockPlainStream)
// Write metadata...
w.setErr(mw.WriteString(name))
w.setErr(mw.WriteBytes(extra))
w.setErr(mw.WriteUint8(uint8(checksumTypeXxhash)))
if err := w.sendBlock(); err != nil {
return nil, err
}
return w.newStreamWriter(), nil
}
// AddEncryptedStream adds a named encrypted stream.
// AddKeyEncrypted must have been called before this, but
// multiple streams can safely use the same key.
// Extra data can be added, which is added without encryption or checksums.
func (w *Writer) AddEncryptedStream(name string, extra []byte) (io.WriteCloser, error) {
if w.err != nil {
return nil, w.err
}
if w.key == nil {
return nil, errors.New("AddEncryptedStream: No key on stream")
}
mw := w.addBlock(blockEncStream)
// Write metadata...
w.setErr(mw.WriteString(name))
w.setErr(mw.WriteBytes(extra))
w.setErr(mw.WriteUint8(uint8(checksumTypeXxhash)))
stream, err := sio.AES_256_GCM.Stream(w.key[:])
if err != nil {
return nil, w.setErr(err)
}
// Get nonce for stream.
nonce := make([]byte, stream.NonceSize())
binary.LittleEndian.PutUint64(nonce, w.nonce)
w.nonce++
// Write nonce as bin array.
w.setErr(mw.WriteBytes(nonce))
if err := w.sendBlock(); err != nil {
return nil, err
}
// Send output as blocks.
sw := w.newStreamWriter()
encw := stream.EncryptWriter(sw, nonce, nil)
return &closeWrapper{
up: encw,
after: func() error {
return sw.Close()
},
}, nil
}
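// A hedged sketch of the RSA-wrapped key flow exercised by the tests; the key
// added by AddKeyEncrypted applies to every encrypted stream added after it,
// and several streams may reuse the same key. Identifiers are illustrative.
//
//	priv, err := rsa.GenerateKey(crand.Reader, 2048)
//	if err != nil {
//		// handle error
//	}
//	if err := w.AddKeyEncrypted(&priv.PublicKey); err != nil {
//		// handle error
//	}
//	st, err := w.AddEncryptedStream("name", []byte("extra"))
//	// write to st, then st.Close() and finally w.Close()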
// addBlock initializes a new block.
// Block content should be written to the returned writer.
// When done call sendBlock.
func (w *Writer) addBlock(id blockID) *msgp.Writer {
return w.bw.newBlock(id)
}
// sendBlock sends the queued block.
func (w *Writer) sendBlock() error {
if w.err != nil {
return w.err
}
return w.setErr(w.bw.send())
}
// newStreamWriter creates a new stream writer
func (w *Writer) newStreamWriter() *streamWriter {
sw := &streamWriter{w: w}
sw.h.Reset()
return sw
}
// setErr will set a stateful error on w.
// If an error has already been set that is returned instead.
func (w *Writer) setErr(err error) error {
if w.err != nil {
return w.err
}
if err == nil {
return err
}
w.err = err
return err
}
// streamWriter will send each individual write as a block on stream.
// Close must be called when writes have completed to send hashes.
type streamWriter struct {
w *Writer
h xxhash.Digest
eosWritten bool
}
// Write satisfies the io.Writer interface.
// Each write is sent as a separate block.
func (w *streamWriter) Write(b []byte) (int, error) {
mw := w.w.addBlock(blockDatablock)
// Update hash.
w.h.Write(b)
// Write as messagepack bin array.
if err := mw.WriteBytes(b); err != nil {
return 0, w.w.setErr(err)
}
// Write data as binary array.
return len(b), w.w.sendBlock()
}
// Close satisfies the io.Closer interface.
func (w *streamWriter) Close() error {
// Write EOS only once.
if !w.eosWritten {
mw := w.w.addBlock(blockEOS)
sum := w.h.Sum(nil)
w.w.setErr(mw.WriteBytes(sum))
w.eosWritten = true
return w.w.sendBlock()
}
return nil
}
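// Note (descriptive, derived from the methods above): every Write call on a
// streamWriter is framed as its own blockDatablock, and Close emits a single
// blockEOS block carrying the running xxhash sum so a reader can verify the
// reassembled stream content.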
type closeWrapper struct {
before, after func() error
up io.WriteCloser
}
func (w *closeWrapper) Write(b []byte) (int, error) {
return w.up.Write(b)
}
// Close satisfies the io.Closer interface.
func (w *closeWrapper) Close() error {
if w.before != nil {
if err := w.before(); err != nil {
return err
}
w.before = nil
}
if w.up != nil {
if err := w.up.Close(); err != nil {
return err
}
w.up = nil
}
if w.after != nil {
if err := w.after(); err != nil {
return err
}
w.after = nil
}
return nil
}
type blockWriter struct {
id blockID
w io.Writer
wr *msgp.Writer
buf bytes.Buffer
hdr [8 + 5]byte
}
// init the blockwriter
// blocks will be written to w.
func (b *blockWriter) init(w io.Writer) {
b.w = w
b.buf.Grow(1 << 10)
b.buf.Reset()
b.wr = msgp.NewWriter(&b.buf)
}
// newBlock starts a new block with the specified id.
// Content should be written to the returned writer.
func (b *blockWriter) newBlock(id blockID) *msgp.Writer {
b.id = id
b.buf.Reset()
b.wr.Reset(&b.buf)
return b.wr
}
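// On the wire, each block emitted by send below is laid out as:
//
//	msgp int8   block id (blockID)
//	msgp uint32 payload length in bytes
//	payload     the msgp-encoded block content buffered in b.buf
//
// This summary is derived from the send implementation and is descriptive only.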
func (b *blockWriter) send() error {
if b.id == 0 {
return errors.New("blockWriter: no block started")
}
// Flush block data into b.buf
if err := b.wr.Flush(); err != nil {
return err
}
// Add block id
hdr := msgp.AppendInt8(b.hdr[:0], int8(b.id))
if uint64(b.buf.Len()) >= 1<<32 {
return errors.New("max block size exceeded")
}
blockLen := uint32(b.buf.Len())
// Add block length.
hdr = msgp.AppendUint32(hdr, blockLen)
if _, err := b.w.Write(hdr); err != nil {
return err
}
// Write block.
_, err := b.w.Write(b.buf.Bytes())
// Reset for new block.
b.buf.Reset()
b.id = 0
return err
}
golang-github-minio-madmin-go-3.0.104/examples/ 0000775 0000000 0000000 00000000000 14774251704 0021216 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/examples/.copyright.tmpl 0000664 0000000 0000000 00000001317 14774251704 0024204 0 ustar 00root root 0000000 0000000 +build ignore
Copyright (c) ${years} ${owner}.
This file is part of ${projectname}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see .
golang-github-minio-madmin-go-3.0.104/examples/accounting-info.go 0000664 0000000 0000000 00000002723 14774251704 0024634 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
opts := madmin.AccountOpts{PrefixUsage: false}
accountInfo, err := madmClnt.AccountInfo(context.Background(), opts)
if err != nil {
log.Fatalln(err)
}
log.Println(accountInfo)
}
golang-github-minio-madmin-go-3.0.104/examples/alive.go 0000664 0000000 0000000 00000003566 14774251704 0022657 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
st, err := madmClnt.ServerInfo(context.Background())
if err != nil {
log.Fatalln(err)
}
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// NewAnonymousClient returns an anonymous MinIO Admin client object.
// Anonymous client doesn't require any credentials
madmAnonClnt, err := madmin.NewAnonymousClient("your-minio.example.com:9000", true)
if err != nil {
log.Fatalln(err)
}
// madmAnonClnt.TraceOn(os.Stderr)
for aliveResult := range madmAnonClnt.Alive(context.Background(), madmin.AliveOpts{}, st.Servers...) {
log.Printf("%+v\n", aliveResult)
}
}
golang-github-minio-madmin-go-3.0.104/examples/bucket-bandwidth.go 0000664 0000000 0000000 00000003202 14774251704 0024761 0 ustar 00root root 0000000 0000000 //
//go:build ignore
// +build ignore
//
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"fmt"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madminClient, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
ctx := context.Background()
reportCh := madminClient.GetBucketBandwidth(ctx)
for i := 0; i < 10; i++ {
report := <-reportCh
fmt.Printf("Report: %+v\n", report)
}
reportCh = madminClient.GetBucketBandwidth(ctx, "sourceBucket", "sourceBucket2")
for i := 0; i < 10; i++ {
report := <-reportCh
fmt.Printf("Report: %+v\n", report)
}
}
golang-github-minio-madmin-go-3.0.104/examples/bucket-metadata.go 0000664 0000000 0000000 00000003463 14774251704 0024606 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
bucket := "bucket"
client, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// return bucket metadata as zipped content
r, err := client.ExportBucketMetadata(context.Background(), bucket)
if err != nil {
log.Fatalln(err)
}
// set bucket metadata to bucket on a new cluster
client2, err := madmin.New("your-minio.example.com:9001", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
bucket2 := "bucket"
// set bucket metadata from reader
if err := client2.ImportBucketMetadata(context.Background(), bucket2, r); err != nil {
log.Fatalln(err)
}
}
golang-github-minio-madmin-go-3.0.104/examples/bucket-quota.go 0000664 0000000 0000000 00000003473 14774251704 0024160 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"fmt"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
var kiB uint64 = 1 << 10
ctx := context.Background()
quota := &madmin.BucketQuota{
Quota: 32 * kiB,
Type: madmin.HardQuota,
}
// set bucket quota config
if err := madmClnt.SetBucketQuota(ctx, "bucket-name", quota); err != nil {
log.Fatalln(err)
}
// gets bucket quota config
quotaCfg, err := madmClnt.GetBucketQuota(ctx, "bucket-name")
if err != nil {
log.Fatalln(err)
}
fmt.Println(quotaCfg)
// remove bucket quota config
if err := madmClnt.RemoveBucketQuota(ctx, "bucket-name"); err != nil {
log.Fatalln(err)
}
}
golang-github-minio-madmin-go-3.0.104/examples/bucket-target.go 0000664 0000000 0000000 00000005400 14774251704 0024305 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"fmt"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
ctx := context.Background()
creds := madmin.Credentials{
AccessKey: "access-key",
SecretKey: "secret-key",
}
target := madmin.BucketTarget{
Endpoint: "site2:9000",
Credentials: creds,
TargetBucket: "destbucket",
IsSSL: false,
Type: madmin.ReplicationArn,
BandwidthLimit: 2 * 1024 * 1024,
}
// Set bucket target
arn, err := madmClnt.SetBucketTarget(ctx, "srcbucket", &target)
if err != nil {
log.Fatalln(err)
}
fmt.Println("replication target ARN is:", arn)
// List all bucket target(s)
targets, err := madmClnt.ListBucketTargets(ctx, "srcbucket", "")
if err != nil {
log.Fatalln(err)
}
fmt.Println("all bucket targets:", targets)
// List bucket targets for arn type "replica"
targets, err = madmClnt.ListBucketTargets(ctx, "srcbucket", "replica")
if err != nil {
log.Fatalln(err)
}
fmt.Println(`targets of type "replica":`, targets)
// update credentials for target
creds = madmin.Credentials{
AccessKey: "access-key2",
SecretKey: "secret-key2",
}
target = madmin.BucketTarget{
Endpoint: "site2:9000",
Credentials: creds,
SourceBucket: "srcbucket",
TargetBucket: "destbucket",
IsSSL: false,
Arn: "arn:minio:ilm:us-east-1:3cbe15b8-82b9-44bc-a737-db9051ab359a:srcbucket",
}
// update credentials on bucket target
if _, err := madmClnt.UpdateBucketTarget(ctx, &target); err != nil {
log.Fatalln(err)
}
// Remove bucket target
arn := "arn:minio:replica::ac66b2cf-dd8f-4e7e-a882-9a64132f0d59:dest"
if err := madmClnt.RemoveBucketTarget(ctx, "srcbucket", arn); err != nil {
log.Fatalln(err)
}
}
golang-github-minio-madmin-go-3.0.104/examples/cluster-health.go 0000664 0000000 0000000 00000003132 14774251704 0024470 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// NewAnonymousClient returns an anonymous MinIO Admin client object.
// Anonymous client doesn't require any credentials
madmAnonClnt, err := madmin.NewAnonymousClient("your-minio.example.com:9000", true)
if err != nil {
log.Fatalln(err)
}
// To enable trace :-
// madmAnonClnt.TraceOn(os.Stderr)
opts := madmin.HealthOpts{
ClusterRead: false, // set to "true" to check if the cluster has read quorum
Maintenance: false, // set to "true" to check if the cluster is taken down for maintenance
}
healthResult, err := madmAnonClnt.Healthy(context.Background(), opts)
if err != nil {
log.Fatalln(err)
}
log.Println(healthResult)
}
golang-github-minio-madmin-go-3.0.104/examples/create-job.go 0000664 0000000 0000000 00000003374 14774251704 0023567 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"os"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
madmClnt.TraceOn(os.Stderr)
job := `
replicate:
  flags:
    # (optional)
    name: "weekly-replication-job"
  target:
    type: "minio"
    bucket: "testbucket"
    endpoint: "https://play.min.io"
    credentials:
      accessKey: "minioadmin"
      secretKey: "minioadmin"
      sessionToken: ""
  source:
    type: "minio"
    bucket: "testbucket"
    prefix: ""
`
if err = madmClnt.StartBatchJob(context.Background(), job); err != nil {
log.Fatalln(err)
}
}
golang-github-minio-madmin-go-3.0.104/examples/data-usage-info.go 0000664 0000000 0000000 00000002642 14774251704 0024515 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
dataUsageInfo, err := madmClnt.DataUsageInfo(context.Background())
if err != nil {
log.Fatalln(err)
}
log.Println(dataUsageInfo)
}
golang-github-minio-madmin-go-3.0.104/examples/force-unlock.go 0000664 0000000 0000000 00000002763 14774251704 0024144 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"os"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
if len(os.Args) == 1 {
log.Fatalln("Please provide paths in following form ./force-unlock bucket/object/foo/1.txt")
}
if err := madmClnt.ForceUnlock(context.Background(), os.Args[1:]...); err != nil {
log.Fatalln(err)
}
}
golang-github-minio-madmin-go-3.0.104/examples/heal-manual.go 0000664 0000000 0000000 00000005105 14774251704 0023732 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"time"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
opts := madmin.HealOpts{
Recursive: true, // recursively heal all objects at 'prefix'
Remove: true, // remove content that has lost quorum and not recoverable
Recreate: true, // rewrite all old non-inlined xl.meta to new xl.meta
ScanMode: madmin.HealNormalScan, // by default do not do 'deep' scanning
}
start, _, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, "", false, false)
if err != nil {
log.Fatalln(err)
}
fmt.Println("Healstart sequence ===")
enc := json.NewEncoder(os.Stdout)
if err = enc.Encode(&start); err != nil {
log.Fatalln(err)
}
fmt.Println()
for {
_, status, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, start.ClientToken, false, false)
if err != nil {
log.Fatalln(err)
}
if status.Summary == "finished" {
fmt.Println("Healstatus on items ===")
for _, item := range status.Items {
if err = enc.Encode(&item); err != nil {
log.Fatalln(err)
}
}
break
}
if status.Summary == "stopped" {
fmt.Println("Healstatus on items ===")
fmt.Println("Heal failed with", status.FailureDetail)
break
}
for _, item := range status.Items {
if err = enc.Encode(&item); err != nil {
log.Fatalln(err)
}
}
time.Sleep(time.Second)
}
}
golang-github-minio-madmin-go-3.0.104/examples/heal-status.go 0000664 0000000 0000000 00000002774 14774251704 0024011 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"encoding/json"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
healStatusResult, err := madmClnt.BackgroundHealStatus(context.Background())
if err != nil {
log.Fatalln(err)
}
js, _ := json.MarshalIndent(healStatusResult, "", " ")
log.Printf("Heal status result: %s\n", string(js))
}
golang-github-minio-madmin-go-3.0.104/examples/iam-migrate.go 0000664 0000000 0000000 00000003020 14774251704 0023734 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
client, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// return IAM info as zipped content
r, err := client.ExportIAM(context.Background())
if err != nil {
log.Fatalln(err)
}
// set IAM info from the reader
if err := client.ImportIAM(context.Background(), r); err != nil {
log.Fatalln(err)
}
}
golang-github-minio-madmin-go-3.0.104/examples/job.yaml.template 0000664 0000000 0000000 00000003624 14774251704 0024473 0 ustar 00root root 0000000 0000000 replicate:
  apiVersion: v1
  # source of the objects to be replicated
  source:
    type: TYPE # valid values are "minio"
    bucket: BUCKET
    prefix: PREFIX
    # NOTE: if source is remote then target must be "local"
    # endpoint: ENDPOINT
    # credentials:
    #   accessKey: ACCESS-KEY
    #   secretKey: SECRET-KEY
    #   sessionToken: SESSION-TOKEN # Available when rotating credentials are used
  # target where the objects must be replicated
  target:
    type: TYPE # valid values are "minio"
    bucket: BUCKET
    prefix: PREFIX
    # NOTE: if target is remote then source must be "local"
    # endpoint: ENDPOINT
    # credentials:
    #   accessKey: ACCESS-KEY
    #   secretKey: SECRET-KEY
    #   sessionToken: SESSION-TOKEN # Available when rotating credentials are used
  # optional flags based filtering criteria
  # for all source objects
  flags:
    filter:
      newerThan: "7d" # match objects newer than this value (e.g. 7d10h31s)
      olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
      createdAfter: "date" # match objects created after "date"
      createdBefore: "date" # match objects created before "date"
      ## NOTE: tags are not supported when "source" is remote.
      # tags:
      #   - key: "name"
      #     value: "pick*" # match objects with tag 'name', with all values starting with 'pick'
      ## NOTE: metadata filter not supported when "source" is non MinIO.
      # metadata:
      #   - key: "content-type"
      #     value: "image/*" # match objects with 'content-type', with all values starting with 'image/'
    notify:
      endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
      token: "Bearer xxxxx" # optional authentication token for the notification endpoint
    retry:
      attempts: 10 # number of retries for the job before giving up
      delay: "500ms" # least amount of delay between each retry
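# This template is the kind of YAML document passed to StartBatchJob; see
# examples/create-job.go in this directory for a minimal end-to-end sketch.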
golang-github-minio-madmin-go-3.0.104/examples/kms-status.go 0000664 0000000 0000000 00000003633 14774251704 0023665 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
status, err := madmClnt.GetKeyStatus(context.Background(), "") // empty string refers to the default master key
if err != nil {
log.Fatalln(err)
}
log.Printf("Key: %s\n", status.KeyID)
if status.EncryptionErr == "" {
log.Println("\t • Encryption ✔")
} else {
log.Printf("\t • Encryption failed: %s\n", status.EncryptionErr)
}
if status.UpdateErr == "" {
log.Println("\t • Re-wrap ✔")
} else {
log.Printf("\t • Re-wrap failed: %s\n", status.UpdateErr)
}
if status.DecryptionErr == "" {
log.Println("\t • Decryption ✔")
} else {
log.Printf("\t • Decryption failed: %s\n", status.DecryptionErr)
}
}
golang-github-minio-madmin-go-3.0.104/examples/list-pools.go 0000664 0000000 0000000 00000003022 14774251704 0023647 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
pools, err := madmClnt.ListPools(context.Background())
if err != nil {
log.Fatalf("failed due to: %v", err)
}
out, err := json.Marshal(pools)
if err != nil {
log.Fatalf("Marshal failed due to: %v", err)
}
fmt.Println(string(out))
}
golang-github-minio-madmin-go-3.0.104/examples/perf-object.go 0000664 0000000 0000000 00000003045 14774251704 0023747 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"encoding/json"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
results, err := madmClnt.Speedtest(context.Background(), madmin.SpeedtestOpts{Autotune: true})
if err != nil {
log.Fatalln(err)
}
for result := range results {
js, _ := json.MarshalIndent(result, "", " ")
log.Printf("Speedtest Result: %s\n", string(js))
}
}
golang-github-minio-madmin-go-3.0.104/examples/profiling.go 0000664 0000000 0000000 00000004747 14774251704 0023552 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"io"
"log"
"os"
"time"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
profiler := madmin.ProfilerCPU
log.Println("Starting " + profiler + " profiling..")
startResults, err := madmClnt.StartProfiling(context.Background(), profiler)
if err != nil {
log.Fatalln(err)
}
for _, result := range startResults {
if !result.Success {
log.Printf("Unable to start profiling on node `%s`, reason = `%s`\n", result.NodeName, result.Error)
continue
}
log.Printf("Profiling successfully started on node `%s`\n", result.NodeName)
}
sleep := time.Duration(10)
time.Sleep(time.Second * sleep)
log.Println("Stopping profiling..")
profilingData, err := madmClnt.DownloadProfilingData(context.Background())
if err != nil {
log.Fatalln(err)
}
profilingFile, err := os.Create("/tmp/profiling-" + string(profiler) + ".zip")
if err != nil {
log.Fatal(err)
}
if _, err := io.Copy(profilingFile, profilingData); err != nil {
log.Fatal(err)
}
if err := profilingFile.Close(); err != nil {
log.Fatal(err)
}
if err := profilingData.Close(); err != nil {
log.Fatal(err)
}
log.Println("Profiling files " + profilingFile.Name() + " successfully downloaded.")
}
golang-github-minio-madmin-go-3.0.104/examples/replicate-diff.go 0000664 0000000 0000000 00000003257 14774251704 0024432 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"fmt"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// Print a diff of unreplicated objects in a particular prefix in my-bucketname for the remote target specified.
// Leaving out the ARN returns the diff for all configured remote targets on the bucket.
diffCh := madmClnt.BucketReplicationDiff(context.Background(), "my-bucketname", madmin.ReplDiffOpts{
ARN: "",
Prefix: "prefix/path",
})
for diff := range diffCh {
if diff.Err != nil {
log.Fatalln(diff.Err)
}
fmt.Println(diff)
}
}
golang-github-minio-madmin-go-3.0.104/examples/replicate-mrf.go 0000664 0000000 0000000 00000003044 14774251704 0024300 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"fmt"
"log"
"github.com/minio/madmin-go/v2"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// Print most recent failures for replication across all nodes in a minio cluster.
mrfCh := madmClnt.BucketReplicationMRF(context.Background(), "my-bucketname", "all")
for m := range mrfCh {
if m.Err != nil {
log.Fatalln(m.Err)
}
fmt.Println(m)
}
}
golang-github-minio-madmin-go-3.0.104/examples/server-info.go 0000664 0000000 0000000 00000002620 14774251704 0024004 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
st, err := madmClnt.ServerInfo(context.Background())
if err != nil {
log.Fatalln(err)
}
log.Printf("%+v\n", st)
}
golang-github-minio-madmin-go-3.0.104/examples/service-account.go 0000664 0000000 0000000 00000004322 14774251704 0024640 0 ustar 00root root 0000000 0000000 //
//go:build ignore
// +build ignore
//
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package main
import (
"context"
"fmt"
"log"
"time"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madminClient, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
ctx := context.Background()
// add service account
expiration := time.Now().Add(30 * time.Minute)
addReq := madmin.AddServiceAccountReq{
TargetUser: "my-username",
Expiration: &expiration,
}
addRes, err := madminClient.AddServiceAccount(context.Background(), addReq)
if err != nil {
log.Fatalln(err)
}
fmt.Println(addRes)
// update service account
newExpiration := time.Now().Add(45 * time.Minute)
updateReq := madmin.UpdateServiceAccountReq{
NewStatus: "my-status",
NewExpiration: &newExpiration,
}
if err := madminClient.UpdateServiceAccount(ctx, "my-accesskey", updateReq); err != nil {
log.Fatalln(err)
}
// get service account
listRes, err := madminClient.ListServiceAccounts(ctx, "my-username")
if err != nil {
log.Fatalln(err)
}
fmt.Println(listRes)
// delete service account
if err := madminClient.DeleteServiceAccount(ctx, "my-accesskey"); err != nil {
log.Fatalln(err)
}
}
golang-github-minio-madmin-go-3.0.104/examples/service-restart.go 0000664 0000000 0000000 00000002574 14774251704 0024677 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with your actual values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
err = madmClnt.ServiceRestart(context.Background())
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}
golang-github-minio-madmin-go-3.0.104/examples/service-trace.go 0000664 0000000 0000000 00000003222 14774251704 0024300 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package main
import (
"context"
"fmt"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with your actual values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// Start listening to all HTTP trace activity from all servers in the MinIO cluster.
traceCh := madmClnt.ServiceTrace(context.Background(), madmin.ServiceTraceOpts{
S3: true,
Internal: true,
Storage: true,
OS: true,
Threshold: 0,
})
for traceInfo := range traceCh {
if traceInfo.Err != nil {
fmt.Println(traceInfo.Err)
}
fmt.Println(traceInfo)
}
}
golang-github-minio-madmin-go-3.0.104/examples/storage-info.go 0000664 0000000 0000000 00000002622 14774251704 0024144 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package main
import (
"context"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with your actual values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
st, err := madmClnt.StorageInfo(context.Background())
if err != nil {
log.Fatalln(err)
}
log.Printf("%+v\n", st)
}
golang-github-minio-madmin-go-3.0.104/examples/top-locks.go 0000664 0000000 0000000 00000003057 14774251704 0023465 0 ustar 00root root 0000000 0000000 //go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package main
import (
"context"
"encoding/json"
"log"
"github.com/minio/madmin-go/v3"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
// dummy values, please replace them with your actual values.
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
locks, err := madmClnt.TopLocks(context.Background())
if err != nil {
log.Fatalf("failed due to: %v", err)
}
out, err := json.Marshal(locks)
if err != nil {
log.Fatalf("Marshal failed due to: %v", err)
}
log.Println("Top Locks received successfully: ", string(out))
}
golang-github-minio-madmin-go-3.0.104/external.go 0000664 0000000 0000000 00000006241 14774251704 0021554 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
// Provide msgp for external types.
// If updating packages breaks this, update structs below.
//msgp:clearomitted
//msgp:tag json
//go:generate msgp -unexported
type cpuTimesStat struct {
CPU string `json:"cpu"`
User float64 `json:"user"`
System float64 `json:"system"`
Idle float64 `json:"idle"`
Nice float64 `json:"nice"`
Iowait float64 `json:"iowait"`
Irq float64 `json:"irq"`
Softirq float64 `json:"softirq"`
Steal float64 `json:"steal"`
Guest float64 `json:"guest"`
GuestNice float64 `json:"guestNice"`
}
type loadAvgStat struct {
Load1 float64 `json:"load1"`
Load5 float64 `json:"load5"`
Load15 float64 `json:"load15"`
}
// procfsNetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
type procfsNetDevLine struct {
Name string `json:"name"` // The name of the interface.
RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
}
golang-github-minio-madmin-go-3.0.104/external_gen.go 0000664 0000000 0000000 00000060446 14774251704 0022414 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *cpuTimesStat) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "cpu":
z.CPU, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
case "user":
z.User, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "User")
return
}
case "system":
z.System, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "System")
return
}
case "idle":
z.Idle, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Idle")
return
}
case "nice":
z.Nice, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Nice")
return
}
case "iowait":
z.Iowait, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Iowait")
return
}
case "irq":
z.Irq, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Irq")
return
}
case "softirq":
z.Softirq, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Softirq")
return
}
case "steal":
z.Steal, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Steal")
return
}
case "guest":
z.Guest, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Guest")
return
}
case "guestNice":
z.GuestNice, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "GuestNice")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *cpuTimesStat) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 11
// write "cpu"
err = en.Append(0x8b, 0xa3, 0x63, 0x70, 0x75)
if err != nil {
return
}
err = en.WriteString(z.CPU)
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
// write "user"
err = en.Append(0xa4, 0x75, 0x73, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteFloat64(z.User)
if err != nil {
err = msgp.WrapError(err, "User")
return
}
// write "system"
err = en.Append(0xa6, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d)
if err != nil {
return
}
err = en.WriteFloat64(z.System)
if err != nil {
err = msgp.WrapError(err, "System")
return
}
// write "idle"
err = en.Append(0xa4, 0x69, 0x64, 0x6c, 0x65)
if err != nil {
return
}
err = en.WriteFloat64(z.Idle)
if err != nil {
err = msgp.WrapError(err, "Idle")
return
}
// write "nice"
err = en.Append(0xa4, 0x6e, 0x69, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteFloat64(z.Nice)
if err != nil {
err = msgp.WrapError(err, "Nice")
return
}
// write "iowait"
err = en.Append(0xa6, 0x69, 0x6f, 0x77, 0x61, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.Iowait)
if err != nil {
err = msgp.WrapError(err, "Iowait")
return
}
// write "irq"
err = en.Append(0xa3, 0x69, 0x72, 0x71)
if err != nil {
return
}
err = en.WriteFloat64(z.Irq)
if err != nil {
err = msgp.WrapError(err, "Irq")
return
}
// write "softirq"
err = en.Append(0xa7, 0x73, 0x6f, 0x66, 0x74, 0x69, 0x72, 0x71)
if err != nil {
return
}
err = en.WriteFloat64(z.Softirq)
if err != nil {
err = msgp.WrapError(err, "Softirq")
return
}
// write "steal"
err = en.Append(0xa5, 0x73, 0x74, 0x65, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteFloat64(z.Steal)
if err != nil {
err = msgp.WrapError(err, "Steal")
return
}
// write "guest"
err = en.Append(0xa5, 0x67, 0x75, 0x65, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.Guest)
if err != nil {
err = msgp.WrapError(err, "Guest")
return
}
// write "guestNice"
err = en.Append(0xa9, 0x67, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x69, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteFloat64(z.GuestNice)
if err != nil {
err = msgp.WrapError(err, "GuestNice")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *cpuTimesStat) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 11
// string "cpu"
o = append(o, 0x8b, 0xa3, 0x63, 0x70, 0x75)
o = msgp.AppendString(o, z.CPU)
// string "user"
o = append(o, 0xa4, 0x75, 0x73, 0x65, 0x72)
o = msgp.AppendFloat64(o, z.User)
// string "system"
o = append(o, 0xa6, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d)
o = msgp.AppendFloat64(o, z.System)
// string "idle"
o = append(o, 0xa4, 0x69, 0x64, 0x6c, 0x65)
o = msgp.AppendFloat64(o, z.Idle)
// string "nice"
o = append(o, 0xa4, 0x6e, 0x69, 0x63, 0x65)
o = msgp.AppendFloat64(o, z.Nice)
// string "iowait"
o = append(o, 0xa6, 0x69, 0x6f, 0x77, 0x61, 0x69, 0x74)
o = msgp.AppendFloat64(o, z.Iowait)
// string "irq"
o = append(o, 0xa3, 0x69, 0x72, 0x71)
o = msgp.AppendFloat64(o, z.Irq)
// string "softirq"
o = append(o, 0xa7, 0x73, 0x6f, 0x66, 0x74, 0x69, 0x72, 0x71)
o = msgp.AppendFloat64(o, z.Softirq)
// string "steal"
o = append(o, 0xa5, 0x73, 0x74, 0x65, 0x61, 0x6c)
o = msgp.AppendFloat64(o, z.Steal)
// string "guest"
o = append(o, 0xa5, 0x67, 0x75, 0x65, 0x73, 0x74)
o = msgp.AppendFloat64(o, z.Guest)
// string "guestNice"
o = append(o, 0xa9, 0x67, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x69, 0x63, 0x65)
o = msgp.AppendFloat64(o, z.GuestNice)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *cpuTimesStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "cpu":
z.CPU, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
case "user":
z.User, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "User")
return
}
case "system":
z.System, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "System")
return
}
case "idle":
z.Idle, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Idle")
return
}
case "nice":
z.Nice, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Nice")
return
}
case "iowait":
z.Iowait, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Iowait")
return
}
case "irq":
z.Irq, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Irq")
return
}
case "softirq":
z.Softirq, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Softirq")
return
}
case "steal":
z.Steal, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Steal")
return
}
case "guest":
z.Guest, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Guest")
return
}
case "guestNice":
z.GuestNice, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "GuestNice")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *cpuTimesStat) Msgsize() (s int) {
s = 1 + 4 + msgp.StringPrefixSize + len(z.CPU) + 5 + msgp.Float64Size + 7 + msgp.Float64Size + 5 + msgp.Float64Size + 5 + msgp.Float64Size + 7 + msgp.Float64Size + 4 + msgp.Float64Size + 8 + msgp.Float64Size + 6 + msgp.Float64Size + 6 + msgp.Float64Size + 10 + msgp.Float64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *loadAvgStat) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "load1":
z.Load1, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Load1")
return
}
case "load5":
z.Load5, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Load5")
return
}
case "load15":
z.Load15, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Load15")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z loadAvgStat) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "load1"
err = en.Append(0x83, 0xa5, 0x6c, 0x6f, 0x61, 0x64, 0x31)
if err != nil {
return
}
err = en.WriteFloat64(z.Load1)
if err != nil {
err = msgp.WrapError(err, "Load1")
return
}
// write "load5"
err = en.Append(0xa5, 0x6c, 0x6f, 0x61, 0x64, 0x35)
if err != nil {
return
}
err = en.WriteFloat64(z.Load5)
if err != nil {
err = msgp.WrapError(err, "Load5")
return
}
// write "load15"
err = en.Append(0xa6, 0x6c, 0x6f, 0x61, 0x64, 0x31, 0x35)
if err != nil {
return
}
err = en.WriteFloat64(z.Load15)
if err != nil {
err = msgp.WrapError(err, "Load15")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z loadAvgStat) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "load1"
o = append(o, 0x83, 0xa5, 0x6c, 0x6f, 0x61, 0x64, 0x31)
o = msgp.AppendFloat64(o, z.Load1)
// string "load5"
o = append(o, 0xa5, 0x6c, 0x6f, 0x61, 0x64, 0x35)
o = msgp.AppendFloat64(o, z.Load5)
// string "load15"
o = append(o, 0xa6, 0x6c, 0x6f, 0x61, 0x64, 0x31, 0x35)
o = msgp.AppendFloat64(o, z.Load15)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *loadAvgStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "load1":
z.Load1, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Load1")
return
}
case "load5":
z.Load5, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Load5")
return
}
case "load15":
z.Load15, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Load15")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z loadAvgStat) Msgsize() (s int) {
s = 1 + 6 + msgp.Float64Size + 6 + msgp.Float64Size + 7 + msgp.Float64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *procfsNetDevLine) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "rx_bytes":
z.RxBytes, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxBytes")
return
}
case "rx_packets":
z.RxPackets, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxPackets")
return
}
case "rx_errors":
z.RxErrors, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxErrors")
return
}
case "rx_dropped":
z.RxDropped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxDropped")
return
}
case "rx_fifo":
z.RxFIFO, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxFIFO")
return
}
case "rx_frame":
z.RxFrame, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxFrame")
return
}
case "rx_compressed":
z.RxCompressed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxCompressed")
return
}
case "rx_multicast":
z.RxMulticast, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RxMulticast")
return
}
case "tx_bytes":
z.TxBytes, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxBytes")
return
}
case "tx_packets":
z.TxPackets, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxPackets")
return
}
case "tx_errors":
z.TxErrors, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxErrors")
return
}
case "tx_dropped":
z.TxDropped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxDropped")
return
}
case "tx_fifo":
z.TxFIFO, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxFIFO")
return
}
case "tx_collisions":
z.TxCollisions, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxCollisions")
return
}
case "tx_carrier":
z.TxCarrier, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxCarrier")
return
}
case "tx_compressed":
z.TxCompressed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TxCompressed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *procfsNetDevLine) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 17
// write "name"
err = en.Append(0xde, 0x0, 0x11, 0xa4, 0x6e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "rx_bytes"
err = en.Append(0xa8, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.RxBytes)
if err != nil {
err = msgp.WrapError(err, "RxBytes")
return
}
// write "rx_packets"
err = en.Append(0xaa, 0x72, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.RxPackets)
if err != nil {
err = msgp.WrapError(err, "RxPackets")
return
}
// write "rx_errors"
err = en.Append(0xa9, 0x72, 0x78, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.RxErrors)
if err != nil {
err = msgp.WrapError(err, "RxErrors")
return
}
// write "rx_dropped"
err = en.Append(0xaa, 0x72, 0x78, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.RxDropped)
if err != nil {
err = msgp.WrapError(err, "RxDropped")
return
}
// write "rx_fifo"
err = en.Append(0xa7, 0x72, 0x78, 0x5f, 0x66, 0x69, 0x66, 0x6f)
if err != nil {
return
}
err = en.WriteUint64(z.RxFIFO)
if err != nil {
err = msgp.WrapError(err, "RxFIFO")
return
}
// write "rx_frame"
err = en.Append(0xa8, 0x72, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.RxFrame)
if err != nil {
err = msgp.WrapError(err, "RxFrame")
return
}
// write "rx_compressed"
err = en.Append(0xad, 0x72, 0x78, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.RxCompressed)
if err != nil {
err = msgp.WrapError(err, "RxCompressed")
return
}
// write "rx_multicast"
err = en.Append(0xac, 0x72, 0x78, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x63, 0x61, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.RxMulticast)
if err != nil {
err = msgp.WrapError(err, "RxMulticast")
return
}
// write "tx_bytes"
err = en.Append(0xa8, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.TxBytes)
if err != nil {
err = msgp.WrapError(err, "TxBytes")
return
}
// write "tx_packets"
err = en.Append(0xaa, 0x74, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.TxPackets)
if err != nil {
err = msgp.WrapError(err, "TxPackets")
return
}
// write "tx_errors"
err = en.Append(0xa9, 0x74, 0x78, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.TxErrors)
if err != nil {
err = msgp.WrapError(err, "TxErrors")
return
}
// write "tx_dropped"
err = en.Append(0xaa, 0x74, 0x78, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.TxDropped)
if err != nil {
err = msgp.WrapError(err, "TxDropped")
return
}
// write "tx_fifo"
err = en.Append(0xa7, 0x74, 0x78, 0x5f, 0x66, 0x69, 0x66, 0x6f)
if err != nil {
return
}
err = en.WriteUint64(z.TxFIFO)
if err != nil {
err = msgp.WrapError(err, "TxFIFO")
return
}
// write "tx_collisions"
err = en.Append(0xad, 0x74, 0x78, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.TxCollisions)
if err != nil {
err = msgp.WrapError(err, "TxCollisions")
return
}
// write "tx_carrier"
err = en.Append(0xaa, 0x74, 0x78, 0x5f, 0x63, 0x61, 0x72, 0x72, 0x69, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteUint64(z.TxCarrier)
if err != nil {
err = msgp.WrapError(err, "TxCarrier")
return
}
// write "tx_compressed"
err = en.Append(0xad, 0x74, 0x78, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.TxCompressed)
if err != nil {
err = msgp.WrapError(err, "TxCompressed")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *procfsNetDevLine) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 17
// string "name"
o = append(o, 0xde, 0x0, 0x11, 0xa4, 0x6e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "rx_bytes"
o = append(o, 0xa8, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendUint64(o, z.RxBytes)
// string "rx_packets"
o = append(o, 0xaa, 0x72, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendUint64(o, z.RxPackets)
// string "rx_errors"
o = append(o, 0xa9, 0x72, 0x78, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73)
o = msgp.AppendUint64(o, z.RxErrors)
// string "rx_dropped"
o = append(o, 0xaa, 0x72, 0x78, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.RxDropped)
// string "rx_fifo"
o = append(o, 0xa7, 0x72, 0x78, 0x5f, 0x66, 0x69, 0x66, 0x6f)
o = msgp.AppendUint64(o, z.RxFIFO)
// string "rx_frame"
o = append(o, 0xa8, 0x72, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65)
o = msgp.AppendUint64(o, z.RxFrame)
// string "rx_compressed"
o = append(o, 0xad, 0x72, 0x78, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64)
o = msgp.AppendUint64(o, z.RxCompressed)
// string "rx_multicast"
o = append(o, 0xac, 0x72, 0x78, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x63, 0x61, 0x73, 0x74)
o = msgp.AppendUint64(o, z.RxMulticast)
// string "tx_bytes"
o = append(o, 0xa8, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendUint64(o, z.TxBytes)
// string "tx_packets"
o = append(o, 0xaa, 0x74, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendUint64(o, z.TxPackets)
// string "tx_errors"
o = append(o, 0xa9, 0x74, 0x78, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73)
o = msgp.AppendUint64(o, z.TxErrors)
// string "tx_dropped"
o = append(o, 0xaa, 0x74, 0x78, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.TxDropped)
// string "tx_fifo"
o = append(o, 0xa7, 0x74, 0x78, 0x5f, 0x66, 0x69, 0x66, 0x6f)
o = msgp.AppendUint64(o, z.TxFIFO)
// string "tx_collisions"
o = append(o, 0xad, 0x74, 0x78, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendUint64(o, z.TxCollisions)
// string "tx_carrier"
o = append(o, 0xaa, 0x74, 0x78, 0x5f, 0x63, 0x61, 0x72, 0x72, 0x69, 0x65, 0x72)
o = msgp.AppendUint64(o, z.TxCarrier)
// string "tx_compressed"
o = append(o, 0xad, 0x74, 0x78, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64)
o = msgp.AppendUint64(o, z.TxCompressed)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *procfsNetDevLine) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "rx_bytes":
z.RxBytes, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxBytes")
return
}
case "rx_packets":
z.RxPackets, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxPackets")
return
}
case "rx_errors":
z.RxErrors, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxErrors")
return
}
case "rx_dropped":
z.RxDropped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxDropped")
return
}
case "rx_fifo":
z.RxFIFO, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxFIFO")
return
}
case "rx_frame":
z.RxFrame, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxFrame")
return
}
case "rx_compressed":
z.RxCompressed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxCompressed")
return
}
case "rx_multicast":
z.RxMulticast, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RxMulticast")
return
}
case "tx_bytes":
z.TxBytes, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxBytes")
return
}
case "tx_packets":
z.TxPackets, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxPackets")
return
}
case "tx_errors":
z.TxErrors, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxErrors")
return
}
case "tx_dropped":
z.TxDropped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxDropped")
return
}
case "tx_fifo":
z.TxFIFO, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxFIFO")
return
}
case "tx_collisions":
z.TxCollisions, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxCollisions")
return
}
case "tx_carrier":
z.TxCarrier, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxCarrier")
return
}
case "tx_compressed":
z.TxCompressed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TxCompressed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *procfsNetDevLine) Msgsize() (s int) {
s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.Uint64Size + 11 + msgp.Uint64Size + 10 + msgp.Uint64Size + 11 + msgp.Uint64Size + 8 + msgp.Uint64Size + 9 + msgp.Uint64Size + 14 + msgp.Uint64Size + 13 + msgp.Uint64Size + 9 + msgp.Uint64Size + 11 + msgp.Uint64Size + 10 + msgp.Uint64Size + 11 + msgp.Uint64Size + 8 + msgp.Uint64Size + 14 + msgp.Uint64Size + 11 + msgp.Uint64Size + 14 + msgp.Uint64Size
return
}
golang-github-minio-madmin-go-3.0.104/external_gen_test.go 0000664 0000000 0000000 00000015321 14774251704 0023443 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalcpuTimesStat(t *testing.T) {
v := cpuTimesStat{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgcpuTimesStat(b *testing.B) {
v := cpuTimesStat{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgcpuTimesStat(b *testing.B) {
v := cpuTimesStat{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalcpuTimesStat(b *testing.B) {
v := cpuTimesStat{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodecpuTimesStat(t *testing.T) {
v := cpuTimesStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodecpuTimesStat Msgsize() is inaccurate")
}
vn := cpuTimesStat{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodecpuTimesStat(b *testing.B) {
v := cpuTimesStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodecpuTimesStat(b *testing.B) {
v := cpuTimesStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalloadAvgStat(t *testing.T) {
v := loadAvgStat{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgloadAvgStat(b *testing.B) {
v := loadAvgStat{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgloadAvgStat(b *testing.B) {
v := loadAvgStat{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalloadAvgStat(b *testing.B) {
v := loadAvgStat{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeloadAvgStat(t *testing.T) {
v := loadAvgStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeloadAvgStat Msgsize() is inaccurate")
}
vn := loadAvgStat{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeloadAvgStat(b *testing.B) {
v := loadAvgStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeloadAvgStat(b *testing.B) {
v := loadAvgStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalprocfsNetDevLine(t *testing.T) {
v := procfsNetDevLine{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgprocfsNetDevLine(b *testing.B) {
v := procfsNetDevLine{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgprocfsNetDevLine(b *testing.B) {
v := procfsNetDevLine{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalprocfsNetDevLine(b *testing.B) {
v := procfsNetDevLine{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeprocfsNetDevLine(t *testing.T) {
v := procfsNetDevLine{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeprocfsNetDevLine Msgsize() is inaccurate")
}
vn := procfsNetDevLine{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeprocfsNetDevLine(b *testing.B) {
v := procfsNetDevLine{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeprocfsNetDevLine(b *testing.B) {
v := procfsNetDevLine{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/fips.go 0000664 0000000 0000000 00000002155 14774251704 0020673 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
//go:build fips
// +build fips
package madmin
// FIPSEnabled returns true if and only if FIPS 140-2 support
// is enabled.
//
// FIPS 140-2 requires that only specific cryptographic
// primitives, like AES or SHA-256, are used and that
// those primitives are implemented by a FIPS 140-2
// certified cryptographic module.
func FIPSEnabled() bool { return true }
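// A minimal usage sketch from caller code (not part of this file); it only
// illustrates that this implementation is selected by building with the
// "fips" build tag, e.g. `go build -tags fips`:
//
//	if madmin.FIPSEnabled() {
//		fmt.Println("FIPS 140-2 cryptographic module support is enabled")
//	}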
golang-github-minio-madmin-go-3.0.104/go.mod 0000664 0000000 0000000 00000003610 14774251704 0020506 0 ustar 00root root 0000000 0000000 module github.com/minio/madmin-go/v3
go 1.22
require (
github.com/cespare/xxhash/v2 v2.3.0
github.com/dustin/go-humanize v1.0.1
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/minio/minio-go/v7 v7.0.83-0.20241230094935-5757f2c8544a
github.com/prometheus/common v0.59.1
github.com/prometheus/procfs v0.15.1
github.com/prometheus/prom2json v1.4.0
github.com/safchain/ethtool v0.4.1
github.com/secure-io/sio-go v0.3.1
github.com/shirou/gopsutil/v3 v3.24.5
github.com/tinylib/msgp v1.2.5
golang.org/x/crypto v0.31.0
golang.org/x/net v0.33.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/goccy/go-json v0.10.4 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/prometheus v0.54.1 // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
)
golang-github-minio-madmin-go-3.0.104/go.sum 0000664 0000000 0000000 00000023634 14774251704 0020543 0 ustar 00root root 0000000 0000000 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.83-0.20241230094935-5757f2c8544a h1:nPw29aor4WGYpmBZy5jQT/cW5wtFrG8tEOCNeltMcq8=
github.com/minio/minio-go/v7 v7.0.83-0.20241230094935-5757f2c8544a/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prom2json v1.4.0 h1:2AEOsd1ebqql/p9u0IWgCpUAteAAf9Lnf/SVyieqer4=
github.com/prometheus/prom2json v1.4.0/go.mod h1:DmcIMPspQD/fMyFCYti5qJJbuEnqDh3DGoooO0sgr4w=
github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ=
github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/safchain/ethtool v0.4.1 h1:S6mEleTADqgynileXoiapt/nKnatyR6bmIHoF+h2ADo=
github.com/safchain/ethtool v0.4.1/go.mod h1:XLLnZmy4OCRTkksP/UiMjij96YmIsBfmBQcs7H6tA48=
github.com/secure-io/sio-go v0.3.1 h1:dNvY9awjabXTYGsTF1PiCySl9Ltofk9GA3VdWlo7rRc=
github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po=
github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
golang-github-minio-madmin-go-3.0.104/group-commands.go 0000664 0000000 0000000 00000010062 14774251704 0022661 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"
"time"
)
// GroupAddRemove is a type for adding/removing members to/from a group.
type GroupAddRemove struct {
Group string `json:"group"`
Members []string `json:"members"`
Status GroupStatus `json:"groupStatus"`
IsRemove bool `json:"isRemove"`
}
// UpdateGroupMembers - adds/removes users to/from a group. Server
// creates the group as needed. The group is removed if a remove request is
// made on an empty group.
func (adm *AdminClient) UpdateGroupMembers(ctx context.Context, g GroupAddRemove) error {
data, err := json.Marshal(g)
if err != nil {
return err
}
reqData := requestData{
relPath: adminAPIPrefix + "/update-group-members",
content: data,
}
// Execute PUT on /minio/admin/v3/update-group-members
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
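// A minimal usage sketch for UpdateGroupMembers (assumptions: adm is an
// *AdminClient obtained from madmin.New, and "my-group", "alice" and "bob"
// are placeholder values):
//
//	err := adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{
//		Group:   "my-group",
//		Members: []string{"alice", "bob"},
//	})
//	if err != nil {
//		log.Fatalln(err)
//	}
//
// Setting IsRemove to true in the same request removes the listed members
// instead of adding them.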
// GroupDesc is a type that holds group info along with the policy
// attached to it.
type GroupDesc struct {
Name string `json:"name"`
Status string `json:"status"`
Members []string `json:"members"`
Policy string `json:"policy"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
// GetGroupDescription - fetches information on a group.
func (adm *AdminClient) GetGroupDescription(ctx context.Context, group string) (*GroupDesc, error) {
v := url.Values{}
v.Set("group", group)
reqData := requestData{
relPath: adminAPIPrefix + "/group",
queryValues: v,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
gd := GroupDesc{}
if err = json.Unmarshal(data, &gd); err != nil {
return nil, err
}
return &gd, nil
}
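// A minimal usage sketch for GetGroupDescription (assumption: "my-group" is
// a placeholder group name):
//
//	gd, err := adm.GetGroupDescription(ctx, "my-group")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Printf("name=%s status=%s members=%v policy=%s\n",
//		gd.Name, gd.Status, gd.Members, gd.Policy)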
// ListGroups - lists all group names present on the server.
func (adm *AdminClient) ListGroups(ctx context.Context) ([]string, error) {
reqData := requestData{
relPath: adminAPIPrefix + "/groups",
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
groups := []string{}
if err = json.Unmarshal(data, &groups); err != nil {
return nil, err
}
return groups, nil
}
// GroupStatus - group status.
type GroupStatus string
// GroupStatus values.
const (
GroupEnabled GroupStatus = "enabled"
GroupDisabled GroupStatus = "disabled"
)
// SetGroupStatus - sets the status of a group.
func (adm *AdminClient) SetGroupStatus(ctx context.Context, group string, status GroupStatus) error {
v := url.Values{}
v.Set("group", group)
v.Set("status", string(status))
reqData := requestData{
relPath: adminAPIPrefix + "/set-group-status",
queryValues: v,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
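// exampleDisableGroup is a hypothetical sketch and not part of the upstream
// API: it disables a group and re-reads its description to confirm the change.
func exampleDisableGroup(ctx context.Context, adm *AdminClient) (string, error) {
	if err := adm.SetGroupStatus(ctx, "readers", GroupDisabled); err != nil {
		return "", err
	}
	gd, err := adm.GetGroupDescription(ctx, "readers")
	if err != nil {
		return "", err
	}
	return gd.Status, nil // expected to report "disabled"
}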
golang-github-minio-madmin-go-3.0.104/heal-commands.go 0000664 0000000 0000000 00000032350 14774251704 0022442 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"time"
)
//msgp:clearomitted
//msgp:tag json
//go:generate msgp
// HealScanMode represents the type of healing scan
type HealScanMode int
const (
// HealUnknownScan default is unknown
HealUnknownScan HealScanMode = iota
// HealNormalScan checks if parts are present and not outdated
HealNormalScan
// HealDeepScan checks for parts bitrot checksums
HealDeepScan
)
// HealOpts - collection of options for a heal sequence
type HealOpts struct {
Recursive bool `json:"recursive"`
DryRun bool `json:"dryRun"`
Remove bool `json:"remove"`
Recreate bool `json:"recreate"` // Rewrite all resources specified at the bucket or prefix.
ScanMode HealScanMode `json:"scanMode"`
UpdateParity bool `json:"updateParity"` // Update the parity of the existing object with a new one
NoLock bool `json:"nolock"`
// Pool to heal. nil indicates "all pools" (and sets).
Pool *int `json:"pool,omitempty"`
// Set to heal. nil indicates "all sets". Should always be nil if Pool is nil.
Set *int `json:"set,omitempty"`
}
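// exampleHealOpts is a hypothetical sketch and not part of the upstream API:
// it builds a HealOpts value for a recursive, dry-run deep scan restricted to
// a single pool/set pair. The pool and set indexes are placeholders.
func exampleHealOpts() HealOpts {
	pool, set := 0, 1
	return HealOpts{
		Recursive: true,         // walk the whole bucket/prefix
		DryRun:    true,         // report what would be healed, change nothing
		ScanMode:  HealDeepScan, // also verify part bitrot checksums
		Pool:      &pool,        // nil would mean "all pools"
		Set:       &set,         // must be nil whenever Pool is nil
	}
}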
// Equal returns true if no matches o on the compared fields below
// (NoLock, Pool and Set are not considered).
func (o HealOpts) Equal(no HealOpts) bool {
if o.Recursive != no.Recursive {
return false
}
if o.DryRun != no.DryRun {
return false
}
if o.Remove != no.Remove {
return false
}
if o.Recreate != no.Recreate {
return false
}
if o.UpdateParity != no.UpdateParity {
return false
}
return o.ScanMode == no.ScanMode
}
// HealStartSuccess - holds information about a successfully started
// heal operation
type HealStartSuccess struct {
ClientToken string `json:"clientToken"`
ClientAddress string `json:"clientAddress"`
StartTime time.Time `json:"startTime"`
}
// HealStopSuccess - holds information about a successfully stopped
// heal operation.
type HealStopSuccess HealStartSuccess
// HealTaskStatus - status struct for a heal task
type HealTaskStatus struct {
Summary string `json:"summary"`
FailureDetail string `json:"detail"`
StartTime time.Time `json:"startTime"`
HealSettings HealOpts `json:"settings"`
Items []HealResultItem `json:"items,omitempty"`
}
// HealItemType - specify the type of heal operation in a healing
// result
type HealItemType string
// HealItemType constants
const (
HealItemMetadata HealItemType = "metadata"
HealItemBucket HealItemType = "bucket"
HealItemBucketMetadata HealItemType = "bucket-metadata"
HealItemObject HealItemType = "object"
)
// Drive state constants
const (
DriveStateOk string = "ok"
DriveStateOffline string = "offline"
DriveStateCorrupt string = "corrupt"
DriveStateMissing string = "missing"
DriveStatePermission string = "permission-denied"
DriveStateFaulty string = "faulty"
DriveStateRootMount string = "root-mount"
DriveStateUnknown string = "unknown"
DriveStateUnformatted string = "unformatted" // only returned by disk
)
// HealDriveInfo - struct for an individual drive info item.
type HealDriveInfo struct {
UUID string `json:"uuid"`
Endpoint string `json:"endpoint"`
State string `json:"state"`
}
// HealResultItem - struct for an individual heal result item
type HealResultItem struct {
ResultIndex int64 `json:"resultId"`
Type HealItemType `json:"type"`
Bucket string `json:"bucket"`
Object string `json:"object"`
VersionID string `json:"versionId"`
Detail string `json:"detail"`
ParityBlocks int `json:"parityBlocks,omitempty"`
DataBlocks int `json:"dataBlocks,omitempty"`
DiskCount int `json:"diskCount"`
SetCount int `json:"setCount"`
// below slices are from drive info.
Before struct {
Drives []HealDriveInfo `json:"drives"`
} `json:"before"`
After struct {
Drives []HealDriveInfo `json:"drives"`
} `json:"after"`
ObjectSize int64 `json:"objectSize"`
}
// GetMissingCounts - returns the number of missing disks before
// and after heal
func (hri *HealResultItem) GetMissingCounts() (b, a int) {
if hri == nil {
return
}
for _, v := range hri.Before.Drives {
if v.State == DriveStateMissing {
b++
}
}
for _, v := range hri.After.Drives {
if v.State == DriveStateMissing {
a++
}
}
return
}
// GetOfflineCounts - returns the number of offline disks before
// and after heal
func (hri *HealResultItem) GetOfflineCounts() (b, a int) {
if hri == nil {
return
}
for _, v := range hri.Before.Drives {
if v.State == DriveStateOffline {
b++
}
}
for _, v := range hri.After.Drives {
if v.State == DriveStateOffline {
a++
}
}
return
}
// GetCorruptedCounts - returns the number of corrupted disks before
// and after heal
func (hri *HealResultItem) GetCorruptedCounts() (b, a int) {
if hri == nil {
return
}
for _, v := range hri.Before.Drives {
if v.State == DriveStateCorrupt {
b++
}
}
for _, v := range hri.After.Drives {
if v.State == DriveStateCorrupt {
a++
}
}
return
}
// GetOnlineCounts - returns the number of online disks before
// and after heal
func (hri *HealResultItem) GetOnlineCounts() (b, a int) {
if hri == nil {
return
}
for _, v := range hri.Before.Drives {
if v.State == DriveStateOk {
b++
}
}
for _, v := range hri.After.Drives {
if v.State == DriveStateOk {
a++
}
}
return
}
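// exampleDriveDelta is a hypothetical sketch and not part of the upstream API:
// it uses the counters above to report how many drives a heal result item
// brought back online.
func exampleDriveDelta(item HealResultItem) int {
	before, after := item.GetOnlineCounts()
	return after - before // positive when the heal repaired drives
}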
// Heal - API endpoint to start a heal sequence and to fetch its status.
// forceStart and forceStop are mutuallyally exclusive; at most one of them may
// be set to 'true'. Setting both returns an ErrInvalidArgument error.
func (adm *AdminClient) Heal(ctx context.Context, bucket, prefix string,
healOpts HealOpts, clientToken string, forceStart, forceStop bool) (
healStart HealStartSuccess, healTaskStatus HealTaskStatus, err error,
) {
if forceStart && forceStop {
return healStart, healTaskStatus, ErrInvalidArgument("forceStart and forceStop set to true is not allowed")
}
body, err := json.Marshal(healOpts)
if err != nil {
return healStart, healTaskStatus, err
}
path := fmt.Sprintf(adminAPIPrefix+"/heal/%s", bucket)
if bucket != "" && prefix != "" {
path += "/" + prefix
}
// execute POST request to heal api
queryVals := make(url.Values)
if clientToken != "" {
queryVals.Set("clientToken", clientToken)
body = []byte{}
}
// At most one of forceStart or forceStop may be set here.
if forceStart {
queryVals.Set("forceStart", "true")
} else if forceStop {
queryVals.Set("forceStop", "true")
}
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: path,
content: body,
queryValues: queryVals,
})
defer closeResponse(resp)
if err != nil {
return healStart, healTaskStatus, err
}
if resp.StatusCode != http.StatusOK {
return healStart, healTaskStatus, httpRespToErrorResponse(resp)
}
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return healStart, healTaskStatus, err
}
// Was it a status request?
if clientToken == "" {
// This was a start (or forceStop) request. As a special case,
// forceStop returns a struct similar to healStart, carrying the
// heal sequence information about the heal that was stopped.
err = json.Unmarshal(respBytes, &healStart)
} else {
err = json.Unmarshal(respBytes, &healTaskStatus)
}
if err != nil {
// Perhaps the server responded with an error after the success
// message; handle that case separately here.
var errResp ErrorResponse
err = json.Unmarshal(respBytes, &errResp)
if err != nil {
// Unknown structure return error anyways.
return healStart, healTaskStatus, err
}
return healStart, healTaskStatus, errResp
}
return healStart, healTaskStatus, nil
}
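// exampleStartAndPollHeal is a hypothetical usage sketch and not part of the
// upstream API: it starts a heal sequence on a bucket and then polls its
// status with the returned client token until result items show up. The
// bucket name and polling interval are placeholders.
func exampleStartAndPollHeal(ctx context.Context, adm *AdminClient, bucket string) (HealTaskStatus, error) {
	opts := HealOpts{Recursive: true, ScanMode: HealNormalScan}
	// An empty clientToken starts a new heal sequence.
	start, _, err := adm.Heal(ctx, bucket, "", opts, "", false, false)
	if err != nil {
		return HealTaskStatus{}, err
	}
	// Passing the token back turns the same call into a status request.
	for {
		_, status, err := adm.Heal(ctx, bucket, "", opts, start.ClientToken, false, false)
		if err != nil || len(status.Items) > 0 {
			return status, err
		}
		time.Sleep(2 * time.Second)
	}
}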
// MRFStatus exposes MRF metrics of a server
type MRFStatus struct {
BytesHealed uint64 `json:"bytes_healed"`
ItemsHealed uint64 `json:"items_healed"`
}
// BgHealState represents the status of the background heal
type BgHealState struct {
// List of offline endpoints with no background heal state info
OfflineEndpoints []string `json:"offline_nodes"`
// Total items scanned by the continuous background healing
ScannedItemsCount int64
// Disks currently in heal states
HealDisks []string
// SetStatus contains information for each set.
Sets []SetStatus `json:"sets"`
// Endpoint -> MRF Status
MRF map[string]MRFStatus `json:"mrf"`
// Parity per storage class
SCParity map[string]int `json:"sc_parity"`
}
// SetStatus contains information about the heal status of a set.
type SetStatus struct {
ID string `json:"id"`
PoolIndex int `json:"pool_index"`
SetIndex int `json:"set_index"`
HealStatus string `json:"heal_status"`
HealPriority string `json:"heal_priority"`
TotalObjects int `json:"total_objects"`
Disks []Disk `json:"disks"`
}
// HealingDriveReason describes why healing of a drive was started.
type HealingDriveReason int8
const (
// HealingReasonFreshDisk is the zero-value default and indicates a freshly added disk
HealingReasonFreshDisk HealingDriveReason = iota
// HealingReasonOfflineDisk means the disk was detected as being offline for too long
HealingReasonOfflineDisk
)
// HealingDisk contains information about a drive currently being healed.
type HealingDisk struct {
// Copied from cmd/background-newdisks-heal-ops.go
// When adding new field, update (*healingTracker).toHealingDisk
ID string `json:"id"`
HealID string `json:"heal_id"`
PoolIndex int `json:"pool_index"`
SetIndex int `json:"set_index"`
DiskIndex int `json:"disk_index"`
Endpoint string `json:"endpoint"`
Path string `json:"path"`
Started time.Time `json:"started"`
LastUpdate time.Time `json:"last_update"`
RetryAttempts uint64 `json:"retry_attempts"`
ObjectsTotalCount uint64 `json:"objects_total_count"`
ObjectsTotalSize uint64 `json:"objects_total_size"`
ItemsHealed uint64 `json:"items_healed"`
ItemsFailed uint64 `json:"items_failed"`
ItemsSkipped uint64 `json:"items_skipped"`
BytesDone uint64 `json:"bytes_done"`
BytesFailed uint64 `json:"bytes_failed"`
BytesSkipped uint64 `json:"bytes_skipped"`
ObjectsHealed uint64 `json:"objects_healed"` // Deprecated July 2021
ObjectsFailed uint64 `json:"objects_failed"` // Deprecated July 2021
// Last object scanned.
Bucket string `json:"current_bucket"`
Object string `json:"current_object"`
// Filled on startup/restarts.
QueuedBuckets []string `json:"queued_buckets"`
// Filled during heal.
HealedBuckets []string `json:"healed_buckets"`
// Healing of this drive is finished, successfully or not
Finished bool `json:"finished"`
// The reason the healing was started, in order to decide which drive has priority.
Reason HealingDriveReason `json:"reason"`
// Future: add more tracking capabilities here.
}
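// exampleHealingProgress is a hypothetical sketch and not part of the upstream
// API: it derives a rough completion percentage for a drive from the byte
// counters tracked in HealingDisk, counting failed and skipped bytes as
// processed (an assumption made for illustration only).
func exampleHealingProgress(d HealingDisk) float64 {
	if d.ObjectsTotalSize == 0 {
		return 0
	}
	done := d.BytesDone + d.BytesFailed + d.BytesSkipped
	return 100 * float64(done) / float64(d.ObjectsTotalSize)
}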
// Merge others into b.
func (b *BgHealState) Merge(others ...BgHealState) {
// SCParity is the same from all nodes, just pick
// the information from the first node.
if b.SCParity == nil && len(others) > 0 {
b.SCParity = make(map[string]int)
for k, v := range others[0].SCParity {
b.SCParity[k] = v
}
}
if b.MRF == nil {
b.MRF = make(map[string]MRFStatus)
}
for _, other := range others {
b.OfflineEndpoints = append(b.OfflineEndpoints, other.OfflineEndpoints...)
for k, v := range other.MRF {
b.MRF[k] = v
}
b.ScannedItemsCount += other.ScannedItemsCount
// Add the set if not present; if present, merge its disks
// into the existing entry.
addDisksFromSet := func(set SetStatus) {
found := -1
for idx, s := range b.Sets {
if s.PoolIndex == set.PoolIndex && s.SetIndex == set.SetIndex {
found = idx
}
}
if found == -1 {
b.Sets = append(b.Sets, set)
} else {
b.Sets[found].Disks = append(b.Sets[found].Disks, set.Disks...)
}
}
for _, set := range other.Sets {
addDisksFromSet(set)
}
}
sort.Slice(b.Sets, func(i, j int) bool {
if b.Sets[i].PoolIndex != b.Sets[j].PoolIndex {
return b.Sets[i].PoolIndex < b.Sets[j].PoolIndex
}
return b.Sets[i].SetIndex < b.Sets[j].SetIndex
})
}
// BackgroundHealStatus returns the background heal status of the
// current server or cluster.
func (adm *AdminClient) BackgroundHealStatus(ctx context.Context) (BgHealState, error) {
// Execute POST request to background heal status api
resp, err := adm.executeMethod(ctx,
http.MethodPost,
requestData{relPath: adminAPIPrefix + "/background-heal/status"})
if err != nil {
return BgHealState{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return BgHealState{}, httpRespToErrorResponse(resp)
}
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return BgHealState{}, err
}
var healState BgHealState
err = json.Unmarshal(respBytes, &healState)
if err != nil {
return BgHealState{}, err
}
return healState, nil
}
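// exampleClusterHealSummary is a hypothetical sketch and not part of the
// upstream API: it fetches the background heal status and sums the MRF
// counters across all reporting endpoints into a single summary line.
func exampleClusterHealSummary(ctx context.Context, adm *AdminClient) (string, error) {
	state, err := adm.BackgroundHealStatus(ctx)
	if err != nil {
		return "", err
	}
	var items, bytes uint64
	for _, mrf := range state.MRF {
		items += mrf.ItemsHealed
		bytes += mrf.BytesHealed
	}
	return fmt.Sprintf("scanned=%d healed-items=%d healed-bytes=%d offline-endpoints=%d",
		state.ScannedItemsCount, items, bytes, len(state.OfflineEndpoints)), nil
}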
golang-github-minio-madmin-go-3.0.104/heal-commands_gen.go 0000664 0000000 0000000 00000311571 14774251704 0023300 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BgHealState) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "offline_nodes":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "OfflineEndpoints")
return
}
if cap(z.OfflineEndpoints) >= int(zb0002) {
z.OfflineEndpoints = (z.OfflineEndpoints)[:zb0002]
} else {
z.OfflineEndpoints = make([]string, zb0002)
}
for za0001 := range z.OfflineEndpoints {
z.OfflineEndpoints[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "OfflineEndpoints", za0001)
return
}
}
case "ScannedItemsCount":
z.ScannedItemsCount, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ScannedItemsCount")
return
}
case "HealDisks":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "HealDisks")
return
}
if cap(z.HealDisks) >= int(zb0003) {
z.HealDisks = (z.HealDisks)[:zb0003]
} else {
z.HealDisks = make([]string, zb0003)
}
for za0002 := range z.HealDisks {
z.HealDisks[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealDisks", za0002)
return
}
}
case "sets":
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Sets")
return
}
if cap(z.Sets) >= int(zb0004) {
z.Sets = (z.Sets)[:zb0004]
} else {
z.Sets = make([]SetStatus, zb0004)
}
for za0003 := range z.Sets {
err = z.Sets[za0003].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Sets", za0003)
return
}
}
case "mrf":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "MRF")
return
}
if z.MRF == nil {
z.MRF = make(map[string]MRFStatus, zb0005)
} else if len(z.MRF) > 0 {
for key := range z.MRF {
delete(z.MRF, key)
}
}
for zb0005 > 0 {
zb0005--
var za0004 string
var za0005 MRFStatus
za0004, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "MRF")
return
}
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "MRF", za0004)
return
}
for zb0006 > 0 {
zb0006--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "MRF", za0004)
return
}
switch msgp.UnsafeString(field) {
case "bytes_healed":
za0005.BytesHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "MRF", za0004, "BytesHealed")
return
}
case "items_healed":
za0005.ItemsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "MRF", za0004, "ItemsHealed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "MRF", za0004)
return
}
}
}
z.MRF[za0004] = za0005
}
case "sc_parity":
var zb0007 uint32
zb0007, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "SCParity")
return
}
if z.SCParity == nil {
z.SCParity = make(map[string]int, zb0007)
} else if len(z.SCParity) > 0 {
for key := range z.SCParity {
delete(z.SCParity, key)
}
}
for zb0007 > 0 {
zb0007--
var za0006 string
var za0007 int
za0006, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SCParity")
return
}
za0007, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SCParity", za0006)
return
}
z.SCParity[za0006] = za0007
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BgHealState) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "offline_nodes"
err = en.Append(0x86, 0xad, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.OfflineEndpoints)))
if err != nil {
err = msgp.WrapError(err, "OfflineEndpoints")
return
}
for za0001 := range z.OfflineEndpoints {
err = en.WriteString(z.OfflineEndpoints[za0001])
if err != nil {
err = msgp.WrapError(err, "OfflineEndpoints", za0001)
return
}
}
// write "ScannedItemsCount"
err = en.Append(0xb1, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt64(z.ScannedItemsCount)
if err != nil {
err = msgp.WrapError(err, "ScannedItemsCount")
return
}
// write "HealDisks"
err = en.Append(0xa9, 0x48, 0x65, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.HealDisks)))
if err != nil {
err = msgp.WrapError(err, "HealDisks")
return
}
for za0002 := range z.HealDisks {
err = en.WriteString(z.HealDisks[za0002])
if err != nil {
err = msgp.WrapError(err, "HealDisks", za0002)
return
}
}
// write "sets"
err = en.Append(0xa4, 0x73, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Sets)))
if err != nil {
err = msgp.WrapError(err, "Sets")
return
}
for za0003 := range z.Sets {
err = z.Sets[za0003].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Sets", za0003)
return
}
}
// write "mrf"
err = en.Append(0xa3, 0x6d, 0x72, 0x66)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.MRF)))
if err != nil {
err = msgp.WrapError(err, "MRF")
return
}
for za0004, za0005 := range z.MRF {
err = en.WriteString(za0004)
if err != nil {
err = msgp.WrapError(err, "MRF")
return
}
// map header, size 2
// write "bytes_healed"
err = en.Append(0x82, 0xac, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(za0005.BytesHealed)
if err != nil {
err = msgp.WrapError(err, "MRF", za0004, "BytesHealed")
return
}
// write "items_healed"
err = en.Append(0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(za0005.ItemsHealed)
if err != nil {
err = msgp.WrapError(err, "MRF", za0004, "ItemsHealed")
return
}
}
// write "sc_parity"
err = en.Append(0xa9, 0x73, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.SCParity)))
if err != nil {
err = msgp.WrapError(err, "SCParity")
return
}
for za0006, za0007 := range z.SCParity {
err = en.WriteString(za0006)
if err != nil {
err = msgp.WrapError(err, "SCParity")
return
}
err = en.WriteInt(za0007)
if err != nil {
err = msgp.WrapError(err, "SCParity", za0006)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BgHealState) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "offline_nodes"
o = append(o, 0x86, 0xad, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.OfflineEndpoints)))
for za0001 := range z.OfflineEndpoints {
o = msgp.AppendString(o, z.OfflineEndpoints[za0001])
}
// string "ScannedItemsCount"
o = append(o, 0xb1, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt64(o, z.ScannedItemsCount)
// string "HealDisks"
o = append(o, 0xa9, 0x48, 0x65, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.HealDisks)))
for za0002 := range z.HealDisks {
o = msgp.AppendString(o, z.HealDisks[za0002])
}
// string "sets"
o = append(o, 0xa4, 0x73, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Sets)))
for za0003 := range z.Sets {
o, err = z.Sets[za0003].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Sets", za0003)
return
}
}
// string "mrf"
o = append(o, 0xa3, 0x6d, 0x72, 0x66)
o = msgp.AppendMapHeader(o, uint32(len(z.MRF)))
for za0004, za0005 := range z.MRF {
o = msgp.AppendString(o, za0004)
// map header, size 2
// string "bytes_healed"
o = append(o, 0x82, 0xac, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, za0005.BytesHealed)
// string "items_healed"
o = append(o, 0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, za0005.ItemsHealed)
}
// string "sc_parity"
o = append(o, 0xa9, 0x73, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79)
o = msgp.AppendMapHeader(o, uint32(len(z.SCParity)))
for za0006, za0007 := range z.SCParity {
o = msgp.AppendString(o, za0006)
o = msgp.AppendInt(o, za0007)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BgHealState) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "offline_nodes":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OfflineEndpoints")
return
}
if cap(z.OfflineEndpoints) >= int(zb0002) {
z.OfflineEndpoints = (z.OfflineEndpoints)[:zb0002]
} else {
z.OfflineEndpoints = make([]string, zb0002)
}
for za0001 := range z.OfflineEndpoints {
z.OfflineEndpoints[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OfflineEndpoints", za0001)
return
}
}
case "ScannedItemsCount":
z.ScannedItemsCount, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ScannedItemsCount")
return
}
case "HealDisks":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealDisks")
return
}
if cap(z.HealDisks) >= int(zb0003) {
z.HealDisks = (z.HealDisks)[:zb0003]
} else {
z.HealDisks = make([]string, zb0003)
}
for za0002 := range z.HealDisks {
z.HealDisks[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealDisks", za0002)
return
}
}
case "sets":
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Sets")
return
}
if cap(z.Sets) >= int(zb0004) {
z.Sets = (z.Sets)[:zb0004]
} else {
z.Sets = make([]SetStatus, zb0004)
}
for za0003 := range z.Sets {
bts, err = z.Sets[za0003].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Sets", za0003)
return
}
}
case "mrf":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MRF")
return
}
if z.MRF == nil {
z.MRF = make(map[string]MRFStatus, zb0005)
} else if len(z.MRF) > 0 {
for key := range z.MRF {
delete(z.MRF, key)
}
}
for zb0005 > 0 {
var za0004 string
var za0005 MRFStatus
zb0005--
za0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MRF")
return
}
var zb0006 uint32
zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MRF", za0004)
return
}
for zb0006 > 0 {
zb0006--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "MRF", za0004)
return
}
switch msgp.UnsafeString(field) {
case "bytes_healed":
za0005.BytesHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "MRF", za0004, "BytesHealed")
return
}
case "items_healed":
za0005.ItemsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "MRF", za0004, "ItemsHealed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "MRF", za0004)
return
}
}
}
z.MRF[za0004] = za0005
}
case "sc_parity":
var zb0007 uint32
zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SCParity")
return
}
if z.SCParity == nil {
z.SCParity = make(map[string]int, zb0007)
} else if len(z.SCParity) > 0 {
for key := range z.SCParity {
delete(z.SCParity, key)
}
}
for zb0007 > 0 {
var za0006 string
var za0007 int
zb0007--
za0006, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SCParity")
return
}
za0007, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SCParity", za0006)
return
}
z.SCParity[za0006] = za0007
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BgHealState) Msgsize() (s int) {
s = 1 + 14 + msgp.ArrayHeaderSize
for za0001 := range z.OfflineEndpoints {
s += msgp.StringPrefixSize + len(z.OfflineEndpoints[za0001])
}
s += 18 + msgp.Int64Size + 10 + msgp.ArrayHeaderSize
for za0002 := range z.HealDisks {
s += msgp.StringPrefixSize + len(z.HealDisks[za0002])
}
s += 5 + msgp.ArrayHeaderSize
for za0003 := range z.Sets {
s += z.Sets[za0003].Msgsize()
}
s += 4 + msgp.MapHeaderSize
if z.MRF != nil {
for za0004, za0005 := range z.MRF {
_ = za0005
s += msgp.StringPrefixSize + len(za0004) + 1 + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
}
}
s += 10 + msgp.MapHeaderSize
if z.SCParity != nil {
for za0006, za0007 := range z.SCParity {
_ = za0007
s += msgp.StringPrefixSize + len(za0006) + msgp.IntSize
}
}
return
}
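// exampleBgHealStateRoundTrip is a hypothetical sketch, not generated code and
// not part of the upstream API (it would normally live outside this generated
// file): it round-trips a BgHealState value through the MessagePack helpers
// defined above, which is how heal state is serialized for transport.
func exampleBgHealStateRoundTrip(in BgHealState) (BgHealState, error) {
	buf, err := in.MarshalMsg(nil) // encode into a fresh byte slice
	if err != nil {
		return BgHealState{}, err
	}
	var out BgHealState
	if _, err := out.UnmarshalMsg(buf); err != nil { // decode it back
		return BgHealState{}, err
	}
	return out, nil
}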
// DecodeMsg implements msgp.Decodable
func (z *HealDriveInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "uuid":
z.UUID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "UUID")
return
}
case "endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "state":
z.State, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "State")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z HealDriveInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "uuid"
err = en.Append(0x83, 0xa4, 0x75, 0x75, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.UUID)
if err != nil {
err = msgp.WrapError(err, "UUID")
return
}
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "state"
err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteString(z.State)
if err != nil {
err = msgp.WrapError(err, "State")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z HealDriveInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "uuid"
o = append(o, 0x83, 0xa4, 0x75, 0x75, 0x69, 0x64)
o = msgp.AppendString(o, z.UUID)
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "state"
o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
o = msgp.AppendString(o, z.State)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealDriveInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "uuid":
z.UUID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UUID")
return
}
case "endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "state":
z.State, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "State")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z HealDriveInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.UUID) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 6 + msgp.StringPrefixSize + len(z.State)
return
}
// DecodeMsg implements msgp.Decodable
func (z *HealItemType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 string
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = HealItemType(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z HealItemType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteString(string(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z HealItemType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealItemType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = HealItemType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z HealItemType) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// DecodeMsg implements msgp.Decodable
func (z *HealOpts) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "recursive":
z.Recursive, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
case "dryRun":
z.DryRun, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "DryRun")
return
}
case "remove":
z.Remove, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Remove")
return
}
case "recreate":
z.Recreate, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Recreate")
return
}
case "scanMode":
{
var zb0002 int
zb0002, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "ScanMode")
return
}
z.ScanMode = HealScanMode(zb0002)
}
case "updateParity":
z.UpdateParity, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "UpdateParity")
return
}
case "nolock":
z.NoLock, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "NoLock")
return
}
case "pool":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
z.Pool = nil
} else {
if z.Pool == nil {
z.Pool = new(int)
}
*z.Pool, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
}
zb0001Mask |= 0x1
case "set":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
z.Set = nil
} else {
if z.Set == nil {
z.Set = new(int)
}
*z.Set, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
}
zb0001Mask |= 0x2
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.Pool = nil
}
if (zb0001Mask & 0x2) == 0 {
z.Set = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *HealOpts) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(9)
var zb0001Mask uint16 /* 9 bits */
_ = zb0001Mask
if z.Pool == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.Set == nil {
zb0001Len--
zb0001Mask |= 0x100
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "recursive"
err = en.Append(0xa9, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Recursive)
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
// write "dryRun"
err = en.Append(0xa6, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e)
if err != nil {
return
}
err = en.WriteBool(z.DryRun)
if err != nil {
err = msgp.WrapError(err, "DryRun")
return
}
// write "remove"
err = en.Append(0xa6, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Remove)
if err != nil {
err = msgp.WrapError(err, "Remove")
return
}
// write "recreate"
err = en.Append(0xa8, 0x72, 0x65, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Recreate)
if err != nil {
err = msgp.WrapError(err, "Recreate")
return
}
// write "scanMode"
err = en.Append(0xa8, 0x73, 0x63, 0x61, 0x6e, 0x4d, 0x6f, 0x64, 0x65)
if err != nil {
return
}
err = en.WriteInt(int(z.ScanMode))
if err != nil {
err = msgp.WrapError(err, "ScanMode")
return
}
// write "updateParity"
err = en.Append(0xac, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteBool(z.UpdateParity)
if err != nil {
err = msgp.WrapError(err, "UpdateParity")
return
}
// write "nolock"
err = en.Append(0xa6, 0x6e, 0x6f, 0x6c, 0x6f, 0x63, 0x6b)
if err != nil {
return
}
err = en.WriteBool(z.NoLock)
if err != nil {
err = msgp.WrapError(err, "NoLock")
return
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "pool"
err = en.Append(0xa4, 0x70, 0x6f, 0x6f, 0x6c)
if err != nil {
return
}
if z.Pool == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = en.WriteInt(*z.Pool)
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// write "set"
err = en.Append(0xa3, 0x73, 0x65, 0x74)
if err != nil {
return
}
if z.Set == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = en.WriteInt(*z.Set)
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *HealOpts) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(9)
var zb0001Mask uint16 /* 9 bits */
_ = zb0001Mask
if z.Pool == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.Set == nil {
zb0001Len--
zb0001Mask |= 0x100
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "recursive"
o = append(o, 0xa9, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65)
o = msgp.AppendBool(o, z.Recursive)
// string "dryRun"
o = append(o, 0xa6, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e)
o = msgp.AppendBool(o, z.DryRun)
// string "remove"
o = append(o, 0xa6, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65)
o = msgp.AppendBool(o, z.Remove)
// string "recreate"
o = append(o, 0xa8, 0x72, 0x65, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65)
o = msgp.AppendBool(o, z.Recreate)
// string "scanMode"
o = append(o, 0xa8, 0x73, 0x63, 0x61, 0x6e, 0x4d, 0x6f, 0x64, 0x65)
o = msgp.AppendInt(o, int(z.ScanMode))
// string "updateParity"
o = append(o, 0xac, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
o = msgp.AppendBool(o, z.UpdateParity)
// string "nolock"
o = append(o, 0xa6, 0x6e, 0x6f, 0x6c, 0x6f, 0x63, 0x6b)
o = msgp.AppendBool(o, z.NoLock)
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "pool"
o = append(o, 0xa4, 0x70, 0x6f, 0x6f, 0x6c)
if z.Pool == nil {
o = msgp.AppendNil(o)
} else {
o = msgp.AppendInt(o, *z.Pool)
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// string "set"
o = append(o, 0xa3, 0x73, 0x65, 0x74)
if z.Set == nil {
o = msgp.AppendNil(o)
} else {
o = msgp.AppendInt(o, *z.Set)
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealOpts) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "recursive":
z.Recursive, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
case "dryRun":
z.DryRun, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DryRun")
return
}
case "remove":
z.Remove, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Remove")
return
}
case "recreate":
z.Recreate, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Recreate")
return
}
case "scanMode":
{
var zb0002 int
zb0002, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ScanMode")
return
}
z.ScanMode = HealScanMode(zb0002)
}
case "updateParity":
z.UpdateParity, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UpdateParity")
return
}
case "nolock":
z.NoLock, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NoLock")
return
}
case "pool":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Pool = nil
} else {
if z.Pool == nil {
z.Pool = new(int)
}
*z.Pool, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
}
zb0001Mask |= 0x1
case "set":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Set = nil
} else {
if z.Set == nil {
z.Set = new(int)
}
*z.Set, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
}
zb0001Mask |= 0x2
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.Pool = nil
}
if (zb0001Mask & 0x2) == 0 {
z.Set = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *HealOpts) Msgsize() (s int) {
s = 1 + 10 + msgp.BoolSize + 7 + msgp.BoolSize + 7 + msgp.BoolSize + 9 + msgp.BoolSize + 9 + msgp.IntSize + 13 + msgp.BoolSize + 7 + msgp.BoolSize + 5
if z.Pool == nil {
s += msgp.NilSize
} else {
s += msgp.IntSize
}
s += 4
if z.Set == nil {
s += msgp.NilSize
} else {
s += msgp.IntSize
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *HealResultItem) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "resultId":
z.ResultIndex, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ResultIndex")
return
}
case "type":
{
var zb0002 string
zb0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = HealItemType(zb0002)
}
case "bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "versionId":
z.VersionID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "VersionID")
return
}
case "detail":
z.Detail, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Detail")
return
}
case "parityBlocks":
z.ParityBlocks, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "ParityBlocks")
return
}
zb0001Mask |= 0x1
case "dataBlocks":
z.DataBlocks, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DataBlocks")
return
}
zb0001Mask |= 0x2
case "diskCount":
z.DiskCount, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskCount")
return
}
case "setCount":
z.SetCount, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetCount")
return
}
case "before":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Before")
return
}
for zb0003 > 0 {
zb0003--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Before")
return
}
switch msgp.UnsafeString(field) {
case "drives":
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Before", "Drives")
return
}
if cap(z.Before.Drives) >= int(zb0004) {
z.Before.Drives = (z.Before.Drives)[:zb0004]
} else {
z.Before.Drives = make([]HealDriveInfo, zb0004)
}
for za0001 := range z.Before.Drives {
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001)
return
}
for zb0005 > 0 {
zb0005--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001)
return
}
switch msgp.UnsafeString(field) {
case "uuid":
z.Before.Drives[za0001].UUID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "UUID")
return
}
case "endpoint":
z.Before.Drives[za0001].Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "Endpoint")
return
}
case "state":
z.Before.Drives[za0001].State, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "State")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001)
return
}
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Before")
return
}
}
}
case "after":
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "After")
return
}
for zb0006 > 0 {
zb0006--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "After")
return
}
switch msgp.UnsafeString(field) {
case "drives":
var zb0007 uint32
zb0007, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "After", "Drives")
return
}
if cap(z.After.Drives) >= int(zb0007) {
z.After.Drives = (z.After.Drives)[:zb0007]
} else {
z.After.Drives = make([]HealDriveInfo, zb0007)
}
for za0002 := range z.After.Drives {
var zb0008 uint32
zb0008, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002)
return
}
for zb0008 > 0 {
zb0008--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002)
return
}
switch msgp.UnsafeString(field) {
case "uuid":
z.After.Drives[za0002].UUID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "UUID")
return
}
case "endpoint":
z.After.Drives[za0002].Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "Endpoint")
return
}
case "state":
z.After.Drives[za0002].State, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "State")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002)
return
}
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "After")
return
}
}
}
case "objectSize":
z.ObjectSize, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ObjectSize")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.ParityBlocks = 0
}
if (zb0001Mask & 0x2) == 0 {
z.DataBlocks = 0
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *HealResultItem) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(13)
var zb0001Mask uint16 /* 13 bits */
_ = zb0001Mask
if z.ParityBlocks == 0 {
zb0001Len--
zb0001Mask |= 0x40
}
if z.DataBlocks == 0 {
zb0001Len--
zb0001Mask |= 0x80
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "resultId"
err = en.Append(0xa8, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.ResultIndex)
if err != nil {
err = msgp.WrapError(err, "ResultIndex")
return
}
// write "type"
err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(string(z.Type))
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "bucket"
err = en.Append(0xa6, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "object"
err = en.Append(0xa6, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "versionId"
err = en.Append(0xa9, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64)
if err != nil {
return
}
err = en.WriteString(z.VersionID)
if err != nil {
err = msgp.WrapError(err, "VersionID")
return
}
// write "detail"
err = en.Append(0xa6, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c)
if err != nil {
return
}
err = en.WriteString(z.Detail)
if err != nil {
err = msgp.WrapError(err, "Detail")
return
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "parityBlocks"
err = en.Append(0xac, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.ParityBlocks)
if err != nil {
err = msgp.WrapError(err, "ParityBlocks")
return
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "dataBlocks"
err = en.Append(0xaa, 0x64, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.DataBlocks)
if err != nil {
err = msgp.WrapError(err, "DataBlocks")
return
}
}
// write "diskCount"
err = en.Append(0xa9, 0x64, 0x69, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.DiskCount)
if err != nil {
err = msgp.WrapError(err, "DiskCount")
return
}
// write "setCount"
err = en.Append(0xa8, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.SetCount)
if err != nil {
err = msgp.WrapError(err, "SetCount")
return
}
// write "before"
err = en.Append(0xa6, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
if err != nil {
return
}
// map header, size 1
// write "drives"
err = en.Append(0x81, 0xa6, 0x64, 0x72, 0x69, 0x76, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Before.Drives)))
if err != nil {
err = msgp.WrapError(err, "Before", "Drives")
return
}
for za0001 := range z.Before.Drives {
// map header, size 3
// write "uuid"
err = en.Append(0x83, 0xa4, 0x75, 0x75, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.Before.Drives[za0001].UUID)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "UUID")
return
}
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Before.Drives[za0001].Endpoint)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "Endpoint")
return
}
// write "state"
err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Before.Drives[za0001].State)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "State")
return
}
}
// write "after"
err = en.Append(0xa5, 0x61, 0x66, 0x74, 0x65, 0x72)
if err != nil {
return
}
// map header, size 1
// write "drives"
err = en.Append(0x81, 0xa6, 0x64, 0x72, 0x69, 0x76, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.After.Drives)))
if err != nil {
err = msgp.WrapError(err, "After", "Drives")
return
}
for za0002 := range z.After.Drives {
// map header, size 3
// write "uuid"
err = en.Append(0x83, 0xa4, 0x75, 0x75, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.After.Drives[za0002].UUID)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "UUID")
return
}
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.After.Drives[za0002].Endpoint)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "Endpoint")
return
}
// write "state"
err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteString(z.After.Drives[za0002].State)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "State")
return
}
}
// write "objectSize"
err = en.Append(0xaa, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.ObjectSize)
if err != nil {
err = msgp.WrapError(err, "ObjectSize")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *HealResultItem) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(13)
var zb0001Mask uint16 /* 13 bits */
_ = zb0001Mask
if z.ParityBlocks == 0 {
zb0001Len--
zb0001Mask |= 0x40
}
if z.DataBlocks == 0 {
zb0001Len--
zb0001Mask |= 0x80
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "resultId"
o = append(o, 0xa8, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x64)
o = msgp.AppendInt64(o, z.ResultIndex)
// string "type"
o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, string(z.Type))
// string "bucket"
o = append(o, 0xa6, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "object"
o = append(o, 0xa6, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "versionId"
o = append(o, 0xa9, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64)
o = msgp.AppendString(o, z.VersionID)
// string "detail"
o = append(o, 0xa6, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c)
o = msgp.AppendString(o, z.Detail)
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "parityBlocks"
o = append(o, 0xac, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73)
o = msgp.AppendInt(o, z.ParityBlocks)
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "dataBlocks"
o = append(o, 0xaa, 0x64, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73)
o = msgp.AppendInt(o, z.DataBlocks)
}
// string "diskCount"
o = append(o, 0xa9, 0x64, 0x69, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt(o, z.DiskCount)
// string "setCount"
o = append(o, 0xa8, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt(o, z.SetCount)
// string "before"
o = append(o, 0xa6, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65)
// map header, size 1
// string "drives"
o = append(o, 0x81, 0xa6, 0x64, 0x72, 0x69, 0x76, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Before.Drives)))
for za0001 := range z.Before.Drives {
// map header, size 3
// string "uuid"
o = append(o, 0x83, 0xa4, 0x75, 0x75, 0x69, 0x64)
o = msgp.AppendString(o, z.Before.Drives[za0001].UUID)
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Before.Drives[za0001].Endpoint)
// string "state"
o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
o = msgp.AppendString(o, z.Before.Drives[za0001].State)
}
// string "after"
o = append(o, 0xa5, 0x61, 0x66, 0x74, 0x65, 0x72)
// map header, size 1
// string "drives"
o = append(o, 0x81, 0xa6, 0x64, 0x72, 0x69, 0x76, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.After.Drives)))
for za0002 := range z.After.Drives {
// map header, size 3
// string "uuid"
o = append(o, 0x83, 0xa4, 0x75, 0x75, 0x69, 0x64)
o = msgp.AppendString(o, z.After.Drives[za0002].UUID)
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.After.Drives[za0002].Endpoint)
// string "state"
o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
o = msgp.AppendString(o, z.After.Drives[za0002].State)
}
// string "objectSize"
o = append(o, 0xaa, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendInt64(o, z.ObjectSize)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealResultItem) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "resultId":
z.ResultIndex, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResultIndex")
return
}
case "type":
{
var zb0002 string
zb0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = HealItemType(zb0002)
}
case "bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "versionId":
z.VersionID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VersionID")
return
}
case "detail":
z.Detail, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Detail")
return
}
case "parityBlocks":
z.ParityBlocks, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ParityBlocks")
return
}
zb0001Mask |= 0x1
case "dataBlocks":
z.DataBlocks, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DataBlocks")
return
}
zb0001Mask |= 0x2
case "diskCount":
z.DiskCount, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskCount")
return
}
case "setCount":
z.SetCount, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetCount")
return
}
case "before":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Before")
return
}
for zb0003 > 0 {
zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Before")
return
}
switch msgp.UnsafeString(field) {
case "drives":
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives")
return
}
if cap(z.Before.Drives) >= int(zb0004) {
z.Before.Drives = (z.Before.Drives)[:zb0004]
} else {
z.Before.Drives = make([]HealDriveInfo, zb0004)
}
for za0001 := range z.Before.Drives {
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001)
return
}
for zb0005 > 0 {
zb0005--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001)
return
}
switch msgp.UnsafeString(field) {
case "uuid":
z.Before.Drives[za0001].UUID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "UUID")
return
}
case "endpoint":
z.Before.Drives[za0001].Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "Endpoint")
return
}
case "state":
z.Before.Drives[za0001].State, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001, "State")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Before", "Drives", za0001)
return
}
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Before")
return
}
}
}
case "after":
var zb0006 uint32
zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "After")
return
}
for zb0006 > 0 {
zb0006--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "After")
return
}
switch msgp.UnsafeString(field) {
case "drives":
var zb0007 uint32
zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "After", "Drives")
return
}
if cap(z.After.Drives) >= int(zb0007) {
z.After.Drives = (z.After.Drives)[:zb0007]
} else {
z.After.Drives = make([]HealDriveInfo, zb0007)
}
for za0002 := range z.After.Drives {
var zb0008 uint32
zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002)
return
}
for zb0008 > 0 {
zb0008--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002)
return
}
switch msgp.UnsafeString(field) {
case "uuid":
z.After.Drives[za0002].UUID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "UUID")
return
}
case "endpoint":
z.After.Drives[za0002].Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "Endpoint")
return
}
case "state":
z.After.Drives[za0002].State, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002, "State")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "After", "Drives", za0002)
return
}
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "After")
return
}
}
}
case "objectSize":
z.ObjectSize, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectSize")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.ParityBlocks = 0
}
if (zb0001Mask & 0x2) == 0 {
z.DataBlocks = 0
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *HealResultItem) Msgsize() (s int) {
s = 1 + 9 + msgp.Int64Size + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 10 + msgp.StringPrefixSize + len(z.VersionID) + 7 + msgp.StringPrefixSize + len(z.Detail) + 13 + msgp.IntSize + 11 + msgp.IntSize + 10 + msgp.IntSize + 9 + msgp.IntSize + 7 + 1 + 7 + msgp.ArrayHeaderSize
for za0001 := range z.Before.Drives {
s += 1 + 5 + msgp.StringPrefixSize + len(z.Before.Drives[za0001].UUID) + 9 + msgp.StringPrefixSize + len(z.Before.Drives[za0001].Endpoint) + 6 + msgp.StringPrefixSize + len(z.Before.Drives[za0001].State)
}
s += 6 + 1 + 7 + msgp.ArrayHeaderSize
for za0002 := range z.After.Drives {
s += 1 + 5 + msgp.StringPrefixSize + len(z.After.Drives[za0002].UUID) + 9 + msgp.StringPrefixSize + len(z.After.Drives[za0002].Endpoint) + 6 + msgp.StringPrefixSize + len(z.After.Drives[za0002].State)
}
s += 11 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *HealScanMode) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 int
zb0001, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = HealScanMode(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z HealScanMode) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteInt(int(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z HealScanMode) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendInt(o, int(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealScanMode) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 int
zb0001, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = HealScanMode(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z HealScanMode) Msgsize() (s int) {
s = msgp.IntSize
return
}
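// Usage sketch (illustrative only, not part of the generated code): the
// methods above round-trip a HealScanMode value through its MessagePack
// encoding. Variable names below are hypothetical.
//
//	var mode HealScanMode
//	buf, err := mode.MarshalMsg(nil)
//	if err != nil {
//		// handle error
//	}
//	var decoded HealScanMode
//	if _, err = decoded.UnmarshalMsg(buf); err != nil {
//		// handle error
//	}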
// DecodeMsg implements msgp.Decodable
func (z *HealStartSuccess) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "clientToken":
z.ClientToken, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ClientToken")
return
}
case "clientAddress":
z.ClientAddress, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ClientAddress")
return
}
case "startTime":
z.StartTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z HealStartSuccess) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "clientToken"
err = en.Append(0x83, 0xab, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.ClientToken)
if err != nil {
err = msgp.WrapError(err, "ClientToken")
return
}
// write "clientAddress"
err = en.Append(0xad, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73)
if err != nil {
return
}
err = en.WriteString(z.ClientAddress)
if err != nil {
err = msgp.WrapError(err, "ClientAddress")
return
}
// write "startTime"
err = en.Append(0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.StartTime)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z HealStartSuccess) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "clientToken"
o = append(o, 0x83, 0xab, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
o = msgp.AppendString(o, z.ClientToken)
// string "clientAddress"
o = append(o, 0xad, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73)
o = msgp.AppendString(o, z.ClientAddress)
// string "startTime"
o = append(o, 0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendTime(o, z.StartTime)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealStartSuccess) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "clientToken":
z.ClientToken, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ClientToken")
return
}
case "clientAddress":
z.ClientAddress, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ClientAddress")
return
}
case "startTime":
z.StartTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z HealStartSuccess) Msgsize() (s int) {
s = 1 + 12 + msgp.StringPrefixSize + len(z.ClientToken) + 14 + msgp.StringPrefixSize + len(z.ClientAddress) + 10 + msgp.TimeSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *HealStopSuccess) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "clientToken":
z.ClientToken, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ClientToken")
return
}
case "clientAddress":
z.ClientAddress, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ClientAddress")
return
}
case "startTime":
z.StartTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z HealStopSuccess) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "clientToken"
err = en.Append(0x83, 0xab, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.ClientToken)
if err != nil {
err = msgp.WrapError(err, "ClientToken")
return
}
// write "clientAddress"
err = en.Append(0xad, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73)
if err != nil {
return
}
err = en.WriteString(z.ClientAddress)
if err != nil {
err = msgp.WrapError(err, "ClientAddress")
return
}
// write "startTime"
err = en.Append(0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.StartTime)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z HealStopSuccess) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "clientToken"
o = append(o, 0x83, 0xab, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e)
o = msgp.AppendString(o, z.ClientToken)
// string "clientAddress"
o = append(o, 0xad, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73)
o = msgp.AppendString(o, z.ClientAddress)
// string "startTime"
o = append(o, 0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendTime(o, z.StartTime)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealStopSuccess) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "clientToken":
z.ClientToken, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ClientToken")
return
}
case "clientAddress":
z.ClientAddress, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ClientAddress")
return
}
case "startTime":
z.StartTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z HealStopSuccess) Msgsize() (s int) {
s = 1 + 12 + msgp.StringPrefixSize + len(z.ClientToken) + 14 + msgp.StringPrefixSize + len(z.ClientAddress) + 10 + msgp.TimeSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *HealTaskStatus) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "summary":
z.Summary, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Summary")
return
}
case "detail":
z.FailureDetail, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "FailureDetail")
return
}
case "startTime":
z.StartTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "settings":
err = z.HealSettings.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "HealSettings")
return
}
case "items":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Items")
return
}
if cap(z.Items) >= int(zb0002) {
z.Items = (z.Items)[:zb0002]
} else {
z.Items = make([]HealResultItem, zb0002)
}
for za0001 := range z.Items {
err = z.Items[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Items", za0001)
return
}
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Items = nil
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *HealTaskStatus) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(5)
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
if z.Items == nil {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "summary"
err = en.Append(0xa7, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79)
if err != nil {
return
}
err = en.WriteString(z.Summary)
if err != nil {
err = msgp.WrapError(err, "Summary")
return
}
// write "detail"
err = en.Append(0xa6, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c)
if err != nil {
return
}
err = en.WriteString(z.FailureDetail)
if err != nil {
err = msgp.WrapError(err, "FailureDetail")
return
}
// write "startTime"
err = en.Append(0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.StartTime)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
// write "settings"
err = en.Append(0xa8, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73)
if err != nil {
return
}
err = z.HealSettings.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "HealSettings")
return
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "items"
err = en.Append(0xa5, 0x69, 0x74, 0x65, 0x6d, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Items)))
if err != nil {
err = msgp.WrapError(err, "Items")
return
}
for za0001 := range z.Items {
err = z.Items[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Items", za0001)
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *HealTaskStatus) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(5)
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
if z.Items == nil {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "summary"
o = append(o, 0xa7, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79)
o = msgp.AppendString(o, z.Summary)
// string "detail"
o = append(o, 0xa6, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c)
o = msgp.AppendString(o, z.FailureDetail)
// string "startTime"
o = append(o, 0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendTime(o, z.StartTime)
// string "settings"
o = append(o, 0xa8, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73)
o, err = z.HealSettings.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "HealSettings")
return
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "items"
o = append(o, 0xa5, 0x69, 0x74, 0x65, 0x6d, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Items)))
for za0001 := range z.Items {
o, err = z.Items[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Items", za0001)
return
}
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealTaskStatus) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "summary":
z.Summary, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Summary")
return
}
case "detail":
z.FailureDetail, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailureDetail")
return
}
case "startTime":
z.StartTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "settings":
bts, err = z.HealSettings.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "HealSettings")
return
}
case "items":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Items")
return
}
if cap(z.Items) >= int(zb0002) {
z.Items = (z.Items)[:zb0002]
} else {
z.Items = make([]HealResultItem, zb0002)
}
for za0001 := range z.Items {
bts, err = z.Items[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Items", za0001)
return
}
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Items = nil
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *HealTaskStatus) Msgsize() (s int) {
s = 1 + 8 + msgp.StringPrefixSize + len(z.Summary) + 7 + msgp.StringPrefixSize + len(z.FailureDetail) + 10 + msgp.TimeSize + 9 + z.HealSettings.Msgsize() + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Items {
s += z.Items[za0001].Msgsize()
}
return
}
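// Usage sketch (illustrative only, not part of the generated code): because
// HealTaskStatus implements msgp.Encodable and msgp.Decodable, it can be
// streamed through any io.Writer/io.Reader pair with the package-level
// helpers, the same way the generated tests below do. The bytes.Buffer here
// is hypothetical.
//
//	var status HealTaskStatus
//	var buf bytes.Buffer
//	if err := msgp.Encode(&buf, &status); err != nil {
//		// handle error
//	}
//	var decoded HealTaskStatus
//	if err := msgp.Decode(&buf, &decoded); err != nil {
//		// handle error
//	}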
// DecodeMsg implements msgp.Decodable
func (z *HealingDisk) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "heal_id":
z.HealID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
case "pool_index":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "set_index":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "disk_index":
z.DiskIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "path":
z.Path, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "last_update":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "retry_attempts":
z.RetryAttempts, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "objects_total_count":
z.ObjectsTotalCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
case "objects_total_size":
z.ObjectsTotalSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
case "items_healed":
z.ItemsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
case "items_failed":
z.ItemsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsFailed")
return
}
case "items_skipped":
z.ItemsSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
case "bytes_done":
z.BytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "bytes_failed":
z.BytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "bytes_skipped":
z.BytesSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "objects_healed":
z.ObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "objects_failed":
z.ObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "current_bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "current_object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "queued_buckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "healed_buckets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
case "finished":
z.Finished, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
case "reason":
{
var zb0004 int8
zb0004, err = dc.ReadInt8()
if err != nil {
err = msgp.WrapError(err, "Reason")
return
}
z.Reason = HealingDriveReason(zb0004)
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *HealingDisk) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 26
// write "id"
err = en.Append(0xde, 0x0, 0x1a, 0xa2, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "heal_id"
err = en.Append(0xa7, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.HealID)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
// write "pool_index"
err = en.Append(0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "set_index"
err = en.Append(0xa9, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "disk_index"
err = en.Append(0xaa, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.DiskIndex)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "path"
err = en.Append(0xa4, 0x70, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.Path)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
// write "started"
err = en.Append(0xa7, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "last_update"
err = en.Append(0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "retry_attempts"
err = en.Append(0xae, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.RetryAttempts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "objects_total_count"
err = en.Append(0xb3, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsTotalCount)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
// write "objects_total_size"
err = en.Append(0xb2, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsTotalSize)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
// write "items_healed"
err = en.Append(0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsHealed)
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
// write "items_failed"
err = en.Append(0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsFailed)
if err != nil {
err = msgp.WrapError(err, "ItemsFailed")
return
}
// write "items_skipped"
err = en.Append(0xad, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsSkipped)
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
// write "bytes_done"
err = en.Append(0xaa, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.BytesDone)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
// write "bytes_failed"
err = en.Append(0xac, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
// write "bytes_skipped"
err = en.Append(0xad, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesSkipped)
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
// write "objects_healed"
err = en.Append(0xae, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
// write "objects_failed"
err = en.Append(0xae, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "current_bucket"
err = en.Append(0xae, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "current_object"
err = en.Append(0xae, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "queued_buckets"
err = en.Append(0xae, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.QueuedBuckets)))
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
for za0001 := range z.QueuedBuckets {
err = en.WriteString(z.QueuedBuckets[za0001])
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
// write "healed_buckets"
err = en.Append(0xae, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.HealedBuckets)))
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
for za0002 := range z.HealedBuckets {
err = en.WriteString(z.HealedBuckets[za0002])
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
// write "finished"
err = en.Append(0xa8, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Finished)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
// write "reason"
err = en.Append(0xa6, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteInt8(int8(z.Reason))
if err != nil {
err = msgp.WrapError(err, "Reason")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *HealingDisk) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 26
// string "id"
o = append(o, 0xde, 0x0, 0x1a, 0xa2, 0x69, 0x64)
o = msgp.AppendString(o, z.ID)
// string "heal_id"
o = append(o, 0xa7, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x69, 0x64)
o = msgp.AppendString(o, z.HealID)
// string "pool_index"
o = append(o, 0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "set_index"
o = append(o, 0xa9, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "disk_index"
o = append(o, 0xaa, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.DiskIndex)
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "path"
o = append(o, 0xa4, 0x70, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.Path)
// string "started"
o = append(o, 0xa7, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "last_update"
o = append(o, 0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "retry_attempts"
o = append(o, 0xae, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
o = msgp.AppendUint64(o, z.RetryAttempts)
// string "objects_total_count"
o = append(o, 0xb3, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ObjectsTotalCount)
// string "objects_total_size"
o = append(o, 0xb2, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ObjectsTotalSize)
// string "items_healed"
o = append(o, 0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsHealed)
// string "items_failed"
o = append(o, 0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsFailed)
// string "items_skipped"
o = append(o, 0xad, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsSkipped)
// string "bytes_done"
o = append(o, 0xaa, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.BytesDone)
// string "bytes_failed"
o = append(o, 0xac, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesFailed)
// string "bytes_skipped"
o = append(o, 0xad, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesSkipped)
// string "objects_healed"
o = append(o, 0xae, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsHealed)
// string "objects_failed"
o = append(o, 0xae, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsFailed)
// string "current_bucket"
o = append(o, 0xae, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "current_object"
o = append(o, 0xae, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "queued_buckets"
o = append(o, 0xae, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
for za0001 := range z.QueuedBuckets {
o = msgp.AppendString(o, z.QueuedBuckets[za0001])
}
// string "healed_buckets"
o = append(o, 0xae, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.HealedBuckets)))
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
// string "finished"
o = append(o, 0xa8, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
o = msgp.AppendBool(o, z.Finished)
// string "reason"
o = append(o, 0xa6, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e)
o = msgp.AppendInt8(o, int8(z.Reason))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealingDisk) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "heal_id":
z.HealID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealID")
return
}
case "pool_index":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "set_index":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "disk_index":
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "path":
z.Path, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "last_update":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "retry_attempts":
z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "objects_total_count":
z.ObjectsTotalCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
case "objects_total_size":
z.ObjectsTotalSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
case "items_healed":
z.ItemsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
case "items_failed":
z.ItemsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsFailed")
return
}
case "items_skipped":
z.ItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsSkipped")
return
}
case "bytes_done":
z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "bytes_failed":
z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "bytes_skipped":
z.BytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "objects_healed":
z.ObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "objects_failed":
z.ObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "current_bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "current_object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "queued_buckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "healed_buckets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
case "finished":
z.Finished, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
case "reason":
{
var zb0004 int8
zb0004, bts, err = msgp.ReadInt8Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Reason")
return
}
z.Reason = HealingDriveReason(zb0004)
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *HealingDisk) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 8 + msgp.StringPrefixSize + len(z.HealID) + 11 + msgp.IntSize + 10 + msgp.IntSize + 11 + msgp.IntSize + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 8 + msgp.TimeSize + 12 + msgp.TimeSize + 15 + msgp.Uint64Size + 20 + msgp.Uint64Size + 19 + msgp.Uint64Size + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 11 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 15 + msgp.Uint64Size + 15 + msgp.Uint64Size + 15 + msgp.StringPrefixSize + len(z.Bucket) + 15 + msgp.StringPrefixSize + len(z.Object) + 15 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
s += 15 + msgp.ArrayHeaderSize
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 9 + msgp.BoolSize + 7 + msgp.Int8Size
return
}
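// Usage sketch (illustrative only, not part of the generated code): Msgsize
// returns an upper bound, so it can pre-size a reusable buffer for
// MarshalMsg, mirroring the pattern used by the generated benchmarks.
// Names below are hypothetical.
//
//	var d HealingDisk
//	buf := make([]byte, 0, d.Msgsize())
//	buf, err := d.MarshalMsg(buf[:0])
//	if err != nil {
//		// handle error
//	}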
// DecodeMsg implements msgp.Decodable
func (z *HealingDriveReason) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 int8
zb0001, err = dc.ReadInt8()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = HealingDriveReason(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z HealingDriveReason) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteInt8(int8(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z HealingDriveReason) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendInt8(o, int8(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *HealingDriveReason) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 int8
zb0001, bts, err = msgp.ReadInt8Bytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = HealingDriveReason(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z HealingDriveReason) Msgsize() (s int) {
s = msgp.Int8Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *MRFStatus) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "bytes_healed":
z.BytesHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesHealed")
return
}
case "items_healed":
z.ItemsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z MRFStatus) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "bytes_healed"
err = en.Append(0x82, 0xac, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesHealed)
if err != nil {
err = msgp.WrapError(err, "BytesHealed")
return
}
// write "items_healed"
err = en.Append(0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ItemsHealed)
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z MRFStatus) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "bytes_healed"
o = append(o, 0x82, 0xac, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesHealed)
// string "items_healed"
o = append(o, 0xac, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ItemsHealed)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MRFStatus) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "bytes_healed":
z.BytesHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesHealed")
return
}
case "items_healed":
z.ItemsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsHealed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MRFStatus) Msgsize() (s int) {
s = 1 + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *SetStatus) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "pool_index":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "set_index":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "heal_status":
z.HealStatus, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealStatus")
return
}
case "heal_priority":
z.HealPriority, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealPriority")
return
}
case "total_objects":
z.TotalObjects, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "TotalObjects")
return
}
case "disks":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]Disk, zb0002)
}
for za0001 := range z.Disks {
err = z.Disks[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *SetStatus) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 7
// write "id"
err = en.Append(0x87, 0xa2, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "pool_index"
err = en.Append(0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "set_index"
err = en.Append(0xa9, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "heal_status"
err = en.Append(0xab, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(z.HealStatus)
if err != nil {
err = msgp.WrapError(err, "HealStatus")
return
}
// write "heal_priority"
err = en.Append(0xad, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteString(z.HealPriority)
if err != nil {
err = msgp.WrapError(err, "HealPriority")
return
}
// write "total_objects"
err = en.Append(0xad, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.TotalObjects)
if err != nil {
err = msgp.WrapError(err, "TotalObjects")
return
}
// write "disks"
err = en.Append(0xa5, 0x64, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Disks)))
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
for za0001 := range z.Disks {
err = z.Disks[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *SetStatus) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 7
// string "id"
o = append(o, 0x87, 0xa2, 0x69, 0x64)
o = msgp.AppendString(o, z.ID)
// string "pool_index"
o = append(o, 0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "set_index"
o = append(o, 0xa9, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "heal_status"
o = append(o, 0xab, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, z.HealStatus)
// string "heal_priority"
o = append(o, 0xad, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79)
o = msgp.AppendString(o, z.HealPriority)
// string "total_objects"
o = append(o, 0xad, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
o = msgp.AppendInt(o, z.TotalObjects)
// string "disks"
o = append(o, 0xa5, 0x64, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
for za0001 := range z.Disks {
o, err = z.Disks[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *SetStatus) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "pool_index":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "set_index":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "heal_status":
z.HealStatus, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealStatus")
return
}
case "heal_priority":
z.HealPriority, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealPriority")
return
}
case "total_objects":
z.TotalObjects, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalObjects")
return
}
case "disks":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]Disk, zb0002)
}
for za0001 := range z.Disks {
bts, err = z.Disks[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *SetStatus) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 11 + msgp.IntSize + 10 + msgp.IntSize + 12 + msgp.StringPrefixSize + len(z.HealStatus) + 14 + msgp.StringPrefixSize + len(z.HealPriority) + 14 + msgp.IntSize + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Disks {
s += z.Disks[za0001].Msgsize()
}
return
}
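// Usage sketch (illustrative only, not part of the generated code): decoding
// a SetStatus directly from a stream with DecodeMsg. The reader r is
// hypothetical; any io.Reader carrying MessagePack data works.
//
//	dc := msgp.NewReader(r)
//	var st SetStatus
//	if err := st.DecodeMsg(dc); err != nil {
//		// handle error
//	}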
golang-github-minio-madmin-go-3.0.104/heal-commands_gen_test.go 0000664 0000000 0000000 00000053577 14774251704 0024350 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBgHealState(t *testing.T) {
v := BgHealState{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBgHealState(b *testing.B) {
v := BgHealState{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBgHealState(b *testing.B) {
v := BgHealState{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBgHealState(b *testing.B) {
v := BgHealState{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBgHealState(t *testing.T) {
v := BgHealState{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBgHealState Msgsize() is inaccurate")
}
vn := BgHealState{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBgHealState(b *testing.B) {
v := BgHealState{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBgHealState(b *testing.B) {
v := BgHealState{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalHealDriveInfo(t *testing.T) {
v := HealDriveInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgHealDriveInfo(b *testing.B) {
v := HealDriveInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgHealDriveInfo(b *testing.B) {
v := HealDriveInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalHealDriveInfo(b *testing.B) {
v := HealDriveInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeHealDriveInfo(t *testing.T) {
v := HealDriveInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeHealDriveInfo Msgsize() is inaccurate")
}
vn := HealDriveInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeHealDriveInfo(b *testing.B) {
v := HealDriveInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeHealDriveInfo(b *testing.B) {
v := HealDriveInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalHealOpts(t *testing.T) {
v := HealOpts{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgHealOpts(b *testing.B) {
v := HealOpts{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgHealOpts(b *testing.B) {
v := HealOpts{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalHealOpts(b *testing.B) {
v := HealOpts{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeHealOpts(t *testing.T) {
v := HealOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeHealOpts Msgsize() is inaccurate")
}
vn := HealOpts{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeHealOpts(b *testing.B) {
v := HealOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeHealOpts(b *testing.B) {
v := HealOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalHealResultItem(t *testing.T) {
v := HealResultItem{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgHealResultItem(b *testing.B) {
v := HealResultItem{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgHealResultItem(b *testing.B) {
v := HealResultItem{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalHealResultItem(b *testing.B) {
v := HealResultItem{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeHealResultItem(t *testing.T) {
v := HealResultItem{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeHealResultItem Msgsize() is inaccurate")
}
vn := HealResultItem{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeHealResultItem(b *testing.B) {
v := HealResultItem{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeHealResultItem(b *testing.B) {
v := HealResultItem{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalHealStartSuccess(t *testing.T) {
v := HealStartSuccess{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgHealStartSuccess(b *testing.B) {
v := HealStartSuccess{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgHealStartSuccess(b *testing.B) {
v := HealStartSuccess{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalHealStartSuccess(b *testing.B) {
v := HealStartSuccess{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeHealStartSuccess(t *testing.T) {
v := HealStartSuccess{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeHealStartSuccess Msgsize() is inaccurate")
}
vn := HealStartSuccess{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeHealStartSuccess(b *testing.B) {
v := HealStartSuccess{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeHealStartSuccess(b *testing.B) {
v := HealStartSuccess{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalHealStopSuccess(t *testing.T) {
v := HealStopSuccess{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgHealStopSuccess(b *testing.B) {
v := HealStopSuccess{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgHealStopSuccess(b *testing.B) {
v := HealStopSuccess{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalHealStopSuccess(b *testing.B) {
v := HealStopSuccess{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeHealStopSuccess(t *testing.T) {
v := HealStopSuccess{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeHealStopSuccess Msgsize() is inaccurate")
}
vn := HealStopSuccess{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeHealStopSuccess(b *testing.B) {
v := HealStopSuccess{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeHealStopSuccess(b *testing.B) {
v := HealStopSuccess{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalHealTaskStatus(t *testing.T) {
v := HealTaskStatus{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgHealTaskStatus(b *testing.B) {
v := HealTaskStatus{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgHealTaskStatus(b *testing.B) {
v := HealTaskStatus{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalHealTaskStatus(b *testing.B) {
v := HealTaskStatus{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeHealTaskStatus(t *testing.T) {
v := HealTaskStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeHealTaskStatus Msgsize() is inaccurate")
}
vn := HealTaskStatus{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeHealTaskStatus(b *testing.B) {
v := HealTaskStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeHealTaskStatus(b *testing.B) {
v := HealTaskStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalHealingDisk(t *testing.T) {
v := HealingDisk{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgHealingDisk(b *testing.B) {
v := HealingDisk{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgHealingDisk(b *testing.B) {
v := HealingDisk{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalHealingDisk(b *testing.B) {
v := HealingDisk{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeHealingDisk(t *testing.T) {
v := HealingDisk{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeHealingDisk Msgsize() is inaccurate")
}
vn := HealingDisk{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeHealingDisk(b *testing.B) {
v := HealingDisk{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeHealingDisk(b *testing.B) {
v := HealingDisk{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMRFStatus(t *testing.T) {
v := MRFStatus{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMRFStatus(b *testing.B) {
v := MRFStatus{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMRFStatus(b *testing.B) {
v := MRFStatus{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMRFStatus(b *testing.B) {
v := MRFStatus{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMRFStatus(t *testing.T) {
v := MRFStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMRFStatus Msgsize() is inaccurate")
}
vn := MRFStatus{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMRFStatus(b *testing.B) {
v := MRFStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMRFStatus(b *testing.B) {
v := MRFStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalSetStatus(t *testing.T) {
v := SetStatus{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgSetStatus(b *testing.B) {
v := SetStatus{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgSetStatus(b *testing.B) {
v := SetStatus{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalSetStatus(b *testing.B) {
v := SetStatus{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeSetStatus(t *testing.T) {
v := SetStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeSetStatus Msgsize() is inaccurate")
}
vn := SetStatus{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeSetStatus(b *testing.B) {
v := SetStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeSetStatus(b *testing.B) {
v := SetStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/heal-commands_test.go
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"testing"
)
// Tests the online, offline, corrupted and missing drive counts reported by heal results.
func TestHealDriveCounts(t *testing.T) {
rs := HealResultItem{}
rs.Before.Drives = make([]HealDriveInfo, 20)
rs.After.Drives = make([]HealDriveInfo, 20)
for i := range rs.Before.Drives {
if i < 4 {
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateMissing}
rs.After.Drives[i] = HealDriveInfo{State: DriveStateMissing}
} else if i > 4 && i < 15 {
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOffline}
rs.After.Drives[i] = HealDriveInfo{State: DriveStateOffline}
} else if i > 15 {
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateCorrupt}
rs.After.Drives[i] = HealDriveInfo{State: DriveStateCorrupt}
} else {
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOk}
rs.After.Drives[i] = HealDriveInfo{State: DriveStateOk}
}
}
i, j := rs.GetOnlineCounts()
if i > 2 {
t.Errorf("Expected '2', got %d before online disks", i)
}
if j > 2 {
t.Errorf("Expected '2', got %d after online disks", j)
}
i, j = rs.GetOfflineCounts()
if i > 10 {
t.Errorf("Expected '10', got %d before offline disks", i)
}
if j > 10 {
t.Errorf("Expected '10', got %d after offline disks", j)
}
i, j = rs.GetCorruptedCounts()
if i > 4 {
t.Errorf("Expected '4', got %d before corrupted disks", i)
}
if j > 4 {
t.Errorf("Expected '4', got %d after corrupted disks", j)
}
i, j = rs.GetMissingCounts()
if i > 4 {
t.Errorf("Expected '4', got %d before missing disks", i)
}
if j > 4 {
t.Errorf("Expected '4', got %d after missing disks", i)
}
}
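
// Illustrative example (not part of the original test suite): a minimal
// sketch of how the before/after counters exposed by HealResultItem are
// typically read. It assumes, as the test above does, that GetOnlineCounts
// tallies drives in DriveStateOk. The drive layout below is hypothetical.
func TestHealDriveCountsExample(t *testing.T) {
	rs := HealResultItem{}
	// One drive comes back online after healing.
	rs.Before.Drives = []HealDriveInfo{
		{State: DriveStateOffline},
		{State: DriveStateOk},
	}
	rs.After.Drives = []HealDriveInfo{
		{State: DriveStateOk},
		{State: DriveStateOk},
	}
	beforeOnline, afterOnline := rs.GetOnlineCounts()
	if beforeOnline != 1 || afterOnline != 2 {
		t.Errorf("unexpected online counts: before=%d after=%d", beforeOnline, afterOnline)
	}
}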
golang-github-minio-madmin-go-3.0.104/health-old.go
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"encoding/json"
"math/big"
"time"
"github.com/shirou/gopsutil/v3/cpu"
diskhw "github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/mem"
"github.com/shirou/gopsutil/v3/process"
)
// HealthInfoV0 - MinIO cluster's health Info version 0
type HealthInfoV0 struct {
TimeStamp time.Time `json:"timestamp,omitempty"`
Error string `json:"error,omitempty"`
Perf PerfInfoV0 `json:"perf,omitempty"`
Minio MinioHealthInfoV0 `json:"minio,omitempty"`
Sys SysHealthInfo `json:"sys,omitempty"`
}
// HealthInfoV2 - MinIO cluster's health Info version 2
type HealthInfoV2 struct {
Version string `json:"version"`
Error string `json:"error,omitempty"`
TimeStamp time.Time `json:"timestamp,omitempty"`
Sys SysInfo `json:"sys,omitempty"`
Perf PerfInfo `json:"perf,omitempty"`
Minio MinioHealthInfo `json:"minio,omitempty"`
}
func (info HealthInfoV2) String() string {
data, err := json.Marshal(info)
if err != nil {
panic(err) // This never happens.
}
return string(data)
}
// JSON returns this structure as JSON formatted string.
func (info HealthInfoV2) JSON() string {
data, err := json.MarshalIndent(info, " ", " ")
if err != nil {
panic(err) // This never happens.
}
return string(data)
}
// GetError - returns error from the cluster health info v2
func (info HealthInfoV2) GetError() string {
return info.Error
}
// GetStatus - returns status of the cluster health info v2
func (info HealthInfoV2) GetStatus() string {
if info.Error != "" {
return "error"
}
return "success"
}
// GetTimestamp - returns timestamp from the cluster health info v2
func (info HealthInfoV2) GetTimestamp() time.Time {
return info.TimeStamp
}
// Latency contains write operation latency in seconds of a disk drive.
type Latency struct {
Avg float64 `json:"avg"`
Max float64 `json:"max"`
Min float64 `json:"min"`
Percentile50 float64 `json:"percentile_50"`
Percentile90 float64 `json:"percentile_90"`
Percentile99 float64 `json:"percentile_99"`
}
// Throughput contains write performance in bytes per second of a disk drive.
type Throughput struct {
Avg uint64 `json:"avg"`
Max uint64 `json:"max"`
Min uint64 `json:"min"`
Percentile50 uint64 `json:"percentile_50"`
Percentile90 uint64 `json:"percentile_90"`
Percentile99 uint64 `json:"percentile_99"`
}
// DrivePerfInfo contains disk drive's performance information.
type DrivePerfInfo struct {
Error string `json:"error,omitempty"`
Path string `json:"path"`
Latency Latency `json:"latency,omitempty"`
Throughput Throughput `json:"throughput,omitempty"`
}
// DrivePerfInfos contains all disk drive's performance information of a node.
type DrivePerfInfos struct {
NodeCommon
SerialPerf []DrivePerfInfo `json:"serial_perf,omitempty"`
ParallelPerf []DrivePerfInfo `json:"parallel_perf,omitempty"`
}
// PeerNetPerfInfo contains network performance information of a node.
type PeerNetPerfInfo struct {
NodeCommon
Latency Latency `json:"latency,omitempty"`
Throughput Throughput `json:"throughput,omitempty"`
}
// NetPerfInfo contains network performance information of a node to other nodes.
type NetPerfInfo struct {
NodeCommon
RemotePeers []PeerNetPerfInfo `json:"remote_peers,omitempty"`
}
// PerfInfo - Includes Drive and Net perf info for the entire MinIO cluster
type PerfInfo struct {
Drives []DrivePerfInfos `json:"drives,omitempty"`
Net []NetPerfInfo `json:"net,omitempty"`
NetParallel NetPerfInfo `json:"net_parallel,omitempty"`
}
func (info HealthInfoV0) String() string {
data, err := json.Marshal(info)
if err != nil {
panic(err) // This never happens.
}
return string(data)
}
// JSON returns this structure as JSON formatted string.
func (info HealthInfoV0) JSON() string {
data, err := json.MarshalIndent(info, " ", " ")
if err != nil {
panic(err) // This never happens.
}
return string(data)
}
// SysHealthInfo - Includes hardware and system information of the MinIO cluster
type SysHealthInfo struct {
CPUInfo []ServerCPUInfo `json:"cpus,omitempty"`
DiskHwInfo []ServerDiskHwInfo `json:"drives,omitempty"`
OsInfo []ServerOsInfo `json:"osinfos,omitempty"`
MemInfo []ServerMemInfo `json:"meminfos,omitempty"`
ProcInfo []ServerProcInfo `json:"procinfos,omitempty"`
Error string `json:"error,omitempty"`
}
// ServerProcInfo - Includes host process-level information
type ServerProcInfo struct {
Addr string `json:"addr"`
Processes []SysProcess `json:"processes,omitempty"`
Error string `json:"error,omitempty"`
}
// SysProcess - Includes process-level information about a single process
type SysProcess struct {
Pid int32 `json:"pid"`
Background bool `json:"background,omitempty"`
CPUPercent float64 `json:"cpupercent,omitempty"`
Children []int32 `json:"children,omitempty"`
CmdLine string `json:"cmd,omitempty"`
ConnectionCount int `json:"connection_count,omitempty"`
CreateTime int64 `json:"createtime,omitempty"`
Cwd string `json:"cwd,omitempty"`
Exe string `json:"exe,omitempty"`
Gids []int32 `json:"gids,omitempty"`
IOCounters *process.IOCountersStat `json:"iocounters,omitempty"`
IsRunning bool `json:"isrunning,omitempty"`
MemInfo *process.MemoryInfoStat `json:"meminfo,omitempty"`
MemMaps *[]process.MemoryMapsStat `json:"memmaps,omitempty"`
MemPercent float32 `json:"mempercent,omitempty"`
Name string `json:"name,omitempty"`
Nice int32 `json:"nice,omitempty"`
NumCtxSwitches *process.NumCtxSwitchesStat `json:"numctxswitches,omitempty"`
NumFds int32 `json:"numfds,omitempty"`
NumThreads int32 `json:"numthreads,omitempty"`
PageFaults *process.PageFaultsStat `json:"pagefaults,omitempty"`
Parent int32 `json:"parent,omitempty"`
Ppid int32 `json:"ppid,omitempty"`
Status string `json:"status,omitempty"`
Tgid int32 `json:"tgid,omitempty"`
Times *cpu.TimesStat `json:"cputimes,omitempty"`
Uids []int32 `json:"uids,omitempty"`
Username string `json:"username,omitempty"`
}
// GetOwner - returns owner of the process
func (sp SysProcess) GetOwner() string {
return sp.Username
}
// ServerMemInfo - Includes host virtual and swap mem information
type ServerMemInfo struct {
Addr string `json:"addr"`
SwapMem *mem.SwapMemoryStat `json:"swap,omitempty"`
VirtualMem *mem.VirtualMemoryStat `json:"virtualmem,omitempty"`
Error string `json:"error,omitempty"`
}
// ServerOsInfo - Includes host os information
type ServerOsInfo struct {
Addr string `json:"addr"`
Info *host.InfoStat `json:"info,omitempty"`
Sensors []host.TemperatureStat `json:"sensors,omitempty"`
Users []host.UserStat `json:"users,omitempty"`
Error string `json:"error,omitempty"`
}
// ServerCPUInfo - Includes cpu and timer stats of each node of the MinIO cluster
type ServerCPUInfo struct {
Addr string `json:"addr"`
CPUStat []cpu.InfoStat `json:"cpu,omitempty"`
TimeStat []cpu.TimesStat `json:"time,omitempty"`
Error string `json:"error,omitempty"`
}
// MinioHealthInfoV0 - Includes MinIO configuration information
type MinioHealthInfoV0 struct {
Info InfoMessage `json:"info,omitempty"`
Config interface{} `json:"config,omitempty"`
Error string `json:"error,omitempty"`
}
// ServerDiskHwInfo - Includes usage counters, disk counters and partitions
type ServerDiskHwInfo struct {
Addr string `json:"addr"`
Usage []*diskhw.UsageStat `json:"usages,omitempty"`
Partitions []PartitionStat `json:"partitions,omitempty"`
Counters map[string]diskhw.IOCountersStat `json:"counters,omitempty"`
Error string `json:"error,omitempty"`
}
// GetTotalCapacity gets the total capacity a server holds.
func (s *ServerDiskHwInfo) GetTotalCapacity() (capacity uint64) {
for _, u := range s.Usage {
capacity += u.Total
}
return
}
// GetTotalFreeCapacity gets the total capacity that is free.
func (s *ServerDiskHwInfo) GetTotalFreeCapacity() (capacity uint64) {
for _, u := range s.Usage {
capacity += u.Free
}
return
}
// GetTotalUsedCapacity gets the total capacity used.
func (s *ServerDiskHwInfo) GetTotalUsedCapacity() (capacity uint64) {
for _, u := range s.Usage {
capacity += u.Used
}
return
}
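// Illustrative usage (not part of the original file): the three helpers
// above simply sum the per-mount usage entries reported by gopsutil.
// The values below are hypothetical.
//
//	hw := ServerDiskHwInfo{
//		Usage: []*diskhw.UsageStat{
//			{Total: 100, Free: 40, Used: 60},
//			{Total: 200, Free: 150, Used: 50},
//		},
//	}
//	hw.GetTotalCapacity()     // 300
//	hw.GetTotalFreeCapacity() // 190
//	hw.GetTotalUsedCapacity() // 110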
// SmartInfo contains S.M.A.R.T data about the drive
type SmartInfo struct {
Device string `json:"device"`
Scsi *SmartScsiInfo `json:"scsi,omitempty"`
Nvme *SmartNvmeInfo `json:"nvme,omitempty"`
Ata *SmartAtaInfo `json:"ata,omitempty"`
}
// SmartNvmeInfo contains NVMe drive info
type SmartNvmeInfo struct {
SerialNum string `json:"serialNum,omitempty"`
VendorID string `json:"vendorId,omitempty"`
FirmwareVersion string `json:"firmwareVersion,omitempty"`
ModelNum string `json:"modelNum,omitempty"`
SpareAvailable string `json:"spareAvailable,omitempty"`
SpareThreshold string `json:"spareThreshold,omitempty"`
Temperature string `json:"temperature,omitempty"`
CriticalWarning string `json:"criticalWarning,omitempty"`
MaxDataTransferPages int `json:"maxDataTransferPages,omitempty"`
ControllerBusyTime *big.Int `json:"controllerBusyTime,omitempty"`
PowerOnHours *big.Int `json:"powerOnHours,omitempty"`
PowerCycles *big.Int `json:"powerCycles,omitempty"`
UnsafeShutdowns *big.Int `json:"unsafeShutdowns,omitempty"`
MediaAndDataIntegrityErrors *big.Int `json:"mediaAndDataIntgerityErrors,omitempty"`
DataUnitsReadBytes *big.Int `json:"dataUnitsReadBytes,omitempty"`
DataUnitsWrittenBytes *big.Int `json:"dataUnitsWrittenBytes,omitempty"`
HostReadCommands *big.Int `json:"hostReadCommands,omitempty"`
HostWriteCommands *big.Int `json:"hostWriteCommands,omitempty"`
}
// SmartScsiInfo contains SCSI drive Info
type SmartScsiInfo struct {
CapacityBytes int64 `json:"scsiCapacityBytes,omitempty"`
ModeSenseBuf string `json:"scsiModeSenseBuf,omitempty"`
RespLen int64 `json:"scsirespLen,omitempty"`
BdLen int64 `json:"scsiBdLen,omitempty"`
Offset int64 `json:"scsiOffset,omitempty"`
RPM int64 `json:"sciRpm,omitempty"`
}
// SmartAtaInfo contains ATA drive info
type SmartAtaInfo struct {
LUWWNDeviceID string `json:"scsiLuWWNDeviceID,omitempty"`
SerialNum string `json:"serialNum,omitempty"`
ModelNum string `json:"modelNum,omitempty"`
FirmwareRevision string `json:"firmwareRevision,omitempty"`
RotationRate string `json:"RotationRate,omitempty"`
ATAMajorVersion string `json:"MajorVersion,omitempty"`
ATAMinorVersion string `json:"MinorVersion,omitempty"`
SmartSupportAvailable bool `json:"smartSupportAvailable,omitempty"`
SmartSupportEnabled bool `json:"smartSupportEnabled,omitempty"`
ErrorLog string `json:"smartErrorLog,omitempty"`
Transport string `json:"transport,omitempty"`
}
// PartitionStat - includes data from both shirou/psutil.diskHw.PartitionStat as well as SMART data
type PartitionStat struct {
Device string `json:"device"`
Mountpoint string `json:"mountpoint,omitempty"`
Fstype string `json:"fstype,omitempty"`
Opts string `json:"opts,omitempty"`
SmartInfo SmartInfo `json:"smartInfo,omitempty"`
}
// PerfInfoV0 - Includes Drive and Net perf info for the entire MinIO cluster
type PerfInfoV0 struct {
DriveInfo []ServerDrivesInfo `json:"drives,omitempty"`
Net []ServerNetHealthInfo `json:"net,omitempty"`
NetParallel ServerNetHealthInfo `json:"net_parallel,omitempty"`
Error string `json:"error,omitempty"`
}
// ServerDrivesInfo - Drive info about all drives in a single MinIO node
type ServerDrivesInfo struct {
Addr string `json:"addr"`
Serial []DrivePerfInfoV0 `json:"serial,omitempty"` // Drive perf info collected one drive at a time
Parallel []DrivePerfInfoV0 `json:"parallel,omitempty"` // Drive perf info collected in parallel
Error string `json:"error,omitempty"`
}
// DiskLatency holds latency information for write operations to the drive
type DiskLatency struct {
Avg float64 `json:"avg_secs,omitempty"`
Percentile50 float64 `json:"percentile50_secs,omitempty"`
Percentile90 float64 `json:"percentile90_secs,omitempty"`
Percentile99 float64 `json:"percentile99_secs,omitempty"`
Min float64 `json:"min_secs,omitempty"`
Max float64 `json:"max_secs,omitempty"`
}
// DiskThroughput holds throughput information for write operations to the drive
type DiskThroughput struct {
Avg float64 `json:"avg_bytes_per_sec,omitempty"`
Percentile50 float64 `json:"percentile50_bytes_per_sec,omitempty"`
Percentile90 float64 `json:"percentile90_bytes_per_sec,omitempty"`
Percentile99 float64 `json:"percentile99_bytes_per_sec,omitempty"`
Min float64 `json:"min_bytes_per_sec,omitempty"`
Max float64 `json:"max_bytes_per_sec,omitempty"`
}
// DrivePerfInfoV0 - Stats about a single drive in a MinIO node
type DrivePerfInfoV0 struct {
Path string `json:"endpoint"`
Latency DiskLatency `json:"latency,omitempty"`
Throughput DiskThroughput `json:"throughput,omitempty"`
Error string `json:"error,omitempty"`
}
// ServerNetHealthInfo - Network health info about a single MinIO node
type ServerNetHealthInfo struct {
Addr string `json:"addr"`
Net []NetPerfInfoV0 `json:"net,omitempty"`
Error string `json:"error,omitempty"`
}
// NetLatency holds latency information for network read/write operations between nodes
type NetLatency struct {
Avg float64 `json:"avg_secs,omitempty"`
Percentile50 float64 `json:"percentile50_secs,omitempty"`
Percentile90 float64 `json:"percentile90_secs,omitempty"`
Percentile99 float64 `json:"percentile99_secs,omitempty"`
Min float64 `json:"min_secs,omitempty"`
Max float64 `json:"max_secs,omitempty"`
}
// NetThroughput holds throughput information for network read/write operations between nodes
type NetThroughput struct {
Avg float64 `json:"avg_bytes_per_sec,omitempty"`
Percentile50 float64 `json:"percentile50_bytes_per_sec,omitempty"`
Percentile90 float64 `json:"percentile90_bytes_per_sec,omitempty"`
Percentile99 float64 `json:"percentile99_bytes_per_sec,omitempty"`
Min float64 `json:"min_bytes_per_sec,omitempty"`
Max float64 `json:"max_bytes_per_sec,omitempty"`
}
// NetPerfInfoV0 - one-to-one network connectivity Stats between 2 MinIO nodes
type NetPerfInfoV0 struct {
Addr string `json:"remote"`
Latency NetLatency `json:"latency,omitempty"`
Throughput NetThroughput `json:"throughput,omitempty"`
Error string `json:"error,omitempty"`
}
golang-github-minio-madmin-go-3.0.104/health.go
//
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"bufio"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/minio/madmin-go/v3/cgroup"
"github.com/minio/madmin-go/v3/kernel"
"github.com/prometheus/procfs"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/mem"
"github.com/shirou/gopsutil/v3/process"
)
// NodeCommon - Common fields across most node-specific health structs
type NodeCommon struct {
Addr string `json:"addr"`
Error string `json:"error,omitempty"`
}
// GetAddr - return the address of the node
func (n *NodeCommon) GetAddr() string {
return n.Addr
}
// SetAddr - set the address of the node
func (n *NodeCommon) SetAddr(addr string) {
n.Addr = addr
}
// SetError - set the error on the node
func (n *NodeCommon) SetError(err string) {
n.Error = err
}
const (
// HealthInfoVersion0 is version 0
HealthInfoVersion0 = ""
// HealthInfoVersion1 is version 1
HealthInfoVersion1 = "1"
// HealthInfoVersion2 is version 2
HealthInfoVersion2 = "2"
// HealthInfoVersion3 is version 3
HealthInfoVersion3 = "3"
// HealthInfoVersion is current health info version.
HealthInfoVersion = HealthInfoVersion3
)
const (
SysErrAuditEnabled = "audit is enabled"
SysErrUpdatedbInstalled = "updatedb is installed"
)
const (
SrvSELinux = "selinux"
SrvNotInstalled = "not-installed"
)
const (
sysClassBlock = "/sys/class/block"
sysClassDMI = "/sys/class/dmi"
runDevDataPfx = "/run/udev/data/b"
devDir = "/dev/"
devLoopDir = "/dev/loop"
)
// NodeInfo - Interface to abstract any struct that contains address/endpoint and error fields
type NodeInfo interface {
GetAddr() string
SetAddr(addr string)
SetError(err string)
}
// SysErrors - contains a system error
type SysErrors struct {
NodeCommon
Errors []string `json:"errors,omitempty"`
}
// SysServices - info about services that affect minio
type SysServices struct {
NodeCommon
Services []SysService `json:"services,omitempty"`
}
// SysConfig - info about system configuration that affects minio
type SysConfig struct {
NodeCommon
Config map[string]interface{} `json:"config,omitempty"`
}
// SysService - name and status of a sys service
type SysService struct {
Name string `json:"name"`
Status string `json:"status"`
}
// CPU contains system's CPU information.
type CPU struct {
VendorID string `json:"vendor_id"`
Family string `json:"family"`
Model string `json:"model"`
Stepping int32 `json:"stepping"`
PhysicalID string `json:"physical_id"`
ModelName string `json:"model_name"`
Mhz float64 `json:"mhz"`
CacheSize int32 `json:"cache_size"`
Flags []string `json:"flags"`
Microcode string `json:"microcode"`
Cores int `json:"cores"` // computed
}
// CPUs contains all CPU information of a node.
type CPUs struct {
NodeCommon
CPUs []CPU `json:"cpus,omitempty"`
CPUFreqStats []CPUFreqStats `json:"freq_stats,omitempty"`
}
// CPUFreqStats CPU frequency stats
type CPUFreqStats struct {
Name string
CpuinfoCurrentFrequency *uint64
CpuinfoMinimumFrequency *uint64
CpuinfoMaximumFrequency *uint64
CpuinfoTransitionLatency *uint64
ScalingCurrentFrequency *uint64
ScalingMinimumFrequency *uint64
ScalingMaximumFrequency *uint64
AvailableGovernors string
Driver string
Governor string
RelatedCpus string
SetSpeed string
}
// GetCPUs returns system's all CPU information.
func GetCPUs(ctx context.Context, addr string) CPUs {
infos, err := cpu.InfoWithContext(ctx)
if err != nil {
return CPUs{
NodeCommon: NodeCommon{
Addr: addr,
Error: err.Error(),
},
}
}
cpuMap := map[string]CPU{}
for _, info := range infos {
cpu, found := cpuMap[info.PhysicalID]
if found {
cpu.Cores++
} else {
cpu = CPU{
VendorID: info.VendorID,
Family: info.Family,
Model: info.Model,
Stepping: info.Stepping,
PhysicalID: info.PhysicalID,
ModelName: info.ModelName,
Mhz: info.Mhz,
CacheSize: info.CacheSize,
Flags: info.Flags,
Microcode: info.Microcode,
Cores: 1,
}
}
cpuMap[info.PhysicalID] = cpu
}
cpus := []CPU{}
for _, cpu := range cpuMap {
cpus = append(cpus, cpu)
}
var errMsg string
freqStats, err := getCPUFreqStats()
if err != nil {
errMsg = err.Error()
}
return CPUs{
NodeCommon: NodeCommon{Addr: addr, Error: errMsg},
CPUs: cpus,
CPUFreqStats: freqStats,
}
}
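// Illustrative usage (not part of the original file): GetCPUs folds the
// per-core entries reported by gopsutil into one CPU value per physical
// socket, incrementing Cores for each core sharing a PhysicalID. The
// address below is hypothetical.
//
//	cpus := GetCPUs(context.Background(), "node1:9000")
//	if cpus.Error != "" {
//		// handle the collection error reported for this node
//	}
//	for _, c := range cpus.CPUs {
//		_ = c.ModelName // e.g. inspect the model and c.Cores per socket
//	}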
// Partition contains disk partition's information.
type Partition struct {
Error string `json:"error,omitempty"`
Device string `json:"device,omitempty"`
Major uint32 `json:"major"`
Minor uint32 `json:"minor"`
Model string `json:"model,omitempty"`
Revision string `json:"revision,omitempty"`
Mountpoint string `json:"mountpoint,omitempty"`
FSType string `json:"fs_type,omitempty"`
MountOptions string `json:"mount_options,omitempty"`
MountFSType string `json:"mount_fs_type,omitempty"`
SpaceTotal uint64 `json:"space_total,omitempty"`
SpaceFree uint64 `json:"space_free,omitempty"`
InodeTotal uint64 `json:"inode_total,omitempty"`
InodeFree uint64 `json:"inode_free,omitempty"`
}
// NetSettings - rx/tx settings of an interface
type NetSettings struct {
// hardware capacity
RxMaxPending uint32 `json:"rx_max_pending"`
TxMaxPending uint32 `json:"tx_max_pending"`
MaxCombined uint32 `json:"max_combined"`
// configured limits
RxPending uint32 `json:"rx_pending"`
TxPending uint32 `json:"tx_pending"`
CombinedCount uint32 `json:"combined_count"`
}
// NetInfo contains information about a network interface
type NetInfo struct {
NodeCommon
Interface string `json:"interface,omitempty"`
Driver string `json:"driver,omitempty"`
FirmwareVersion string `json:"firmware_version,omitempty"`
Settings *NetSettings `json:"settings,omitempty"`
}
// Partitions contains all disk partitions information of a node.
type Partitions struct {
NodeCommon
Partitions []Partition `json:"partitions,omitempty"`
}
// driveHwInfo contains hardware information about a drive
type driveHwInfo struct {
Model string
Revision string
Major uint32
Minor uint32
}
func getDriveHwInfo(partDevice string) (info driveHwInfo, err error) {
partDevName := strings.ReplaceAll(partDevice, devDir, "")
devPath := path.Join(sysClassBlock, partDevName, "dev")
_, err = os.Stat(devPath)
if err != nil {
return
}
var data []byte
data, err = os.ReadFile(devPath)
if err != nil {
return
}
majorMinor := strings.TrimSpace(string(data))
mm := strings.SplitN(majorMinor, ":", 2)
major, err := strconv.ParseUint(mm[0], 10, 32)
if err == nil {
info.Major = uint32(major)
}
minor, err := strconv.ParseUint(mm[1], 10, 32)
if err == nil {
info.Minor = uint32(minor)
}
driveInfoPath := runDevDataPfx + majorMinor
var f *os.File
f, err = os.Open(driveInfoPath)
if err != nil {
return
}
defer f.Close()
buf := bufio.NewScanner(f)
for buf.Scan() {
field := strings.SplitN(buf.Text(), "=", 2)
if len(field) == 2 {
if field[0] == "E:ID_MODEL" {
info.Model = field[1]
}
if field[0] == "E:ID_REVISION" {
info.Revision = field[1]
}
if len(info.Model) > 0 && len(info.Revision) > 0 {
break
}
}
}
return
}
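// For reference (added, not in the original file): the udev database file
// read above (/run/udev/data/b<major>:<minor>) is a plain-text key=value
// dump; the two keys consumed here typically look like the following,
// where the values are hypothetical examples:
//
//	E:ID_MODEL=Samsung_SSD_860_EVO_1TB
//	E:ID_REVISION=RVT04B6Q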
// GetPartitions returns all disk partition information of a node. Supported on Linux only.
func GetPartitions(ctx context.Context, addr string) Partitions {
if runtime.GOOS != "linux" {
return Partitions{
NodeCommon: NodeCommon{
Addr: addr,
Error: "unsupported operating system " + runtime.GOOS,
},
}
}
parts, err := disk.PartitionsWithContext(ctx, false)
if err != nil {
return Partitions{
NodeCommon: NodeCommon{
Addr: addr,
Error: err.Error(),
},
}
}
partitions := []Partition{}
for i := range parts {
usage, err := disk.UsageWithContext(ctx, parts[i].Mountpoint)
if err != nil {
partitions = append(partitions, Partition{
Device: parts[i].Device,
Error: err.Error(),
})
} else {
var di driveHwInfo
device := parts[i].Device
if strings.HasPrefix(device, devDir) && !strings.HasPrefix(device, devLoopDir) {
// ignore any error in finding device model
di, _ = getDriveHwInfo(device)
}
partitions = append(partitions, Partition{
Device: device,
Mountpoint: parts[i].Mountpoint,
FSType: parts[i].Fstype,
MountOptions: strings.Join(parts[i].Opts, ","),
MountFSType: usage.Fstype,
SpaceTotal: usage.Total,
SpaceFree: usage.Free,
InodeTotal: usage.InodesTotal,
InodeFree: usage.InodesFree,
Model: di.Model,
Revision: di.Revision,
Major: di.Major,
Minor: di.Minor,
})
}
}
return Partitions{
NodeCommon: NodeCommon{Addr: addr},
Partitions: partitions,
}
}
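// Illustrative usage (not part of the original file): errors are reported
// both per node (Partitions.Error) and per device (Partition.Error), so
// callers typically check both. The address below is hypothetical.
//
//	parts := GetPartitions(context.Background(), "node1:9000")
//	if parts.Error != "" {
//		// whole-node failure (non-Linux OS, partition listing error, ...)
//	}
//	for _, p := range parts.Partitions {
//		if p.Error != "" {
//			continue // usage lookup failed for this device
//		}
//		_ = p.SpaceFree
//	}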
// OSInfo contains operating system's information.
type OSInfo struct {
NodeCommon
Info host.InfoStat `json:"info,omitempty"`
Sensors []host.TemperatureStat `json:"sensors,omitempty"`
}
// TimeInfo contains current time with timezone, and
// the roundtrip duration when fetching it remotely
type TimeInfo struct {
CurrentTime time.Time `json:"current_time"`
RoundtripDuration int32 `json:"roundtrip_duration"`
TimeZone string `json:"time_zone"`
}
// XFSErrorConfigs - stores the error configs of all XFS devices on the server
type XFSErrorConfigs struct {
Configs []XFSErrorConfig `json:"configs,omitempty"`
Error string `json:"error,omitempty"`
}
// XFSErrorConfig - stores XFS error configuration info for max_retries
type XFSErrorConfig struct {
ConfigFile string `json:"config_file"`
MaxRetries int `json:"max_retries"`
}
// GetOSInfo returns the node's operating system information. Supported on Linux only.
func GetOSInfo(ctx context.Context, addr string) OSInfo {
if runtime.GOOS != "linux" {
return OSInfo{
NodeCommon: NodeCommon{
Addr: addr,
Error: "unsupported operating system " + runtime.GOOS,
},
}
}
kr, err := kernel.CurrentRelease()
if err != nil {
return OSInfo{
NodeCommon: NodeCommon{
Addr: addr,
Error: err.Error(),
},
}
}
info, err := host.InfoWithContext(ctx)
if err != nil {
return OSInfo{
NodeCommon: NodeCommon{
Addr: addr,
Error: err.Error(),
},
}
}
osInfo := OSInfo{
NodeCommon: NodeCommon{Addr: addr},
Info: *info,
}
osInfo.Info.KernelVersion = kr
osInfo.Sensors, _ = host.SensorsTemperaturesWithContext(ctx)
return osInfo
}
// GetSysConfig returns config values from the system
// (only those affecting minio performance)
func GetSysConfig(_ context.Context, addr string) SysConfig {
sc := SysConfig{
NodeCommon: NodeCommon{Addr: addr},
Config: map[string]interface{}{},
}
proc, err := procfs.Self()
if err != nil {
sc.Error = "rlimit: " + err.Error()
} else {
limits, err := proc.Limits()
if err != nil {
sc.Error = "rlimit: " + err.Error()
} else {
sc.Config["rlimit-max"] = limits.OpenFiles
}
}
zone, _ := time.Now().Zone()
sc.Config["time-info"] = TimeInfo{
CurrentTime: time.Now(),
TimeZone: zone,
}
xfsErrorConfigs := getXFSErrorMaxRetries()
if len(xfsErrorConfigs.Configs) > 0 || len(xfsErrorConfigs.Error) > 0 {
sc.Config["xfs-error-config"] = xfsErrorConfigs
}
sc.Config["thp-config"] = getTHPConfigs()
procCmdLine, err := getProcCmdLine()
if err != nil {
errMsg := "proc-cmdline: " + err.Error()
if len(sc.Error) == 0 {
sc.Error = errMsg
} else {
sc.Error = sc.Error + ", " + errMsg
}
} else {
sc.Config["proc-cmdline"] = procCmdLine
}
return sc
}
func readIntFromFile(filePath string) (num int, err error) {
var file *os.File
file, err = os.Open(filePath)
if err != nil {
return
}
defer file.Close()
var data []byte
data, err = io.ReadAll(file)
if err != nil {
return
}
return strconv.Atoi(strings.TrimSpace(string(data)))
}
func getTHPConfigs() map[string]string {
configs := map[string]string{}
captureTHPConfig(configs, "/sys/kernel/mm/transparent_hugepage/enabled", "enabled")
captureTHPConfig(configs, "/sys/kernel/mm/transparent_hugepage/defrag", "defrag")
captureTHPConfig(configs, "/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none", "max_ptes_none")
return configs
}
func getProcCmdLine() ([]string, error) {
fs, err := procfs.NewDefaultFS()
if err != nil {
return nil, err
}
return fs.CmdLine()
}
func captureTHPConfig(configs map[string]string, filePath string, cfgName string) {
errFieldName := cfgName + "_error"
data, err := os.ReadFile(filePath)
if err != nil {
configs[errFieldName] = err.Error()
return
}
configs[cfgName] = strings.TrimSpace(string(data))
}
func getXFSErrorMaxRetries() XFSErrorConfigs {
xfsErrCfgPattern := "/sys/fs/xfs/*/error/metadata/*/max_retries"
configFiles, err := filepath.Glob(xfsErrCfgPattern)
if err != nil {
return XFSErrorConfigs{Error: err.Error()}
}
configs := []XFSErrorConfig{}
var errMsg string
for _, configFile := range configFiles {
maxRetries, err := readIntFromFile(configFile)
if err != nil {
errMsg = err.Error()
break
}
configs = append(configs, XFSErrorConfig{
ConfigFile: configFile,
MaxRetries: maxRetries,
})
}
return XFSErrorConfigs{
Configs: configs,
Error: errMsg,
}
}
// ProductInfo defines a host's product information
type ProductInfo struct {
NodeCommon
Family string `json:"family"`
Name string `json:"name"`
Vendor string `json:"vendor"`
SerialNumber string `json:"serial_number"`
UUID string `json:"uuid"`
SKU string `json:"sku"`
Version string `json:"version"`
}
func getDMIInfo(ask string) string {
value, err := os.ReadFile(path.Join(sysClassDMI, "id", ask))
if err != nil {
return "unknown"
}
return strings.TrimSpace(string(value))
}
// GetProductInfo returns a host's product information
func GetProductInfo(addr string) ProductInfo {
return ProductInfo{
NodeCommon: NodeCommon{Addr: addr},
Family: getDMIInfo("product_family"),
Name: getDMIInfo("product_name"),
Vendor: getDMIInfo("sys_vendor"),
SerialNumber: getDMIInfo("product_serial"),
UUID: getDMIInfo("product_uuid"),
SKU: getDMIInfo("product_sku"),
Version: getDMIInfo("product_version"),
}
}
// GetSysServices returns info of sys services that affect minio
func GetSysServices(_ context.Context, addr string) SysServices {
ss := SysServices{
NodeCommon: NodeCommon{Addr: addr},
Services: []SysService{},
}
srv, e := getSELinuxInfo()
if e != nil {
ss.Error = e.Error()
} else {
ss.Services = append(ss.Services, srv)
}
return ss
}
func getSELinuxInfo() (SysService, error) {
ss := SysService{Name: SrvSELinux}
file, err := os.Open("/etc/selinux/config")
if err != nil {
if errors.Is(err, os.ErrNotExist) {
ss.Status = SrvNotInstalled
return ss, nil
}
return ss, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
tokens := strings.SplitN(strings.TrimSpace(scanner.Text()), "=", 2)
if len(tokens) == 2 && tokens[0] == "SELINUX" {
ss.Status = tokens[1]
return ss, nil
}
}
return ss, scanner.Err()
}
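// For reference (added, not in the original file): /etc/selinux/config is a
// simple KEY=VALUE file, and the parser above reports the value of the
// SELINUX key. For example, a line such as:
//
//	SELINUX=enforcing
//
// yields SysService{Name: "selinux", Status: "enforcing"}.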
// GetSysErrors returns issues in system setup/config
func GetSysErrors(_ context.Context, addr string) SysErrors {
se := SysErrors{NodeCommon: NodeCommon{Addr: addr}}
if runtime.GOOS != "linux" {
return se
}
ae, err := isAuditEnabled()
if err != nil {
se.Error = "audit: " + err.Error()
} else if ae {
se.Errors = append(se.Errors, SysErrAuditEnabled)
}
_, err = exec.LookPath("updatedb")
if err == nil {
se.Errors = append(se.Errors, SysErrUpdatedbInstalled)
} else if !strings.HasSuffix(err.Error(), exec.ErrNotFound.Error()) {
errMsg := "updatedb: " + err.Error()
if len(se.Error) == 0 {
se.Error = errMsg
} else {
se.Error = se.Error + ", " + errMsg
}
}
return se
}
// Audit is enabled if either `audit=1` is present in /proc/cmdline
// or the `kauditd` process is running
func isAuditEnabled() (bool, error) {
file, err := os.Open("/proc/cmdline")
if err != nil {
return false, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
if strings.Contains(scanner.Text(), "audit=1") {
return true, nil
}
}
return isKauditdRunning()
}
func isKauditdRunning() (bool, error) {
procs, err := process.Processes()
if err != nil {
return false, err
}
for _, proc := range procs {
pname, err := proc.Name()
if err == nil && pname == "kauditd" {
return true, nil
}
}
return false, nil
}
// Get the final system memory limit chosen by the user.
// By default, without any configuration, on a vanilla Linux
// system this is the physical RAM limit. If a cgroup memory
// limit is configured for the given pid, this function
// returns that limit instead.
func getMemoryLimit(sysLimit uint64) uint64 {
// Following code is deliberately ignoring the error.
cGroupLimit, err := cgroup.GetMemoryLimit(os.Getpid())
if err == nil && cGroupLimit <= sysLimit {
// cgroup limit is lesser than system limit means
// user wants to limit the memory usage further
return cGroupLimit
}
return sysLimit
}
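// Illustrative example (not part of the original file), with hypothetical
// numbers:
//
//	// host RAM = 64 GiB, cgroup limit for this pid = 16 GiB
//	getMemoryLimit(64 << 30) // returns 16 << 30 (cgroup limit wins)
//	// no cgroup limit, or cgroup limit larger than RAM
//	getMemoryLimit(64 << 30) // returns 64 << 30 (system limit)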
// GetMemInfo returns system's RAM and swap information.
func GetMemInfo(ctx context.Context, addr string) MemInfo {
meminfo, err := mem.VirtualMemoryWithContext(ctx)
if err != nil {
return MemInfo{
NodeCommon: NodeCommon{
Addr: addr,
Error: err.Error(),
},
}
}
swapinfo, err := mem.SwapMemoryWithContext(ctx)
if err != nil {
return MemInfo{
NodeCommon: NodeCommon{
Addr: addr,
Error: err.Error(),
},
}
}
return MemInfo{
NodeCommon: NodeCommon{Addr: addr},
Total: meminfo.Total,
Used: meminfo.Used,
Free: meminfo.Free,
Available: meminfo.Available,
Shared: meminfo.Shared,
Cache: meminfo.Cached,
Buffers: meminfo.Buffers,
SwapSpaceTotal: swapinfo.Total,
SwapSpaceFree: swapinfo.Free,
Limit: getMemoryLimit(meminfo.Total),
}
}
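// Illustrative usage (not part of the original file): Limit reflects the
// effective memory ceiling (the cgroup limit when it is lower than physical
// RAM), so it is the value to use for capacity decisions. The address is
// hypothetical.
//
//	mi := GetMemInfo(context.Background(), "node1:9000")
//	if mi.Error == "" && mi.Limit < mi.Total {
//		// the process is memory-constrained by a cgroup
//	}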
// ProcInfo contains current process's information.
type ProcInfo struct {
NodeCommon
PID int32 `json:"pid,omitempty"`
IsBackground bool `json:"is_background,omitempty"`
CPUPercent float64 `json:"cpu_percent,omitempty"`
ChildrenPIDs []int32 `json:"children_pids,omitempty"`
CmdLine string `json:"cmd_line,omitempty"`
NumConnections int `json:"num_connections,omitempty"`
CreateTime int64 `json:"create_time,omitempty"`
CWD string `json:"cwd,omitempty"`
ExecPath string `json:"exec_path,omitempty"`
GIDs []int32 `json:"gids,omitempty"`
IOCounters process.IOCountersStat `json:"iocounters,omitempty"`
IsRunning bool `json:"is_running,omitempty"`
MemInfo process.MemoryInfoStat `json:"mem_info,omitempty"`
MemMaps []process.MemoryMapsStat `json:"mem_maps,omitempty"`
MemPercent float32 `json:"mem_percent,omitempty"`
Name string `json:"name,omitempty"`
Nice int32 `json:"nice,omitempty"`
NumCtxSwitches process.NumCtxSwitchesStat `json:"num_ctx_switches,omitempty"`
NumFDs int32 `json:"num_fds,omitempty"`
NumThreads int32 `json:"num_threads,omitempty"`
PageFaults process.PageFaultsStat `json:"page_faults,omitempty"`
PPID int32 `json:"ppid,omitempty"`
Status string `json:"status,omitempty"`
TGID int32 `json:"tgid,omitempty"`
Times cpu.TimesStat `json:"times,omitempty"`
UIDs []int32 `json:"uids,omitempty"`
Username string `json:"username,omitempty"`
}
// GetProcInfo returns current MinIO process information.
func GetProcInfo(ctx context.Context, addr string) ProcInfo {
pid := int32(syscall.Getpid())
procInfo := ProcInfo{
NodeCommon: NodeCommon{Addr: addr},
PID: pid,
}
var err error
proc, err := process.NewProcess(pid)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.IsBackground, err = proc.BackgroundWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.CPUPercent, err = proc.CPUPercentWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.ChildrenPIDs = []int32{}
children, _ := proc.ChildrenWithContext(ctx)
for i := range children {
procInfo.ChildrenPIDs = append(procInfo.ChildrenPIDs, children[i].Pid)
}
procInfo.CmdLine, err = proc.CmdlineWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
connections, err := proc.ConnectionsWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.NumConnections = len(connections)
procInfo.CreateTime, err = proc.CreateTimeWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.CWD, err = proc.CwdWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.ExecPath, err = proc.ExeWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.GIDs, err = proc.GidsWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
ioCounters, err := proc.IOCountersWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.IOCounters = *ioCounters
procInfo.IsRunning, err = proc.IsRunningWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
memInfo, err := proc.MemoryInfoWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.MemInfo = *memInfo
memMaps, err := proc.MemoryMapsWithContext(ctx, true)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.MemMaps = *memMaps
procInfo.MemPercent, err = proc.MemoryPercentWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.Name, err = proc.NameWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.Nice, err = proc.NiceWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
numCtxSwitches, err := proc.NumCtxSwitchesWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.NumCtxSwitches = *numCtxSwitches
procInfo.NumFDs, err = proc.NumFDsWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.NumThreads, err = proc.NumThreadsWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
pageFaults, err := proc.PageFaultsWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.PageFaults = *pageFaults
procInfo.PPID, _ = proc.PpidWithContext(ctx)
status, err := proc.StatusWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.Status = status[0]
procInfo.TGID, err = proc.Tgid()
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
times, err := proc.TimesWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
procInfo.Times = *times
procInfo.UIDs, err = proc.UidsWithContext(ctx)
if err != nil {
procInfo.Error = err.Error()
return procInfo
}
// In certain environments (e.g. minio-operator), it is not possible to get
// the username. This is not a serious error, so ignore it and leave it empty.
procInfo.Username, err = proc.UsernameWithContext(ctx)
if err != nil {
procInfo.Username = ""
}
return procInfo
}
// SysInfo - Includes hardware and system information of the MinIO cluster
type SysInfo struct {
CPUInfo []CPUs `json:"cpus,omitempty"`
Partitions []Partitions `json:"partitions,omitempty"`
OSInfo []OSInfo `json:"osinfo,omitempty"`
MemInfo []MemInfo `json:"meminfo,omitempty"`
ProcInfo []ProcInfo `json:"procinfo,omitempty"`
NetInfo []NetInfo `json:"netinfo,omitempty"`
SysErrs []SysErrors `json:"errors,omitempty"`
SysServices []SysServices `json:"services,omitempty"`
SysConfig []SysConfig `json:"config,omitempty"`
ProductInfo []ProductInfo `json:"productinfo,omitempty"`
KubernetesInfo KubernetesInfo `json:"kubernetes"`
}
// KubernetesInfo - Information about the kubernetes platform
type KubernetesInfo struct {
Major string `json:"major,omitempty"`
Minor string `json:"minor,omitempty"`
GitVersion string `json:"gitVersion,omitempty"`
GitCommit string `json:"gitCommit,omitempty"`
BuildDate time.Time `json:"buildDate,omitempty"`
Platform string `json:"platform,omitempty"`
Error string `json:"error,omitempty"`
}
// SpeedTestResults - Includes perf test results of the MinIO cluster
type SpeedTestResults struct {
DrivePerf []DriveSpeedTestResult `json:"drive,omitempty"`
ObjPerf []SpeedTestResult `json:"obj,omitempty"`
NetPerf []NetperfNodeResult `json:"net,omitempty"`
Error string `json:"error,omitempty"`
}
// MinioConfig contains minio configuration of a node.
type MinioConfig struct {
Error string `json:"error,omitempty"`
Config interface{} `json:"config,omitempty"`
}
// ServerInfo holds server information
type ServerInfo struct {
State string `json:"state,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Uptime int64 `json:"uptime,omitempty"`
Version string `json:"version,omitempty"`
CommitID string `json:"commitID,omitempty"`
Network map[string]string `json:"network,omitempty"`
Drives []Disk `json:"drives,omitempty"`
PoolNumber int `json:"poolNumber,omitempty"` // Only set if len(PoolNumbers) == 1
PoolNumbers []int `json:"poolNumbers,omitempty"`
MemStats MemStats `json:"mem_stats"`
GoMaxProcs int `json:"go_max_procs"`
NumCPU int `json:"num_cpu"`
RuntimeVersion string `json:"runtime_version"`
GCStats *GCStats `json:"gc_stats,omitempty"`
MinioEnvVars map[string]string `json:"minio_env_vars,omitempty"`
Edition string `json:"edition"`
License *LicenseInfo `json:"license,omitempty"`
}
// MinioInfo contains MinIO server and object storage information.
type MinioInfo struct {
Mode string `json:"mode,omitempty"`
Domain []string `json:"domain,omitempty"`
Region string `json:"region,omitempty"`
SQSARN []string `json:"sqsARN,omitempty"`
DeploymentID string `json:"deploymentID,omitempty"`
Buckets Buckets `json:"buckets,omitempty"`
Objects Objects `json:"objects,omitempty"`
Usage Usage `json:"usage,omitempty"`
Services Services `json:"services,omitempty"`
Backend interface{} `json:"backend,omitempty"`
Servers []ServerInfo `json:"servers,omitempty"`
TLS *TLSInfo `json:"tls"`
IsKubernetes *bool `json:"is_kubernetes"`
IsDocker *bool `json:"is_docker"`
Metrics *RealtimeMetrics `json:"metrics,omitempty"`
TierConfigs []TierConfig `json:"tier_configs,omitempty"`
}
type TLSInfo struct {
TLSEnabled bool `json:"tls_enabled"`
Certs []TLSCert `json:"certs,omitempty"`
}
type TLSCert struct {
PubKeyAlgo string `json:"pub_key_algo"`
SignatureAlgo string `json:"signature_algo"`
NotBefore time.Time `json:"not_before"`
NotAfter time.Time `json:"not_after"`
Checksum string `json:"checksum"`
}
// MinioHealthInfo - Includes MinIO configuration information
type MinioHealthInfo struct {
Error string `json:"error,omitempty"`
Config MinioConfig `json:"config,omitempty"`
Info MinioInfo `json:"info,omitempty"`
Replication *ReplDiagInfo `json:"replication,omitempty"`
}
// HealthInfo - MinIO cluster's health Info
type HealthInfo struct {
Version string `json:"version"`
Error string `json:"error,omitempty"`
TimeStamp time.Time `json:"timestamp,omitempty"`
Sys SysInfo `json:"sys,omitempty"`
Minio MinioHealthInfo `json:"minio,omitempty"`
}
func (info HealthInfo) String() string {
data, err := json.Marshal(info)
if err != nil {
panic(err) // This never happens.
}
return string(data)
}
// JSON returns this structure as JSON formatted string.
func (info HealthInfo) JSON() string {
data, err := json.MarshalIndent(info, " ", " ")
if err != nil {
panic(err) // This never happens.
}
return string(data)
}
// GetError - returns error from the cluster health info
func (info HealthInfo) GetError() string {
return info.Error
}
// GetStatus - returns status of the cluster health info
func (info HealthInfo) GetStatus() string {
if info.Error != "" {
return "error"
}
return "success"
}
// GetTimestamp - returns timestamp from the cluster health info
func (info HealthInfo) GetTimestamp() time.Time {
return info.TimeStamp
}
// HealthDataType - Typed Health data types
type HealthDataType string
// HealthDataTypes
const (
HealthDataTypeMinioInfo HealthDataType = "minioinfo"
HealthDataTypeMinioConfig HealthDataType = "minioconfig"
HealthDataTypeSysCPU HealthDataType = "syscpu"
HealthDataTypeSysDriveHw HealthDataType = "sysdrivehw"
HealthDataTypeSysOsInfo HealthDataType = "sysosinfo"
HealthDataTypeSysMem HealthDataType = "sysmem"
HealthDataTypeSysNet HealthDataType = "sysnet"
HealthDataTypeSysProcess HealthDataType = "sysprocess"
HealthDataTypeSysErrors HealthDataType = "syserrors"
HealthDataTypeSysServices HealthDataType = "sysservices"
HealthDataTypeSysConfig HealthDataType = "sysconfig"
HealthDataTypeReplication HealthDataType = "replication"
)
// HealthDataTypesMap - Map of Health datatypes
var HealthDataTypesMap = map[string]HealthDataType{
"minioinfo": HealthDataTypeMinioInfo,
"minioconfig": HealthDataTypeMinioConfig,
"syscpu": HealthDataTypeSysCPU,
"sysdrivehw": HealthDataTypeSysDriveHw,
"sysosinfo": HealthDataTypeSysOsInfo,
"sysmem": HealthDataTypeSysMem,
"sysnet": HealthDataTypeSysNet,
"sysprocess": HealthDataTypeSysProcess,
"syserrors": HealthDataTypeSysErrors,
"sysservices": HealthDataTypeSysServices,
"sysconfig": HealthDataTypeSysConfig,
"replication": HealthDataTypeReplication,
}
// HealthDataTypesList - List of health datatypes
var HealthDataTypesList = []HealthDataType{
HealthDataTypeMinioInfo,
HealthDataTypeMinioConfig,
HealthDataTypeSysCPU,
HealthDataTypeSysDriveHw,
HealthDataTypeSysOsInfo,
HealthDataTypeSysMem,
HealthDataTypeSysNet,
HealthDataTypeSysProcess,
HealthDataTypeSysErrors,
HealthDataTypeSysServices,
HealthDataTypeSysConfig,
HealthDataTypeReplication,
}
// HealthInfoVersionStruct - struct for health info version
type HealthInfoVersionStruct struct {
Version string `json:"version,omitempty"`
Error string `json:"error,omitempty"`
}
// ServerHealthInfo - Connect to a MinIO server and call the Health Info Management API
// to fetch the server's information, represented by the HealthInfo structure.
func (adm *AdminClient) ServerHealthInfo(ctx context.Context, types []HealthDataType, deadline time.Duration, anonymize string) (*http.Response, string, error) {
v := url.Values{}
v.Set("deadline", deadline.Truncate(1*time.Second).String())
v.Set("anonymize", anonymize)
for _, d := range HealthDataTypesList { // Init all parameters to false.
v.Set(string(d), "false")
}
for _, d := range types {
v.Set(string(d), "true")
}
resp, err := adm.executeMethod(
ctx, "GET", requestData{
relPath: adminAPIPrefix + "/healthinfo",
queryValues: v,
},
)
if err != nil {
closeResponse(resp)
return nil, "", err
}
if resp.StatusCode != http.StatusOK {
closeResponse(resp)
return nil, "", httpRespToErrorResponse(resp)
}
decoder := json.NewDecoder(resp.Body)
var version HealthInfoVersionStruct
if err = decoder.Decode(&version); err != nil {
closeResponse(resp)
return nil, "", err
}
if version.Error != "" {
closeResponse(resp)
return nil, "", errors.New(version.Error)
}
switch version.Version {
case "", HealthInfoVersion2, HealthInfoVersion:
default:
closeResponse(resp)
return nil, "", errors.New("Upgrade Minio Client to support health info version " + version.Version)
}
return resp, version.Version, nil
}
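// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted and "adm" is assumed to be an *AdminClient constructed
// elsewhere, e.g. via madmin.New). ServerHealthInfo already consumes and
// validates the leading version header, so the caller decodes the remaining
// body, which is assumed here to carry HealthInfo snapshots as JSON:
//
//	func fetchHealthInfo(ctx context.Context, adm *madmin.AdminClient, anonymize string) (madmin.HealthInfo, error) {
//		var healthInfo madmin.HealthInfo
//		resp, _, err := adm.ServerHealthInfo(ctx, madmin.HealthDataTypesList, 1*time.Hour, anonymize)
//		if err != nil {
//			return healthInfo, err
//		}
//		defer resp.Body.Close()
//		dec := json.NewDecoder(resp.Body)
//		for dec.More() {
//			if err := dec.Decode(&healthInfo); err != nil {
//				return healthInfo, err
//			}
//		}
//		return healthInfo, nil
//	}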
golang-github-minio-madmin-go-3.0.104/iam-migrate.go 0000664 0000000 0000000 00000012644 14774251704 0022132 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
)
// ImportIAMResult - represents the structure of the IAM import response
type ImportIAMResult struct {
// Skipped entries during import.
// This could be due to groups, policies, etc. missing for
// imported entries. We don't fail hard in this case and
// skip those entries.
Skipped IAMEntities `json:"skipped,omitempty"`
// Removed entries - this mostly happens for policies
// where empty policies might be imported, which is invalid
Removed IAMEntities `json:"removed,omitempty"`
// Newly added entries
Added IAMEntities `json:"added,omitempty"`
// Failed entries during import. This has details of the
// failed entities with their respective errors.
Failed IAMErrEntities `json:"failed,omitempty"`
}
// IAMEntities - represents different IAM entities
type IAMEntities struct {
// List of policy names
Policies []string `json:"policies,omitempty"`
// List of user names
Users []string `json:"users,omitempty"`
// List of group names
Groups []string `json:"groups,omitempty"`
// List of Service Account names
ServiceAccounts []string `json:"serviceAccounts,omitempty"`
// List of user policies, each entry in map represents list of policies
// applicable to the user
UserPolicies []map[string][]string `json:"userPolicies,omitempty"`
// List of group policies, each entry in map represents list of policies
// applicable to the group
GroupPolicies []map[string][]string `json:"groupPolicies,omitempty"`
// List of STS policies, each entry in map represents list of policies
// applicable to the STS
STSPolicies []map[string][]string `json:"stsPolicies,omitempty"`
}
// IAMErrEntities - represents IAM entries that errored out during import, with their errors
type IAMErrEntities struct {
// List of errored out policies with errors
Policies []IAMErrEntity `json:"policies,omitempty"`
// List of errored out users with errors
Users []IAMErrEntity `json:"users,omitempty"`
// List of errored out groups with errors
Groups []IAMErrEntity `json:"groups,omitempty"`
// List of errored out service accounts with errors
ServiceAccounts []IAMErrEntity `json:"serviceAccounts,omitempty"`
// List of errored out user policies with errors
UserPolicies []IAMErrPolicyEntity `json:"userPolicies,omitempty"`
// List of errored out group policies with errors
GroupPolicies []IAMErrPolicyEntity `json:"groupPolicies,omitempty"`
// List of errored out STS policies with errors
STSPolicies []IAMErrPolicyEntity `json:"stsPolicies,omitempty"`
}
// IAMErrEntity - represents errored out IAM entity
type IAMErrEntity struct {
// Name of the errored IAM entity
Name string `json:"name,omitempty"`
// Actual error
Error error `json:"error,omitempty"`
}
// IAMErrPolicyEntity - represents errored out IAM policies
type IAMErrPolicyEntity struct {
// Name of entity (user, group, STS)
Name string `json:"name,omitempty"`
// List of policies
Policies []string `json:"policies,omitempty"`
// Actual error
Error error `json:"error,omitempty"`
}
// ExportIAM makes an admin call to export IAM data
func (adm *AdminClient) ExportIAM(ctx context.Context) (io.ReadCloser, error) {
path := adminAPIPrefix + "/export-iam"
resp, err := adm.executeMethod(ctx,
http.MethodGet, requestData{
relPath: path,
},
)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
closeResponse(resp)
return nil, httpRespToErrorResponse(resp)
}
return resp.Body, nil
}
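// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted and "adm" is assumed to be an *AdminClient constructed
// elsewhere). The exported stream is written verbatim to a local file:
//
//	func exportIAMToFile(ctx context.Context, adm *madmin.AdminClient, path string) error {
//		rc, err := adm.ExportIAM(ctx)
//		if err != nil {
//			return err
//		}
//		defer rc.Close()
//		f, err := os.Create(path)
//		if err != nil {
//			return err
//		}
//		defer f.Close()
//		_, err = io.Copy(f, rc)
//		return err
//	}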
// ImportIAM makes an admin call to setup IAM from imported content
func (adm *AdminClient) ImportIAM(ctx context.Context, contentReader io.ReadCloser) error {
content, err := io.ReadAll(contentReader)
if err != nil {
return err
}
path := adminAPIPrefix + "/import-iam"
resp, err := adm.executeMethod(ctx,
http.MethodPut, requestData{
relPath: path,
content: content,
},
)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// ImportIAMV2 makes an admin call to setup IAM from imported content
func (adm *AdminClient) ImportIAMV2(ctx context.Context, contentReader io.ReadCloser) (iamr ImportIAMResult, err error) {
content, err := io.ReadAll(contentReader)
if err != nil {
return iamr, err
}
path := adminAPIPrefix + "/import-iam-v2"
resp, err := adm.executeMethod(ctx,
http.MethodPut, requestData{
relPath: path,
content: content,
},
)
defer closeResponse(resp)
if err != nil {
return iamr, err
}
if resp.StatusCode != http.StatusOK {
return iamr, httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return iamr, err
}
if err = json.Unmarshal(b, &iamr); err != nil {
return iamr, err
}
return iamr, nil
}
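// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted and "adm" is assumed to be an *AdminClient constructed
// elsewhere). The input is expected to be content previously produced by
// ExportIAM; ImportIAMV2 reads it fully, so the caller closes the file itself:
//
//	func importIAMFromFile(ctx context.Context, adm *madmin.AdminClient, path string) error {
//		f, err := os.Open(path)
//		if err != nil {
//			return err
//		}
//		defer f.Close()
//		result, err := adm.ImportIAMV2(ctx, f)
//		if err != nil {
//			return err
//		}
//		fmt.Printf("added users: %v, skipped policies: %v, failed users: %d\n",
//			result.Added.Users, result.Skipped.Policies, len(result.Failed.Users))
//		return nil
//	}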
golang-github-minio-madmin-go-3.0.104/idp-commands.go 0000664 0000000 0000000 00000034777 14774251704 0022324 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/set"
)
// AddOrUpdateIDPConfig - creates a new or updates an existing IDP
// configuration on the server.
func (adm *AdminClient) AddOrUpdateIDPConfig(ctx context.Context, cfgType, cfgName, cfgData string, update bool) (restart bool, err error) {
encBytes, err := EncryptData(adm.getSecretKey(), []byte(cfgData))
if err != nil {
return false, err
}
method := http.MethodPut
if update {
method = http.MethodPost
}
if cfgName == "" {
cfgName = Default
}
h := make(http.Header, 1)
h.Add("Content-Type", "application/octet-stream")
reqData := requestData{
customHeaders: h,
relPath: strings.Join([]string{adminAPIPrefix, "idp-config", cfgType, cfgName}, "/"),
content: encBytes,
}
resp, err := adm.executeMethod(ctx, method, reqData)
defer closeResponse(resp)
if err != nil {
return false, err
}
// FIXME: Remove support for this older API in 2023-04 (about 6 months).
//
// Attempt to fall back to older IDP API.
if resp.StatusCode == http.StatusUpgradeRequired {
// close old response
closeResponse(resp)
// Fallback is needed for `mc admin idp set myminio openid ...` only, as
// this was the only released API supported in the older version.
queryParams := make(url.Values, 2)
queryParams.Set("type", cfgType)
queryParams.Set("name", cfgName)
reqData := requestData{
customHeaders: h,
relPath: adminAPIPrefix + "/idp-config",
queryValues: queryParams,
content: encBytes,
}
resp, err = adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return false, err
}
}
if resp.StatusCode != http.StatusOK {
return false, httpRespToErrorResponse(resp)
}
return resp.Header.Get(ConfigAppliedHeader) != ConfigAppliedTrue, nil
}
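// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted, "adm" is assumed to be an *AdminClient constructed
// elsewhere, and the cfgData shown is only an assumed key=value style string,
// since the exact configuration syntax is defined by the server):
//
//	func configureOpenID(ctx context.Context, adm *madmin.AdminClient) error {
//		cfgData := "config_url=https://idp.example.com/.well-known/openid-configuration client_id=minio"
//		restart, err := adm.AddOrUpdateIDPConfig(ctx, madmin.OpenidIDPCfg, "", cfgData, false)
//		if err != nil {
//			return err
//		}
//		fmt.Println("server restart required:", restart)
//		return nil
//	}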
// IDPCfgInfo represents a single configuration or related parameter
type IDPCfgInfo struct {
Key string `json:"key"`
Value string `json:"value"`
IsCfg bool `json:"isCfg"`
IsEnv bool `json:"isEnv"` // relevant only when isCfg=true
}
// IDPConfig contains IDP configuration information returned by server.
type IDPConfig struct {
Type string `json:"type"`
Name string `json:"name,omitempty"`
Info []IDPCfgInfo `json:"info"`
}
// Constants for IDP configuration types.
const (
OpenidIDPCfg string = "openid"
LDAPIDPCfg string = "ldap"
)
// ValidIDPConfigTypes - set of valid IDP configs.
var ValidIDPConfigTypes = set.CreateStringSet(OpenidIDPCfg, LDAPIDPCfg)
// GetIDPConfig - fetch IDP config from server.
func (adm *AdminClient) GetIDPConfig(ctx context.Context, cfgType, cfgName string) (c IDPConfig, err error) {
if !ValidIDPConfigTypes.Contains(cfgType) {
return c, fmt.Errorf("invalid config type: %s", cfgType)
}
if cfgName == "" {
cfgName = Default
}
reqData := requestData{
relPath: strings.Join([]string{adminAPIPrefix, "idp-config", cfgType, cfgName}, "/"),
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return c, err
}
// FIXME: Remove support for this older API in 2023-04 (about 6 months).
//
// Attempt to fall back to older IDP API.
if resp.StatusCode == http.StatusUpgradeRequired {
// close old response
closeResponse(resp)
queryParams := make(url.Values, 2)
queryParams.Set("type", cfgType)
queryParams.Set("name", cfgName)
reqData := requestData{
relPath: adminAPIPrefix + "/idp-config",
queryValues: queryParams,
}
resp, err = adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return c, err
}
}
if resp.StatusCode != http.StatusOK {
return c, httpRespToErrorResponse(resp)
}
content, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return c, err
}
err = json.Unmarshal(content, &c)
return c, err
}
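// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted and "adm" is assumed to be an *AdminClient constructed
// elsewhere). An empty cfgName selects the default configuration:
//
//	func printOpenIDConfig(ctx context.Context, adm *madmin.AdminClient) error {
//		cfg, err := adm.GetIDPConfig(ctx, madmin.OpenidIDPCfg, "")
//		if err != nil {
//			return err
//		}
//		for _, kv := range cfg.Info {
//			fmt.Printf("%s=%s (from env: %v)\n", kv.Key, kv.Value, kv.IsEnv)
//		}
//		return nil
//	}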
// IDPListItem - represents an item in the List IDPs call.
type IDPListItem struct {
Type string `json:"type"`
Name string `json:"name"`
Enabled bool `json:"enabled"`
RoleARN string `json:"roleARN,omitempty"`
}
// ListIDPConfig - list IDP configuration on the server.
func (adm *AdminClient) ListIDPConfig(ctx context.Context, cfgType string) ([]IDPListItem, error) {
if !ValidIDPConfigTypes.Contains(cfgType) {
return nil, fmt.Errorf("invalid config type: %s", cfgType)
}
reqData := requestData{
relPath: strings.Join([]string{adminAPIPrefix, "idp-config", cfgType}, "/"),
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
// FIXME: Remove support for this older API in 2023-04 (about 6 months).
//
// Attempt to fall back to older IDP API.
if resp.StatusCode == http.StatusUpgradeRequired {
// close old response
closeResponse(resp)
queryParams := make(url.Values, 2)
queryParams.Set("type", cfgType)
reqData := requestData{
relPath: adminAPIPrefix + "/idp-config",
queryValues: queryParams,
}
resp, err = adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
content, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return nil, err
}
var lst []IDPListItem
err = json.Unmarshal(content, &lst)
return lst, err
}
// DeleteIDPConfig - delete an IDP configuration on the server.
func (adm *AdminClient) DeleteIDPConfig(ctx context.Context, cfgType, cfgName string) (restart bool, err error) {
if cfgName == "" {
cfgName = Default
}
reqData := requestData{
relPath: strings.Join([]string{adminAPIPrefix, "idp-config", cfgType, cfgName}, "/"),
}
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return false, err
}
// FIXME: Remove support for this older API in 2023-04 (about 6 months).
//
// Attempt to fall back to older IDP API.
if resp.StatusCode == http.StatusUpgradeRequired {
// close old response
closeResponse(resp)
queryParams := make(url.Values, 2)
queryParams.Set("type", cfgType)
queryParams.Set("name", cfgName)
reqData := requestData{
relPath: adminAPIPrefix + "/idp-config",
queryValues: queryParams,
}
resp, err = adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return false, err
}
}
if resp.StatusCode != http.StatusOK {
return false, httpRespToErrorResponse(resp)
}
return resp.Header.Get(ConfigAppliedHeader) != ConfigAppliedTrue, nil
}
// PolicyEntitiesResult - contains response to a policy entities query.
type PolicyEntitiesResult struct {
Timestamp time.Time `json:"timestamp"`
UserMappings []UserPolicyEntities `json:"userMappings,omitempty"`
GroupMappings []GroupPolicyEntities `json:"groupMappings,omitempty"`
PolicyMappings []PolicyEntities `json:"policyMappings,omitempty"`
}
// UserPolicyEntities - user -> policies mapping
type UserPolicyEntities struct {
User string `json:"user"`
Policies []string `json:"policies"`
MemberOfMappings []GroupPolicyEntities `json:"memberOfMappings,omitempty"`
}
// GroupPolicyEntities - group -> policies mapping
type GroupPolicyEntities struct {
Group string `json:"group"`
Policies []string `json:"policies"`
}
// PolicyEntities - policy -> user+group mapping
type PolicyEntities struct {
Policy string `json:"policy"`
Users []string `json:"users"`
Groups []string `json:"groups"`
}
// PolicyEntitiesQuery - contains request info for policy entities query.
type PolicyEntitiesQuery struct {
Users []string
Groups []string
Policy []string
}
// GetLDAPPolicyEntities - returns LDAP policy entities.
func (adm *AdminClient) GetLDAPPolicyEntities(ctx context.Context,
q PolicyEntitiesQuery,
) (r PolicyEntitiesResult, err error) {
params := make(url.Values)
params["user"] = q.Users
params["group"] = q.Groups
params["policy"] = q.Policy
reqData := requestData{
relPath: adminAPIPrefix + "/idp/ldap/policy-entities",
queryValues: params,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return r, err
}
if resp.StatusCode != http.StatusOK {
return r, httpRespToErrorResponse(resp)
}
content, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return r, err
}
err = json.Unmarshal(content, &r)
return r, err
}
// PolicyAssociationResp - result of a policy association request.
type PolicyAssociationResp struct {
PoliciesAttached []string `json:"policiesAttached,omitempty"`
PoliciesDetached []string `json:"policiesDetached,omitempty"`
UpdatedAt time.Time `json:"updatedAt"`
}
// PolicyAssociationReq - request to attach/detach policies from/to a user or
// group.
type PolicyAssociationReq struct {
Policies []string `json:"policies"`
// Exactly one of the following must be non-empty in a valid request.
User string `json:"user,omitempty"`
Group string `json:"group,omitempty"`
}
// IsValid validates the object and returns a reason when it is not valid.
func (p PolicyAssociationReq) IsValid() error {
if len(p.Policies) == 0 {
return errors.New("no policy names were given")
}
for _, p := range p.Policies {
if p == "" {
return errors.New("an empty policy name was given")
}
}
if p.User == "" && p.Group == "" {
return errors.New("no user or group association was given")
}
if p.User != "" && p.Group != "" {
return errors.New("either a group or a user association must be given, not both")
}
return nil
}
// AttachPolicyLDAP - client call to attach policies for LDAP.
func (adm *AdminClient) AttachPolicyLDAP(ctx context.Context, par PolicyAssociationReq) (PolicyAssociationResp, error) {
return adm.attachOrDetachPolicyLDAP(ctx, true, par)
}
// DetachPolicyLDAP - client call to detach policies for LDAP.
func (adm *AdminClient) DetachPolicyLDAP(ctx context.Context, par PolicyAssociationReq) (PolicyAssociationResp, error) {
return adm.attachOrDetachPolicyLDAP(ctx, false, par)
}
func (adm *AdminClient) attachOrDetachPolicyLDAP(ctx context.Context, isAttach bool,
par PolicyAssociationReq,
) (PolicyAssociationResp, error) {
plainBytes, err := json.Marshal(par)
if err != nil {
return PolicyAssociationResp{}, err
}
encBytes, err := EncryptData(adm.getSecretKey(), plainBytes)
if err != nil {
return PolicyAssociationResp{}, err
}
suffix := "detach"
if isAttach {
suffix = "attach"
}
h := make(http.Header, 1)
h.Add("Content-Type", "application/octet-stream")
reqData := requestData{
customHeaders: h,
relPath: adminAPIPrefix + "/idp/ldap/policy/" + suffix,
content: encBytes,
}
resp, err := adm.executeMethod(ctx, http.MethodPost, reqData)
defer closeResponse(resp)
if err != nil {
return PolicyAssociationResp{}, err
}
if resp.StatusCode != http.StatusOK {
return PolicyAssociationResp{}, httpRespToErrorResponse(resp)
}
content, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return PolicyAssociationResp{}, err
}
r := PolicyAssociationResp{}
err = json.Unmarshal(content, &r)
return r, err
}
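// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted, "adm" is assumed to be an *AdminClient constructed
// elsewhere, and "readonly" is only a placeholder policy name):
//
//	func attachReadOnly(ctx context.Context, adm *madmin.AdminClient, userDN string) error {
//		resp, err := adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
//			Policies: []string{"readonly"},
//			User:     userDN, // exactly one of User or Group may be set
//		})
//		if err != nil {
//			return err
//		}
//		fmt.Println("policies attached:", resp.PoliciesAttached)
//		return nil
//	}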
// ListAccessKeysLDAPResp is the response body of the list service accounts call
type ListAccessKeysLDAPResp ListAccessKeysResp
// ListAccessKeysLDAP - list service accounts belonging to the specified user
//
// Deprecated: Use ListAccessKeysLDAPBulk instead.
func (adm *AdminClient) ListAccessKeysLDAP(ctx context.Context, userDN string, listType string) (ListAccessKeysLDAPResp, error) {
queryValues := url.Values{}
queryValues.Set("listType", listType)
queryValues.Set("userDN", userDN)
reqData := requestData{
relPath: adminAPIPrefix + "/idp/ldap/list-access-keys",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/idp/ldap/list-access-keys
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return ListAccessKeysLDAPResp{}, err
}
if resp.StatusCode != http.StatusOK {
return ListAccessKeysLDAPResp{}, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return ListAccessKeysLDAPResp{}, err
}
var listResp ListAccessKeysLDAPResp
if err = json.Unmarshal(data, &listResp); err != nil {
return ListAccessKeysLDAPResp{}, err
}
return listResp, nil
}
// ListAccessKeysLDAPBulk - list access keys belonging to the given users or all users
func (adm *AdminClient) ListAccessKeysLDAPBulk(ctx context.Context, users []string, listType string, all bool) (map[string]ListAccessKeysLDAPResp, error) {
return adm.ListAccessKeysLDAPBulkWithOpts(ctx, users, ListAccessKeysOpts{ListType: listType, All: all})
}
// ListAccessKeysLDAPBulkWithOpts - list access keys belonging to the given users or all users
func (adm *AdminClient) ListAccessKeysLDAPBulkWithOpts(ctx context.Context, users []string, opts ListAccessKeysOpts) (map[string]ListAccessKeysLDAPResp, error) {
if len(users) > 0 && opts.All {
return nil, errors.New("either specify userDNs or all, not both")
}
queryValues := url.Values{}
queryValues.Set("listType", opts.ListType)
queryValues["userDNs"] = users
if opts.All {
queryValues.Set("all", "true")
}
reqData := requestData{
relPath: adminAPIPrefix + "/idp/ldap/list-access-keys-bulk",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/idp/ldap/list-access-keys-bulk
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return nil, err
}
listResp := make(map[string]ListAccessKeysLDAPResp)
if err = json.Unmarshal(data, &listResp); err != nil {
return nil, err
}
return listResp, nil
}
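// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted, "adm" is assumed to be an *AdminClient constructed
// elsewhere, and listType is passed through as accepted by the server):
//
//	func listAllLDAPAccessKeys(ctx context.Context, adm *madmin.AdminClient, listType string) error {
//		byUser, err := adm.ListAccessKeysLDAPBulkWithOpts(ctx, nil,
//			madmin.ListAccessKeysOpts{ListType: listType, All: true})
//		if err != nil {
//			return err
//		}
//		for userDN, keys := range byUser {
//			fmt.Printf("%s: %+v\n", userDN, keys)
//		}
//		return nil
//	}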
golang-github-minio-madmin-go-3.0.104/info-commands.go 0000664 0000000 0000000 00000046372 14774251704 0022475 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strconv"
"time"
)
//msgp:clearomitted
//msgp:tag json
//go:generate msgp
// BackendType - represents different backend types.
type BackendType int
// Enum for different backend types.
const (
Unknown BackendType = iota
// Filesystem backend.
FS
// Multi disk Erasure (single, distributed) backend.
Erasure
// Gateway to other storage
Gateway
// Add your own backend.
)
// ItemState - represents the status of an item: offline, initializing or online
type ItemState string
const (
// ItemOffline indicates that the item is offline
ItemOffline = ItemState("offline")
// ItemInitializing indicates that the item is still in initialization phase
ItemInitializing = ItemState("initializing")
// ItemOnline indicates that the item is online
ItemOnline = ItemState("online")
)
// StorageInfo - represents total capacity of underlying storage.
type StorageInfo struct {
Disks []Disk
// Backend type.
Backend BackendInfo
}
// BackendInfo - contains info of the underlying backend
type BackendInfo struct {
// Represents various backend types, currently only FS, Erasure and Gateway
Type BackendType
// Following fields are only meaningful if BackendType is Gateway.
GatewayOnline bool
// Following fields are only meaningful if BackendType is Erasure.
OnlineDisks BackendDisks // Online disks during server startup.
OfflineDisks BackendDisks // Offline disks during server startup.
// Following fields are only meaningful if BackendType is Erasure.
StandardSCData []int // Data disks for currently configured Standard storage class.
StandardSCParities []int // Parity disks per pool for currently configured Standard storage class
RRSCData []int // Data disks for currently configured Reduced Redundancy storage class.
RRSCParities []int // Parity disks per pool for currently configured Reduced Redundancy storage class.
// Adds number of erasure sets and drives per set.
TotalSets []int // Each index value corresponds to per pool
DrivesPerSet []int // Each index value corresponds to per pool
// Deprecated Aug 2023
StandardSCParity int // Parity disks for currently configured Standard storage class.
RRSCParity int // Parity disks for currently configured Reduced Redundancy storage class.
}
// BackendDisks - represents the map of endpoint-disks.
type BackendDisks map[string]int
// Sum - Return the sum of the disks in the endpoint-disk map.
func (d1 BackendDisks) Sum() (sum int) {
for _, count := range d1 {
sum += count
}
return sum
}
// Merge - Merges two endpoint-disk maps, summing counts for endpoints present in the receiver.
func (d1 BackendDisks) Merge(d2 BackendDisks) BackendDisks {
if len(d2) == 0 {
d2 = make(BackendDisks)
}
merged := make(BackendDisks)
for i1, v1 := range d1 {
if v2, ok := d2[i1]; ok {
merged[i1] = v2 + v1
continue
}
merged[i1] = v1
}
return merged
}
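// For instance (a small sketch with assumed endpoint names); note that, per the
// implementation above, endpoints present only in the argument map are not
// carried into the result:
//
//	d1 := madmin.BackendDisks{"http://node1:9000": 4, "http://node2:9000": 4}
//	d2 := madmin.BackendDisks{"http://node1:9000": 2}
//	merged := d1.Merge(d2) // {"http://node1:9000": 6, "http://node2:9000": 4}
//	total := merged.Sum()  // 10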
// StorageInfo - Connect to a MinIO server and call the Storage Info Management API
// to fetch the server's information, represented by the StorageInfo structure.
func (adm *AdminClient) StorageInfo(ctx context.Context) (StorageInfo, error) {
resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{relPath: adminAPIPrefix + "/storageinfo"})
defer closeResponse(resp)
if err != nil {
return StorageInfo{}, err
}
// Check response http status code
if resp.StatusCode != http.StatusOK {
return StorageInfo{}, httpRespToErrorResponse(resp)
}
// Unmarshal the server's json response
var storageInfo StorageInfo
if err = json.NewDecoder(resp.Body).Decode(&storageInfo); err != nil {
return StorageInfo{}, err
}
return storageInfo, nil
}
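// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted and "adm" is assumed to be an *AdminClient constructed
// elsewhere):
//
//	func printStorage(ctx context.Context, adm *madmin.AdminClient) error {
//		info, err := adm.StorageInfo(ctx)
//		if err != nil {
//			return err
//		}
//		fmt.Printf("drives: %d online, %d offline\n",
//			info.Backend.OnlineDisks.Sum(), info.Backend.OfflineDisks.Sum())
//		return nil
//	}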
// BucketUsageInfo - bucket usage info provides
// - total size of the bucket
// - total objects in a bucket
// - object size histogram per bucket
type BucketUsageInfo struct {
Size uint64 `json:"size"`
ReplicationPendingSize uint64 `json:"objectsPendingReplicationTotalSize"`
ReplicationFailedSize uint64 `json:"objectsFailedReplicationTotalSize"`
ReplicatedSize uint64 `json:"objectsReplicatedTotalSize"`
ReplicaSize uint64 `json:"objectReplicaTotalSize"`
ReplicationPendingCount uint64 `json:"objectsPendingReplicationCount"`
ReplicationFailedCount uint64 `json:"objectsFailedReplicationCount"`
VersionsCount uint64 `json:"versionsCount"`
ObjectsCount uint64 `json:"objectsCount"`
DeleteMarkersCount uint64 `json:"deleteMarkersCount"`
ObjectSizesHistogram map[string]uint64 `json:"objectsSizesHistogram"`
ObjectVersionsHistogram map[string]uint64 `json:"objectsVersionsHistogram"`
}
// DataUsageInfo represents data usage stats of the underlying Object API
type DataUsageInfo struct {
// LastUpdate is the timestamp of when the data usage info was last updated.
// This does not indicate a full scan.
LastUpdate time.Time `json:"lastUpdate"`
// Objects total count across all buckets
ObjectsTotalCount uint64 `json:"objectsCount"`
// Objects total size across all buckets
ObjectsTotalSize uint64 `json:"objectsTotalSize"`
// Total Size for objects that have not yet been replicated
ReplicationPendingSize uint64 `json:"objectsPendingReplicationTotalSize"`
// Total size for objects that have witnessed one or more failures and will be retried
ReplicationFailedSize uint64 `json:"objectsFailedReplicationTotalSize"`
// Total size for objects that have been replicated to destination
ReplicatedSize uint64 `json:"objectsReplicatedTotalSize"`
// Total size for objects that are replicas
ReplicaSize uint64 `json:"objectsReplicaTotalSize"`
// Total number of objects pending replication
ReplicationPendingCount uint64 `json:"objectsPendingReplicationCount"`
// Total number of objects that failed replication
ReplicationFailedCount uint64 `json:"objectsFailedReplicationCount"`
// Total number of buckets in this cluster
BucketsCount uint64 `json:"bucketsCount"`
// Buckets usage info provides following information across all buckets
// - total size of the bucket
// - total objects in a bucket
// - object size histogram per bucket
BucketsUsage map[string]BucketUsageInfo `json:"bucketsUsageInfo"`
// TierStats holds per-tier stats like bytes tiered, etc.
TierStats map[string]TierStats `json:"tierStats"`
// Deprecated, kept here for backward compatibility reasons.
BucketSizes map[string]uint64 `json:"bucketsSizes"`
// Server capacity related data
TotalCapacity uint64 `json:"capacity"`
TotalFreeCapacity uint64 `json:"freeCapacity"`
TotalUsedCapacity uint64 `json:"usedCapacity"`
}
// DataUsageInfo - returns data usage of the current object API
func (adm *AdminClient) DataUsageInfo(ctx context.Context) (DataUsageInfo, error) {
values := make(url.Values)
values.Set("capacity", "true") // We can make this configurable in future but for now its fine.
resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{
relPath: adminAPIPrefix + "/datausageinfo",
queryValues: values,
})
defer closeResponse(resp)
if err != nil {
return DataUsageInfo{}, err
}
// Check response http status code
if resp.StatusCode != http.StatusOK {
return DataUsageInfo{}, httpRespToErrorResponse(resp)
}
// Unmarshal the server's json response
var dataUsageInfo DataUsageInfo
if err = json.NewDecoder(resp.Body).Decode(&dataUsageInfo); err != nil {
return DataUsageInfo{}, err
}
return dataUsageInfo, nil
}
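// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted and "adm" is assumed to be an *AdminClient constructed
// elsewhere):
//
//	func printUsage(ctx context.Context, adm *madmin.AdminClient) error {
//		du, err := adm.DataUsageInfo(ctx)
//		if err != nil {
//			return err
//		}
//		fmt.Printf("buckets: %d, objects: %d, used: %d bytes (last update %s)\n",
//			du.BucketsCount, du.ObjectsTotalCount, du.ObjectsTotalSize, du.LastUpdate)
//		return nil
//	}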
// ErasureSetInfo provides information per erasure set
type ErasureSetInfo struct {
ID int `json:"id"`
RawUsage uint64 `json:"rawUsage"`
RawCapacity uint64 `json:"rawCapacity"`
Usage uint64 `json:"usage"`
ObjectsCount uint64 `json:"objectsCount"`
VersionsCount uint64 `json:"versionsCount"`
DeleteMarkersCount uint64 `json:"deleteMarkersCount"`
HealDisks int `json:"healDisks"`
}
// InfoMessage container to hold server admin related information.
type InfoMessage struct {
Mode string `json:"mode,omitempty"`
Domain []string `json:"domain,omitempty"`
Region string `json:"region,omitempty"`
SQSARN []string `json:"sqsARN,omitempty"`
DeploymentID string `json:"deploymentID,omitempty"`
Buckets Buckets `json:"buckets,omitempty"`
Objects Objects `json:"objects,omitempty"`
Versions Versions `json:"versions,omitempty"`
DeleteMarkers DeleteMarkers `json:"deletemarkers,omitempty"`
Usage Usage `json:"usage,omitempty"`
Services Services `json:"services,omitempty"`
Backend ErasureBackend `json:"backend,omitempty"`
Servers []ServerProperties `json:"servers,omitempty"`
Pools map[int]map[int]ErasureSetInfo `json:"pools,omitempty"`
}
func (info InfoMessage) BackendType() BackendType {
// MinIO server type default
switch info.Backend.Type {
case "Erasure":
return Erasure
case "FS":
return FS
default:
return Unknown
}
}
func (info InfoMessage) StandardParity() int {
switch info.BackendType() {
case Erasure:
return info.Backend.StandardSCParity
default:
return -1
}
}
// Services contains different services information
type Services struct {
KMS KMS `json:"kms,omitempty"` // deprecated july 2023
KMSStatus []KMS `json:"kmsStatus,omitempty"`
LDAP LDAP `json:"ldap,omitempty"`
Logger []Logger `json:"logger,omitempty"`
Audit []Audit `json:"audit,omitempty"`
Notifications []map[string][]TargetIDStatus `json:"notifications,omitempty"`
}
// Buckets contains the number of buckets
type Buckets struct {
Count uint64 `json:"count"`
Error string `json:"error,omitempty"`
}
// Objects contains the number of objects
type Objects struct {
Count uint64 `json:"count"`
Error string `json:"error,omitempty"`
}
// Versions contains the number of versions
type Versions struct {
Count uint64 `json:"count"`
Error string `json:"error,omitempty"`
}
// DeleteMarkers contains the number of delete markers
type DeleteMarkers struct {
Count uint64 `json:"count"`
Error string `json:"error,omitempty"`
}
// Usage contains the total size used
type Usage struct {
Size uint64 `json:"size"`
Error string `json:"error,omitempty"`
}
// TierStats contains per-tier statistics like total size, number of
// objects/versions transitioned, etc.
type TierStats struct {
TotalSize uint64 `json:"totalSize"`
NumVersions int `json:"numVersions"`
NumObjects int `json:"numObjects"`
}
// KMS contains KMS status information
type KMS struct {
Status string `json:"status,omitempty"`
Encrypt string `json:"encrypt,omitempty"`
Decrypt string `json:"decrypt,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Version string `json:"version,omitempty"`
}
// LDAP contains ldap status
type LDAP struct {
Status string `json:"status,omitempty"`
}
// Status of endpoint
type Status struct {
Status string `json:"status,omitempty"`
}
// Audit contains audit logger status
type Audit map[string]Status
// Logger contains logger status
type Logger map[string]Status
// TargetIDStatus contains ID and status
type TargetIDStatus map[string]Status
//msgp:replace backendType with:string
// backendType - indicates the type of backend storage
type backendType string
const (
// FsType - Backend is FS Type
FsType = backendType("FS")
// ErasureType - Backend is Erasure type
ErasureType = backendType("Erasure")
)
// FSBackend contains specific FS storage information
type FSBackend struct {
Type backendType `json:"backendType"`
}
// ErasureBackend contains specific erasure storage information
type ErasureBackend struct {
Type backendType `json:"backendType"`
OnlineDisks int `json:"onlineDisks"`
OfflineDisks int `json:"offlineDisks"`
// Parity disks for currently configured Standard storage class.
StandardSCParity int `json:"standardSCParity"`
// Parity disks for currently configured Reduced Redundancy storage class.
RRSCParity int `json:"rrSCParity"`
// Per pool information
TotalSets []int `json:"totalSets"`
DrivesPerSet []int `json:"totalDrivesPerSet"`
}
// ServerProperties holds server information
type ServerProperties struct {
State string `json:"state,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Scheme string `json:"scheme,omitempty"`
Uptime int64 `json:"uptime,omitempty"`
Version string `json:"version,omitempty"`
CommitID string `json:"commitID,omitempty"`
Network map[string]string `json:"network,omitempty"`
Disks []Disk `json:"drives,omitempty"`
PoolNumber int `json:"poolNumber,omitempty"` // Only set if len(PoolNumbers) == 1
PoolNumbers []int `json:"poolNumbers,omitempty"`
MemStats MemStats `json:"mem_stats"`
GoMaxProcs int `json:"go_max_procs,omitempty"`
NumCPU int `json:"num_cpu,omitempty"`
RuntimeVersion string `json:"runtime_version,omitempty"`
GCStats *GCStats `json:"gc_stats,omitempty"`
MinioEnvVars map[string]string `json:"minio_env_vars,omitempty"`
Edition string `json:"edition"`
License *LicenseInfo `json:"license,omitempty"`
IsLeader bool `json:"is_leader"`
ILMExpiryInProgress bool `json:"ilm_expiry_in_progress"`
}
// MemStats is strip down version of runtime.MemStats containing memory stats of MinIO server.
type MemStats struct {
Alloc uint64
TotalAlloc uint64
Mallocs uint64
Frees uint64
HeapAlloc uint64
}
// GCStats collect information about recent garbage collections.
type GCStats struct {
LastGC time.Time `json:"last_gc"` // time of last collection
NumGC int64 `json:"num_gc"` // number of garbage collections
PauseTotal time.Duration `json:"pause_total"` // total pause for all collections
Pause []time.Duration `json:"pause"` // pause history, most recent first
PauseEnd []time.Time `json:"pause_end"` // pause end times history, most recent first
}
// DiskMetrics has the information about XL Storage APIs
// the number of calls of each API and the moving average of
// the duration, in nanosecond, of each API.
type DiskMetrics struct {
LastMinute map[string]TimedAction `json:"lastMinute,omitempty"`
APICalls map[string]uint64 `json:"apiCalls,omitempty"`
// TotalTokens is the maximum concurrent I/O tokens set per drive.
TotalTokens uint32 `json:"totalTokens,omitempty"`
// TotalWaiting is the number of concurrent I/O operations waiting on the disk.
TotalWaiting uint32 `json:"totalWaiting,omitempty"`
// Captures all data availability errors such as
// permission denied, faulty disk and timeout errors.
TotalErrorsAvailability uint64 `json:"totalErrorsAvailability,omitempty"`
// Captures all timeout only errors
TotalErrorsTimeout uint64 `json:"totalErrorsTimeout,omitempty"`
// Total writes on disk (could be empty if the feature
// is not enabled on the server)
TotalWrites uint64 `json:"totalWrites,omitempty"`
// Total deletes on disk (could be empty if the feature
// is not enabled on the server)
TotalDeletes uint64 `json:"totalDeletes,omitempty"`
// Deprecated: Use LastMinute instead. Not populated from servers after July 2022.
APILatencies map[string]interface{} `json:"apiLatencies,omitempty"`
}
// CacheStats drive cache stats
type CacheStats struct {
Capacity int64 `json:"capacity"`
Used int64 `json:"used"`
Hits int64 `json:"hits"`
Misses int64 `json:"misses"`
DelHits int64 `json:"delHits"`
DelMisses int64 `json:"delMisses"`
Collisions int64 `json:"collisions"`
}
// Disk holds Disk information
type Disk struct {
Endpoint string `json:"endpoint,omitempty"`
RootDisk bool `json:"rootDisk,omitempty"`
DrivePath string `json:"path,omitempty"`
Healing bool `json:"healing,omitempty"`
Scanning bool `json:"scanning,omitempty"`
State string `json:"state,omitempty"`
UUID string `json:"uuid,omitempty"`
Major uint32 `json:"major"`
Minor uint32 `json:"minor"`
Model string `json:"model,omitempty"`
TotalSpace uint64 `json:"totalspace,omitempty"`
UsedSpace uint64 `json:"usedspace,omitempty"`
AvailableSpace uint64 `json:"availspace,omitempty"`
ReadThroughput float64 `json:"readthroughput,omitempty"`
WriteThroughPut float64 `json:"writethroughput,omitempty"`
ReadLatency float64 `json:"readlatency,omitempty"`
WriteLatency float64 `json:"writelatency,omitempty"`
Utilization float64 `json:"utilization,omitempty"`
Metrics *DiskMetrics `json:"metrics,omitempty"`
HealInfo *HealingDisk `json:"heal_info,omitempty"`
UsedInodes uint64 `json:"used_inodes"`
FreeInodes uint64 `json:"free_inodes,omitempty"`
Local bool `json:"local,omitempty"`
Cache *CacheStats `json:"cacheStats,omitempty"`
// Indexes, will be -1 until assigned a set.
PoolIndex int `json:"pool_index"`
SetIndex int `json:"set_index"`
DiskIndex int `json:"disk_index"`
}
// ServerInfoOpts ask for additional data from the server
type ServerInfoOpts struct {
Metrics bool
}
// WithDriveMetrics asks server to return additional metrics per drive
func WithDriveMetrics(metrics bool) func(*ServerInfoOpts) {
return func(opts *ServerInfoOpts) {
opts.Metrics = metrics
}
}
// ServerInfo - Connect to a MinIO server and call the Server Admin Info Management API
// to fetch the server's information, represented by the InfoMessage structure.
func (adm *AdminClient) ServerInfo(ctx context.Context, options ...func(*ServerInfoOpts)) (InfoMessage, error) {
srvOpts := &ServerInfoOpts{}
for _, o := range options {
o(srvOpts)
}
values := make(url.Values)
values.Set("metrics", strconv.FormatBool(srvOpts.Metrics))
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/info",
queryValues: values,
})
defer closeResponse(resp)
if err != nil {
return InfoMessage{}, err
}
// Check response http status code
if resp.StatusCode != http.StatusOK {
return InfoMessage{}, httpRespToErrorResponse(resp)
}
// Unmarshal the server's json response
var message InfoMessage
if err = json.NewDecoder(resp.Body).Decode(&message); err != nil {
return InfoMessage{}, err
}
return message, nil
}
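// Example usage (a minimal sketch, not part of this package; standard library
// imports are omitted and "adm" is assumed to be an *AdminClient constructed
// elsewhere):
//
//	func printServers(ctx context.Context, adm *madmin.AdminClient) error {
//		info, err := adm.ServerInfo(ctx, madmin.WithDriveMetrics(true))
//		if err != nil {
//			return err
//		}
//		for _, srv := range info.Servers {
//			fmt.Printf("%s: %s (uptime %ds, drives: %d)\n",
//				srv.Endpoint, srv.State, srv.Uptime, len(srv.Disks))
//		}
//		return nil
//	}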
golang-github-minio-madmin-go-3.0.104/info-commands_gen.go 0000664 0000000 0000000 00001124520 14774251704 0023317 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"time"
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *Audit) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(Audit, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
zb0003--
var zb0001 string
var zb0002 Status
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "status":
zb0002.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, zb0001, "Status")
return
}
zb0004Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
zb0002.Status = ""
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Audit) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0005, zb0006 := range z {
err = en.WriteString(zb0005)
if err != nil {
err = msgp.WrapError(err)
return
}
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if zb0006.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(zb0006.Status)
if err != nil {
err = msgp.WrapError(err, zb0005, "Status")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Audit) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0005, zb0006 := range z {
o = msgp.AppendString(o, zb0005)
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if zb0006.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, zb0006.Status)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Audit) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(Audit, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
var zb0001 string
var zb0002 Status
zb0003--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "status":
zb0002.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "Status")
return
}
zb0004Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
zb0002.Status = ""
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Audit) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0005, zb0006 := range z {
_ = zb0006
s += msgp.StringPrefixSize + len(zb0005) + 1 + 7 + msgp.StringPrefixSize + len(zb0006.Status)
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *BackendDisks) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(BackendDisks, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
zb0003--
var zb0001 string
var zb0002 int
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
zb0002, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z BackendDisks) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0004, zb0005 := range z {
err = en.WriteString(zb0004)
if err != nil {
err = msgp.WrapError(err)
return
}
err = en.WriteInt(zb0005)
if err != nil {
err = msgp.WrapError(err, zb0004)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z BackendDisks) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0004, zb0005 := range z {
o = msgp.AppendString(o, zb0004)
o = msgp.AppendInt(o, zb0005)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BackendDisks) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(BackendDisks, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
var zb0001 string
var zb0002 int
zb0003--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
zb0002, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BackendDisks) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0004, zb0005 := range z {
_ = zb0005
s += msgp.StringPrefixSize + len(zb0004) + msgp.IntSize
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *BackendInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Type":
{
var zb0002 int
zb0002, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = BackendType(zb0002)
}
case "GatewayOnline":
z.GatewayOnline, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "GatewayOnline")
return
}
case "OnlineDisks":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
if z.OnlineDisks == nil {
z.OnlineDisks = make(BackendDisks, zb0003)
} else if len(z.OnlineDisks) > 0 {
for key := range z.OnlineDisks {
delete(z.OnlineDisks, key)
}
}
for zb0003 > 0 {
zb0003--
var za0001 string
var za0002 int
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
za0002, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "OnlineDisks", za0001)
return
}
z.OnlineDisks[za0001] = za0002
}
case "OfflineDisks":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
if z.OfflineDisks == nil {
z.OfflineDisks = make(BackendDisks, zb0004)
} else if len(z.OfflineDisks) > 0 {
for key := range z.OfflineDisks {
delete(z.OfflineDisks, key)
}
}
for zb0004 > 0 {
zb0004--
var za0003 string
var za0004 int
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
za0004, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "OfflineDisks", za0003)
return
}
z.OfflineDisks[za0003] = za0004
}
case "StandardSCData":
var zb0005 uint32
zb0005, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "StandardSCData")
return
}
if cap(z.StandardSCData) >= int(zb0005) {
z.StandardSCData = (z.StandardSCData)[:zb0005]
} else {
z.StandardSCData = make([]int, zb0005)
}
for za0005 := range z.StandardSCData {
z.StandardSCData[za0005], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "StandardSCData", za0005)
return
}
}
case "StandardSCParities":
var zb0006 uint32
zb0006, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "StandardSCParities")
return
}
if cap(z.StandardSCParities) >= int(zb0006) {
z.StandardSCParities = (z.StandardSCParities)[:zb0006]
} else {
z.StandardSCParities = make([]int, zb0006)
}
for za0006 := range z.StandardSCParities {
z.StandardSCParities[za0006], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "StandardSCParities", za0006)
return
}
}
case "RRSCData":
var zb0007 uint32
zb0007, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "RRSCData")
return
}
if cap(z.RRSCData) >= int(zb0007) {
z.RRSCData = (z.RRSCData)[:zb0007]
} else {
z.RRSCData = make([]int, zb0007)
}
for za0007 := range z.RRSCData {
z.RRSCData[za0007], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RRSCData", za0007)
return
}
}
case "RRSCParities":
var zb0008 uint32
zb0008, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "RRSCParities")
return
}
if cap(z.RRSCParities) >= int(zb0008) {
z.RRSCParities = (z.RRSCParities)[:zb0008]
} else {
z.RRSCParities = make([]int, zb0008)
}
for za0008 := range z.RRSCParities {
z.RRSCParities[za0008], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RRSCParities", za0008)
return
}
}
case "TotalSets":
var zb0009 uint32
zb0009, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "TotalSets")
return
}
if cap(z.TotalSets) >= int(zb0009) {
z.TotalSets = (z.TotalSets)[:zb0009]
} else {
z.TotalSets = make([]int, zb0009)
}
for za0009 := range z.TotalSets {
z.TotalSets[za0009], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "TotalSets", za0009)
return
}
}
case "DrivesPerSet":
var zb0010 uint32
zb0010, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet")
return
}
if cap(z.DrivesPerSet) >= int(zb0010) {
z.DrivesPerSet = (z.DrivesPerSet)[:zb0010]
} else {
z.DrivesPerSet = make([]int, zb0010)
}
for za0010 := range z.DrivesPerSet {
z.DrivesPerSet[za0010], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet", za0010)
return
}
}
case "StandardSCParity":
z.StandardSCParity, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "StandardSCParity")
return
}
case "RRSCParity":
z.RRSCParity, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RRSCParity")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BackendInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 12
// write "Type"
err = en.Append(0x8c, 0xa4, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteInt(int(z.Type))
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "GatewayOnline"
err = en.Append(0xad, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.GatewayOnline)
if err != nil {
err = msgp.WrapError(err, "GatewayOnline")
return
}
// write "OnlineDisks"
err = en.Append(0xab, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.OnlineDisks)))
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
for za0001, za0002 := range z.OnlineDisks {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
err = en.WriteInt(za0002)
if err != nil {
err = msgp.WrapError(err, "OnlineDisks", za0001)
return
}
}
// write "OfflineDisks"
err = en.Append(0xac, 0x4f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.OfflineDisks)))
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
for za0003, za0004 := range z.OfflineDisks {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
err = en.WriteInt(za0004)
if err != nil {
err = msgp.WrapError(err, "OfflineDisks", za0003)
return
}
}
// write "StandardSCData"
err = en.Append(0xae, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x44, 0x61, 0x74, 0x61)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.StandardSCData)))
if err != nil {
err = msgp.WrapError(err, "StandardSCData")
return
}
for za0005 := range z.StandardSCData {
err = en.WriteInt(z.StandardSCData[za0005])
if err != nil {
err = msgp.WrapError(err, "StandardSCData", za0005)
return
}
}
// write "StandardSCParities"
err = en.Append(0xb2, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.StandardSCParities)))
if err != nil {
err = msgp.WrapError(err, "StandardSCParities")
return
}
for za0006 := range z.StandardSCParities {
err = en.WriteInt(z.StandardSCParities[za0006])
if err != nil {
err = msgp.WrapError(err, "StandardSCParities", za0006)
return
}
}
// write "RRSCData"
err = en.Append(0xa8, 0x52, 0x52, 0x53, 0x43, 0x44, 0x61, 0x74, 0x61)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.RRSCData)))
if err != nil {
err = msgp.WrapError(err, "RRSCData")
return
}
for za0007 := range z.RRSCData {
err = en.WriteInt(z.RRSCData[za0007])
if err != nil {
err = msgp.WrapError(err, "RRSCData", za0007)
return
}
}
// write "RRSCParities"
err = en.Append(0xac, 0x52, 0x52, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.RRSCParities)))
if err != nil {
err = msgp.WrapError(err, "RRSCParities")
return
}
for za0008 := range z.RRSCParities {
err = en.WriteInt(z.RRSCParities[za0008])
if err != nil {
err = msgp.WrapError(err, "RRSCParities", za0008)
return
}
}
// write "TotalSets"
err = en.Append(0xa9, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.TotalSets)))
if err != nil {
err = msgp.WrapError(err, "TotalSets")
return
}
for za0009 := range z.TotalSets {
err = en.WriteInt(z.TotalSets[za0009])
if err != nil {
err = msgp.WrapError(err, "TotalSets", za0009)
return
}
}
// write "DrivesPerSet"
err = en.Append(0xac, 0x44, 0x72, 0x69, 0x76, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.DrivesPerSet)))
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet")
return
}
for za0010 := range z.DrivesPerSet {
err = en.WriteInt(z.DrivesPerSet[za0010])
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet", za0010)
return
}
}
// write "StandardSCParity"
err = en.Append(0xb0, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteInt(z.StandardSCParity)
if err != nil {
err = msgp.WrapError(err, "StandardSCParity")
return
}
// write "RRSCParity"
err = en.Append(0xaa, 0x52, 0x52, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteInt(z.RRSCParity)
if err != nil {
err = msgp.WrapError(err, "RRSCParity")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BackendInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 12
// string "Type"
o = append(o, 0x8c, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendInt(o, int(z.Type))
// string "GatewayOnline"
o = append(o, 0xad, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
o = msgp.AppendBool(o, z.GatewayOnline)
// string "OnlineDisks"
o = append(o, 0xab, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.OnlineDisks)))
for za0001, za0002 := range z.OnlineDisks {
o = msgp.AppendString(o, za0001)
o = msgp.AppendInt(o, za0002)
}
// string "OfflineDisks"
o = append(o, 0xac, 0x4f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.OfflineDisks)))
for za0003, za0004 := range z.OfflineDisks {
o = msgp.AppendString(o, za0003)
o = msgp.AppendInt(o, za0004)
}
// string "StandardSCData"
o = append(o, 0xae, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x44, 0x61, 0x74, 0x61)
o = msgp.AppendArrayHeader(o, uint32(len(z.StandardSCData)))
for za0005 := range z.StandardSCData {
o = msgp.AppendInt(o, z.StandardSCData[za0005])
}
// string "StandardSCParities"
o = append(o, 0xb2, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.StandardSCParities)))
for za0006 := range z.StandardSCParities {
o = msgp.AppendInt(o, z.StandardSCParities[za0006])
}
// string "RRSCData"
o = append(o, 0xa8, 0x52, 0x52, 0x53, 0x43, 0x44, 0x61, 0x74, 0x61)
o = msgp.AppendArrayHeader(o, uint32(len(z.RRSCData)))
for za0007 := range z.RRSCData {
o = msgp.AppendInt(o, z.RRSCData[za0007])
}
// string "RRSCParities"
o = append(o, 0xac, 0x52, 0x52, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.RRSCParities)))
for za0008 := range z.RRSCParities {
o = msgp.AppendInt(o, z.RRSCParities[za0008])
}
// string "TotalSets"
o = append(o, 0xa9, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.TotalSets)))
for za0009 := range z.TotalSets {
o = msgp.AppendInt(o, z.TotalSets[za0009])
}
// string "DrivesPerSet"
o = append(o, 0xac, 0x44, 0x72, 0x69, 0x76, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x74)
o = msgp.AppendArrayHeader(o, uint32(len(z.DrivesPerSet)))
for za0010 := range z.DrivesPerSet {
o = msgp.AppendInt(o, z.DrivesPerSet[za0010])
}
// string "StandardSCParity"
o = append(o, 0xb0, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
o = msgp.AppendInt(o, z.StandardSCParity)
// string "RRSCParity"
o = append(o, 0xaa, 0x52, 0x52, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
o = msgp.AppendInt(o, z.RRSCParity)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BackendInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Type":
{
var zb0002 int
zb0002, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = BackendType(zb0002)
}
case "GatewayOnline":
z.GatewayOnline, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GatewayOnline")
return
}
case "OnlineDisks":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
if z.OnlineDisks == nil {
z.OnlineDisks = make(BackendDisks, zb0003)
} else if len(z.OnlineDisks) > 0 {
for key := range z.OnlineDisks {
delete(z.OnlineDisks, key)
}
}
for zb0003 > 0 {
var za0001 string
var za0002 int
zb0003--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
za0002, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnlineDisks", za0001)
return
}
z.OnlineDisks[za0001] = za0002
}
case "OfflineDisks":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
if z.OfflineDisks == nil {
z.OfflineDisks = make(BackendDisks, zb0004)
} else if len(z.OfflineDisks) > 0 {
for key := range z.OfflineDisks {
delete(z.OfflineDisks, key)
}
}
for zb0004 > 0 {
var za0003 string
var za0004 int
zb0004--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
za0004, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OfflineDisks", za0003)
return
}
z.OfflineDisks[za0003] = za0004
}
case "StandardSCData":
var zb0005 uint32
zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StandardSCData")
return
}
if cap(z.StandardSCData) >= int(zb0005) {
z.StandardSCData = (z.StandardSCData)[:zb0005]
} else {
z.StandardSCData = make([]int, zb0005)
}
for za0005 := range z.StandardSCData {
z.StandardSCData[za0005], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StandardSCData", za0005)
return
}
}
case "StandardSCParities":
var zb0006 uint32
zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StandardSCParities")
return
}
if cap(z.StandardSCParities) >= int(zb0006) {
z.StandardSCParities = (z.StandardSCParities)[:zb0006]
} else {
z.StandardSCParities = make([]int, zb0006)
}
for za0006 := range z.StandardSCParities {
z.StandardSCParities[za0006], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StandardSCParities", za0006)
return
}
}
case "RRSCData":
var zb0007 uint32
zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RRSCData")
return
}
if cap(z.RRSCData) >= int(zb0007) {
z.RRSCData = (z.RRSCData)[:zb0007]
} else {
z.RRSCData = make([]int, zb0007)
}
for za0007 := range z.RRSCData {
z.RRSCData[za0007], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RRSCData", za0007)
return
}
}
case "RRSCParities":
var zb0008 uint32
zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RRSCParities")
return
}
if cap(z.RRSCParities) >= int(zb0008) {
z.RRSCParities = (z.RRSCParities)[:zb0008]
} else {
z.RRSCParities = make([]int, zb0008)
}
for za0008 := range z.RRSCParities {
z.RRSCParities[za0008], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RRSCParities", za0008)
return
}
}
case "TotalSets":
var zb0009 uint32
zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalSets")
return
}
if cap(z.TotalSets) >= int(zb0009) {
z.TotalSets = (z.TotalSets)[:zb0009]
} else {
z.TotalSets = make([]int, zb0009)
}
for za0009 := range z.TotalSets {
z.TotalSets[za0009], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalSets", za0009)
return
}
}
case "DrivesPerSet":
var zb0010 uint32
zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet")
return
}
if cap(z.DrivesPerSet) >= int(zb0010) {
z.DrivesPerSet = (z.DrivesPerSet)[:zb0010]
} else {
z.DrivesPerSet = make([]int, zb0010)
}
for za0010 := range z.DrivesPerSet {
z.DrivesPerSet[za0010], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet", za0010)
return
}
}
case "StandardSCParity":
z.StandardSCParity, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StandardSCParity")
return
}
case "RRSCParity":
z.RRSCParity, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RRSCParity")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BackendInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.IntSize + 14 + msgp.BoolSize + 12 + msgp.MapHeaderSize
if z.OnlineDisks != nil {
for za0001, za0002 := range z.OnlineDisks {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.IntSize
}
}
s += 13 + msgp.MapHeaderSize
if z.OfflineDisks != nil {
for za0003, za0004 := range z.OfflineDisks {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.IntSize
}
}
s += 15 + msgp.ArrayHeaderSize + (len(z.StandardSCData) * (msgp.IntSize)) + 19 + msgp.ArrayHeaderSize + (len(z.StandardSCParities) * (msgp.IntSize)) + 9 + msgp.ArrayHeaderSize + (len(z.RRSCData) * (msgp.IntSize)) + 13 + msgp.ArrayHeaderSize + (len(z.RRSCParities) * (msgp.IntSize)) + 10 + msgp.ArrayHeaderSize + (len(z.TotalSets) * (msgp.IntSize)) + 13 + msgp.ArrayHeaderSize + (len(z.DrivesPerSet) * (msgp.IntSize)) + 17 + msgp.IntSize + 11 + msgp.IntSize
return
}
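// Illustrative sketch (not part of the generated code): a typical buffer
// round-trip using the BackendInfo codecs above. MarshalMsg appends to the
// supplied slice (pass nil to allocate), and UnmarshalMsg returns whatever
// bytes remain after the decoded value. Variable names (bi, buf, rest) are
// placeholders, not package API:
//
//	var bi BackendInfo
//	buf, err := bi.MarshalMsg(nil)
//	if err != nil {
//		// handle encode error
//	}
//	var out BackendInfo
//	rest, err := out.UnmarshalMsg(buf)
//	if err != nil {
//		// handle decode error
//	}
//	_ = rest // empty when buf held exactly one BackendInfo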
// DecodeMsg implements msgp.Decodable
func (z *BackendType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 int
zb0001, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = BackendType(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z BackendType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteInt(int(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z BackendType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendInt(o, int(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BackendType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 int
zb0001, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = BackendType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BackendType) Msgsize() (s int) {
s = msgp.IntSize
return
}
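// Illustrative note (not part of the generated code): BackendType is
// serialized as a bare MessagePack integer, so it occupies at most
// msgp.IntSize bytes and round-trips through a plain int conversion.
// A hypothetical usage (literal value and names assumed):
//
//	t := BackendType(1)
//	b, _ := t.MarshalMsg(nil) // a single msgpack int
//	var back BackendType
//	_, _ = back.UnmarshalMsg(b)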
// DecodeMsg implements msgp.Decodable
func (z *BucketUsageInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "size":
z.Size, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "objectsPendingReplicationTotalSize":
z.ReplicationPendingSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingSize")
return
}
case "objectsFailedReplicationTotalSize":
z.ReplicationFailedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedSize")
return
}
case "objectsReplicatedTotalSize":
z.ReplicatedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "objectReplicaTotalSize":
z.ReplicaSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "objectsPendingReplicationCount":
z.ReplicationPendingCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingCount")
return
}
case "objectsFailedReplicationCount":
z.ReplicationFailedCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedCount")
return
}
case "versionsCount":
z.VersionsCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "VersionsCount")
return
}
case "objectsCount":
z.ObjectsCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsCount")
return
}
case "deleteMarkersCount":
z.DeleteMarkersCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersCount")
return
}
case "objectsSizesHistogram":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram")
return
}
if z.ObjectSizesHistogram == nil {
z.ObjectSizesHistogram = make(map[string]uint64, zb0002)
} else if len(z.ObjectSizesHistogram) > 0 {
for key := range z.ObjectSizesHistogram {
delete(z.ObjectSizesHistogram, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 uint64
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram")
return
}
za0002, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram", za0001)
return
}
z.ObjectSizesHistogram[za0001] = za0002
}
case "objectsVersionsHistogram":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram")
return
}
if z.ObjectVersionsHistogram == nil {
z.ObjectVersionsHistogram = make(map[string]uint64, zb0003)
} else if len(z.ObjectVersionsHistogram) > 0 {
for key := range z.ObjectVersionsHistogram {
delete(z.ObjectVersionsHistogram, key)
}
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 uint64
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram")
return
}
za0004, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram", za0003)
return
}
z.ObjectVersionsHistogram[za0003] = za0004
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketUsageInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 12
// write "size"
err = en.Append(0x8c, 0xa4, 0x73, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Size)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// write "objectsPendingReplicationTotalSize"
err = en.Append(0xd9, 0x22, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationPendingSize)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingSize")
return
}
// write "objectsFailedReplicationTotalSize"
err = en.Append(0xd9, 0x21, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationFailedSize)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedSize")
return
}
// write "objectsReplicatedTotalSize"
err = en.Append(0xba, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicatedSize)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
// write "objectReplicaTotalSize"
err = en.Append(0xb6, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicaSize)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
// write "objectsPendingReplicationCount"
err = en.Append(0xbe, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationPendingCount)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingCount")
return
}
// write "objectsFailedReplicationCount"
err = en.Append(0xbd, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationFailedCount)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedCount")
return
}
// write "versionsCount"
err = en.Append(0xad, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.VersionsCount)
if err != nil {
err = msgp.WrapError(err, "VersionsCount")
return
}
// write "objectsCount"
err = en.Append(0xac, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsCount)
if err != nil {
err = msgp.WrapError(err, "ObjectsCount")
return
}
// write "deleteMarkersCount"
err = en.Append(0xb2, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.DeleteMarkersCount)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersCount")
return
}
// write "objectsSizesHistogram"
err = en.Append(0xb5, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.ObjectSizesHistogram)))
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram")
return
}
for za0001, za0002 := range z.ObjectSizesHistogram {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram")
return
}
err = en.WriteUint64(za0002)
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram", za0001)
return
}
}
// write "objectsVersionsHistogram"
err = en.Append(0xb8, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.ObjectVersionsHistogram)))
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram")
return
}
for za0003, za0004 := range z.ObjectVersionsHistogram {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram")
return
}
err = en.WriteUint64(za0004)
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram", za0003)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketUsageInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 12
// string "size"
o = append(o, 0x8c, 0xa4, 0x73, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.Size)
// string "objectsPendingReplicationTotalSize"
o = append(o, 0xd9, 0x22, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicationPendingSize)
// string "objectsFailedReplicationTotalSize"
o = append(o, 0xd9, 0x21, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicationFailedSize)
// string "objectsReplicatedTotalSize"
o = append(o, 0xba, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicatedSize)
// string "objectReplicaTotalSize"
o = append(o, 0xb6, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicaSize)
// string "objectsPendingReplicationCount"
o = append(o, 0xbe, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ReplicationPendingCount)
// string "objectsFailedReplicationCount"
o = append(o, 0xbd, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ReplicationFailedCount)
// string "versionsCount"
o = append(o, 0xad, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.VersionsCount)
// string "objectsCount"
o = append(o, 0xac, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ObjectsCount)
// string "deleteMarkersCount"
o = append(o, 0xb2, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.DeleteMarkersCount)
// string "objectsSizesHistogram"
o = append(o, 0xb5, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
o = msgp.AppendMapHeader(o, uint32(len(z.ObjectSizesHistogram)))
for za0001, za0002 := range z.ObjectSizesHistogram {
o = msgp.AppendString(o, za0001)
o = msgp.AppendUint64(o, za0002)
}
// string "objectsVersionsHistogram"
o = append(o, 0xb8, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
o = msgp.AppendMapHeader(o, uint32(len(z.ObjectVersionsHistogram)))
for za0003, za0004 := range z.ObjectVersionsHistogram {
o = msgp.AppendString(o, za0003)
o = msgp.AppendUint64(o, za0004)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketUsageInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "size":
z.Size, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "objectsPendingReplicationTotalSize":
z.ReplicationPendingSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingSize")
return
}
case "objectsFailedReplicationTotalSize":
z.ReplicationFailedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedSize")
return
}
case "objectsReplicatedTotalSize":
z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "objectReplicaTotalSize":
z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "objectsPendingReplicationCount":
z.ReplicationPendingCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingCount")
return
}
case "objectsFailedReplicationCount":
z.ReplicationFailedCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedCount")
return
}
case "versionsCount":
z.VersionsCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "VersionsCount")
return
}
case "objectsCount":
z.ObjectsCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsCount")
return
}
case "deleteMarkersCount":
z.DeleteMarkersCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersCount")
return
}
case "objectsSizesHistogram":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram")
return
}
if z.ObjectSizesHistogram == nil {
z.ObjectSizesHistogram = make(map[string]uint64, zb0002)
} else if len(z.ObjectSizesHistogram) > 0 {
for key := range z.ObjectSizesHistogram {
delete(z.ObjectSizesHistogram, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 uint64
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram")
return
}
za0002, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectSizesHistogram", za0001)
return
}
z.ObjectSizesHistogram[za0001] = za0002
}
case "objectsVersionsHistogram":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram")
return
}
if z.ObjectVersionsHistogram == nil {
z.ObjectVersionsHistogram = make(map[string]uint64, zb0003)
} else if len(z.ObjectVersionsHistogram) > 0 {
for key := range z.ObjectVersionsHistogram {
delete(z.ObjectVersionsHistogram, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 uint64
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram")
return
}
za0004, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectVersionsHistogram", za0003)
return
}
z.ObjectVersionsHistogram[za0003] = za0004
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketUsageInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.Uint64Size + 36 + msgp.Uint64Size + 35 + msgp.Uint64Size + 27 + msgp.Uint64Size + 23 + msgp.Uint64Size + 31 + msgp.Uint64Size + 30 + msgp.Uint64Size + 14 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + msgp.Uint64Size + 22 + msgp.MapHeaderSize
if z.ObjectSizesHistogram != nil {
for za0001, za0002 := range z.ObjectSizesHistogram {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.Uint64Size
}
}
s += 25 + msgp.MapHeaderSize
if z.ObjectVersionsHistogram != nil {
for za0003, za0004 := range z.ObjectVersionsHistogram {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.Uint64Size
}
}
return
}
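// Illustrative sketch (not part of the generated code): the EncodeMsg/DecodeMsg
// pair above targets streaming I/O through msgp.Writer and msgp.Reader rather
// than byte slices. A hypothetical round-trip over an in-memory buffer
// (variable names assumed; "bytes" must be imported by the calling file):
//
//	var in, out BucketUsageInfo
//	var network bytes.Buffer
//	w := msgp.NewWriter(&network)
//	if err := in.EncodeMsg(w); err != nil {
//		// handle encode error
//	}
//	if err := w.Flush(); err != nil { // msgp.Writer buffers; flush before reading
//		// handle flush error
//	}
//	r := msgp.NewReader(&network)
//	if err := out.DecodeMsg(r); err != nil {
//		// handle decode error
//	}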
// DecodeMsg implements msgp.Decodable
func (z *Buckets) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Buckets) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Count)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Error)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Buckets) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Count)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Error)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Buckets) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Buckets) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Error)
return
}
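// Illustrative note (not part of the generated code): Buckets treats Error as
// an omittable field. When Error == "" the "error" key is dropped from the
// encoded map (zb0001Len shrinks by one), and the decoders clear Error back to
// "" whenever the key is absent, so a zero Error value stays empty across a
// round-trip. A hypothetical example (names assumed):
//
//	b := Buckets{Count: 3}
//	enc, _ := b.MarshalMsg(nil) // map containing only "count"
//	var got Buckets
//	_, _ = got.UnmarshalMsg(enc)
//	// got.Error == ""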
// DecodeMsg implements msgp.Decodable
func (z *CacheStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "capacity":
z.Capacity, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Capacity")
return
}
case "used":
z.Used, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
case "hits":
z.Hits, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Hits")
return
}
case "misses":
z.Misses, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Misses")
return
}
case "delHits":
z.DelHits, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "DelHits")
return
}
case "delMisses":
z.DelMisses, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "DelMisses")
return
}
case "collisions":
z.Collisions, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Collisions")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *CacheStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 7
// write "capacity"
err = en.Append(0x87, 0xa8, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteInt64(z.Capacity)
if err != nil {
err = msgp.WrapError(err, "Capacity")
return
}
// write "used"
err = en.Append(0xa4, 0x75, 0x73, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.Used)
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
// write "hits"
err = en.Append(0xa4, 0x68, 0x69, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Hits)
if err != nil {
err = msgp.WrapError(err, "Hits")
return
}
// write "misses"
err = en.Append(0xa6, 0x6d, 0x69, 0x73, 0x73, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Misses)
if err != nil {
err = msgp.WrapError(err, "Misses")
return
}
// write "delHits"
err = en.Append(0xa7, 0x64, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.DelHits)
if err != nil {
err = msgp.WrapError(err, "DelHits")
return
}
// write "delMisses"
err = en.Append(0xa9, 0x64, 0x65, 0x6c, 0x4d, 0x69, 0x73, 0x73, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.DelMisses)
if err != nil {
err = msgp.WrapError(err, "DelMisses")
return
}
// write "collisions"
err = en.Append(0xaa, 0x63, 0x6f, 0x6c, 0x6c, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Collisions)
if err != nil {
err = msgp.WrapError(err, "Collisions")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *CacheStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 7
// string "capacity"
o = append(o, 0x87, 0xa8, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
o = msgp.AppendInt64(o, z.Capacity)
// string "used"
o = append(o, 0xa4, 0x75, 0x73, 0x65, 0x64)
o = msgp.AppendInt64(o, z.Used)
// string "hits"
o = append(o, 0xa4, 0x68, 0x69, 0x74, 0x73)
o = msgp.AppendInt64(o, z.Hits)
// string "misses"
o = append(o, 0xa6, 0x6d, 0x69, 0x73, 0x73, 0x65, 0x73)
o = msgp.AppendInt64(o, z.Misses)
// string "delHits"
o = append(o, 0xa7, 0x64, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73)
o = msgp.AppendInt64(o, z.DelHits)
// string "delMisses"
o = append(o, 0xa9, 0x64, 0x65, 0x6c, 0x4d, 0x69, 0x73, 0x73, 0x65, 0x73)
o = msgp.AppendInt64(o, z.DelMisses)
// string "collisions"
o = append(o, 0xaa, 0x63, 0x6f, 0x6c, 0x6c, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt64(o, z.Collisions)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *CacheStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "capacity":
z.Capacity, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Capacity")
return
}
case "used":
z.Used, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
case "hits":
z.Hits, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Hits")
return
}
case "misses":
z.Misses, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Misses")
return
}
case "delHits":
z.DelHits, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DelHits")
return
}
case "delMisses":
z.DelMisses, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DelMisses")
return
}
case "collisions":
z.Collisions, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Collisions")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *CacheStats) Msgsize() (s int) {
s = 1 + 9 + msgp.Int64Size + 5 + msgp.Int64Size + 5 + msgp.Int64Size + 7 + msgp.Int64Size + 8 + msgp.Int64Size + 10 + msgp.Int64Size + 11 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *DataUsageInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "objectsCount":
z.ObjectsTotalCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
case "objectsTotalSize":
z.ObjectsTotalSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
case "objectsPendingReplicationTotalSize":
z.ReplicationPendingSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingSize")
return
}
case "objectsFailedReplicationTotalSize":
z.ReplicationFailedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedSize")
return
}
case "objectsReplicatedTotalSize":
z.ReplicatedSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "objectsReplicaTotalSize":
z.ReplicaSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "objectsPendingReplicationCount":
z.ReplicationPendingCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingCount")
return
}
case "objectsFailedReplicationCount":
z.ReplicationFailedCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedCount")
return
}
case "bucketsCount":
z.BucketsCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BucketsCount")
return
}
case "bucketsUsageInfo":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "BucketsUsage")
return
}
if z.BucketsUsage == nil {
z.BucketsUsage = make(map[string]BucketUsageInfo, zb0002)
} else if len(z.BucketsUsage) > 0 {
for key := range z.BucketsUsage {
delete(z.BucketsUsage, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 BucketUsageInfo
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "BucketsUsage")
return
}
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "BucketsUsage", za0001)
return
}
z.BucketsUsage[za0001] = za0002
}
case "tierStats":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "TierStats")
return
}
if z.TierStats == nil {
z.TierStats = make(map[string]TierStats, zb0003)
} else if len(z.TierStats) > 0 {
for key := range z.TierStats {
delete(z.TierStats, key)
}
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 TierStats
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "TierStats")
return
}
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003)
return
}
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003)
return
}
switch msgp.UnsafeString(field) {
case "totalSize":
za0004.TotalSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "TotalSize")
return
}
case "numVersions":
za0004.NumVersions, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "NumVersions")
return
}
case "numObjects":
za0004.NumObjects, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "NumObjects")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003)
return
}
}
}
z.TierStats[za0003] = za0004
}
case "bucketsSizes":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "BucketSizes")
return
}
if z.BucketSizes == nil {
z.BucketSizes = make(map[string]uint64, zb0005)
} else if len(z.BucketSizes) > 0 {
for key := range z.BucketSizes {
delete(z.BucketSizes, key)
}
}
for zb0005 > 0 {
zb0005--
var za0005 string
var za0006 uint64
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "BucketSizes")
return
}
za0006, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BucketSizes", za0005)
return
}
z.BucketSizes[za0005] = za0006
}
case "capacity":
z.TotalCapacity, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalCapacity")
return
}
case "freeCapacity":
z.TotalFreeCapacity, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalFreeCapacity")
return
}
case "usedCapacity":
z.TotalUsedCapacity, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalUsedCapacity")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *DataUsageInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 16
// write "lastUpdate"
err = en.Append(0xde, 0x0, 0x10, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "objectsCount"
err = en.Append(0xac, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsTotalCount)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
// write "objectsTotalSize"
err = en.Append(0xb0, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsTotalSize)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
// write "objectsPendingReplicationTotalSize"
err = en.Append(0xd9, 0x22, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationPendingSize)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingSize")
return
}
// write "objectsFailedReplicationTotalSize"
err = en.Append(0xd9, 0x21, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationFailedSize)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedSize")
return
}
// write "objectsReplicatedTotalSize"
err = en.Append(0xba, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicatedSize)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
// write "objectsReplicaTotalSize"
err = en.Append(0xb7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicaSize)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
// write "objectsPendingReplicationCount"
err = en.Append(0xbe, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationPendingCount)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingCount")
return
}
// write "objectsFailedReplicationCount"
err = en.Append(0xbd, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ReplicationFailedCount)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedCount")
return
}
// write "bucketsCount"
err = en.Append(0xac, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.BucketsCount)
if err != nil {
err = msgp.WrapError(err, "BucketsCount")
return
}
// write "bucketsUsageInfo"
err = en.Append(0xb0, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x55, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.BucketsUsage)))
if err != nil {
err = msgp.WrapError(err, "BucketsUsage")
return
}
for za0001, za0002 := range z.BucketsUsage {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "BucketsUsage")
return
}
err = za0002.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "BucketsUsage", za0001)
return
}
}
// write "tierStats"
err = en.Append(0xa9, 0x74, 0x69, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.TierStats)))
if err != nil {
err = msgp.WrapError(err, "TierStats")
return
}
for za0003, za0004 := range z.TierStats {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "TierStats")
return
}
// map header, size 3
// write "totalSize"
err = en.Append(0x83, 0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(za0004.TotalSize)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "TotalSize")
return
}
// write "numVersions"
err = en.Append(0xab, 0x6e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt(za0004.NumVersions)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "NumVersions")
return
}
// write "numObjects"
err = en.Append(0xaa, 0x6e, 0x75, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt(za0004.NumObjects)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "NumObjects")
return
}
}
// write "bucketsSizes"
err = en.Append(0xac, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.BucketSizes)))
if err != nil {
err = msgp.WrapError(err, "BucketSizes")
return
}
for za0005, za0006 := range z.BucketSizes {
err = en.WriteString(za0005)
if err != nil {
err = msgp.WrapError(err, "BucketSizes")
return
}
err = en.WriteUint64(za0006)
if err != nil {
err = msgp.WrapError(err, "BucketSizes", za0005)
return
}
}
// write "capacity"
err = en.Append(0xa8, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteUint64(z.TotalCapacity)
if err != nil {
err = msgp.WrapError(err, "TotalCapacity")
return
}
// write "freeCapacity"
err = en.Append(0xac, 0x66, 0x72, 0x65, 0x65, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteUint64(z.TotalFreeCapacity)
if err != nil {
err = msgp.WrapError(err, "TotalFreeCapacity")
return
}
// write "usedCapacity"
err = en.Append(0xac, 0x75, 0x73, 0x65, 0x64, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteUint64(z.TotalUsedCapacity)
if err != nil {
err = msgp.WrapError(err, "TotalUsedCapacity")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *DataUsageInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 16
// string "lastUpdate"
o = append(o, 0xde, 0x0, 0x10, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "objectsCount"
o = append(o, 0xac, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ObjectsTotalCount)
// string "objectsTotalSize"
o = append(o, 0xb0, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ObjectsTotalSize)
// string "objectsPendingReplicationTotalSize"
o = append(o, 0xd9, 0x22, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicationPendingSize)
// string "objectsFailedReplicationTotalSize"
o = append(o, 0xd9, 0x21, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicationFailedSize)
// string "objectsReplicatedTotalSize"
o = append(o, 0xba, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicatedSize)
// string "objectsReplicaTotalSize"
o = append(o, 0xb7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.ReplicaSize)
// string "objectsPendingReplicationCount"
o = append(o, 0xbe, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ReplicationPendingCount)
// string "objectsFailedReplicationCount"
o = append(o, 0xbd, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ReplicationFailedCount)
// string "bucketsCount"
o = append(o, 0xac, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.BucketsCount)
// string "bucketsUsageInfo"
o = append(o, 0xb0, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x55, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f)
o = msgp.AppendMapHeader(o, uint32(len(z.BucketsUsage)))
for za0001, za0002 := range z.BucketsUsage {
o = msgp.AppendString(o, za0001)
o, err = za0002.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "BucketsUsage", za0001)
return
}
}
// string "tierStats"
o = append(o, 0xa9, 0x74, 0x69, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.TierStats)))
for za0003, za0004 := range z.TierStats {
o = msgp.AppendString(o, za0003)
// map header, size 3
// string "totalSize"
o = append(o, 0x83, 0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, za0004.TotalSize)
// string "numVersions"
o = append(o, 0xab, 0x6e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt(o, za0004.NumVersions)
// string "numObjects"
o = append(o, 0xaa, 0x6e, 0x75, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
o = msgp.AppendInt(o, za0004.NumObjects)
}
// string "bucketsSizes"
o = append(o, 0xac, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.BucketSizes)))
for za0005, za0006 := range z.BucketSizes {
o = msgp.AppendString(o, za0005)
o = msgp.AppendUint64(o, za0006)
}
// string "capacity"
o = append(o, 0xa8, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
o = msgp.AppendUint64(o, z.TotalCapacity)
// string "freeCapacity"
o = append(o, 0xac, 0x66, 0x72, 0x65, 0x65, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
o = msgp.AppendUint64(o, z.TotalFreeCapacity)
// string "usedCapacity"
o = append(o, 0xac, 0x75, 0x73, 0x65, 0x64, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
o = msgp.AppendUint64(o, z.TotalUsedCapacity)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *DataUsageInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "objectsCount":
z.ObjectsTotalCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalCount")
return
}
case "objectsTotalSize":
z.ObjectsTotalSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsTotalSize")
return
}
case "objectsPendingReplicationTotalSize":
z.ReplicationPendingSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingSize")
return
}
case "objectsFailedReplicationTotalSize":
z.ReplicationFailedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedSize")
return
}
case "objectsReplicatedTotalSize":
z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "objectsReplicaTotalSize":
z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicaSize")
return
}
case "objectsPendingReplicationCount":
z.ReplicationPendingCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationPendingCount")
return
}
case "objectsFailedReplicationCount":
z.ReplicationFailedCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicationFailedCount")
return
}
case "bucketsCount":
z.BucketsCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketsCount")
return
}
case "bucketsUsageInfo":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketsUsage")
return
}
if z.BucketsUsage == nil {
z.BucketsUsage = make(map[string]BucketUsageInfo, zb0002)
} else if len(z.BucketsUsage) > 0 {
for key := range z.BucketsUsage {
delete(z.BucketsUsage, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 BucketUsageInfo
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketsUsage")
return
}
bts, err = za0002.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "BucketsUsage", za0001)
return
}
z.BucketsUsage[za0001] = za0002
}
case "tierStats":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats")
return
}
if z.TierStats == nil {
z.TierStats = make(map[string]TierStats, zb0003)
} else if len(z.TierStats) > 0 {
for key := range z.TierStats {
delete(z.TierStats, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 TierStats
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats")
return
}
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003)
return
}
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003)
return
}
switch msgp.UnsafeString(field) {
case "totalSize":
za0004.TotalSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "TotalSize")
return
}
case "numVersions":
za0004.NumVersions, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "NumVersions")
return
}
case "numObjects":
za0004.NumObjects, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003, "NumObjects")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "TierStats", za0003)
return
}
}
}
z.TierStats[za0003] = za0004
}
case "bucketsSizes":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketSizes")
return
}
if z.BucketSizes == nil {
z.BucketSizes = make(map[string]uint64, zb0005)
} else if len(z.BucketSizes) > 0 {
for key := range z.BucketSizes {
delete(z.BucketSizes, key)
}
}
for zb0005 > 0 {
var za0005 string
var za0006 uint64
zb0005--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketSizes")
return
}
za0006, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BucketSizes", za0005)
return
}
z.BucketSizes[za0005] = za0006
}
case "capacity":
z.TotalCapacity, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalCapacity")
return
}
case "freeCapacity":
z.TotalFreeCapacity, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalFreeCapacity")
return
}
case "usedCapacity":
z.TotalUsedCapacity, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalUsedCapacity")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DataUsageInfo) Msgsize() (s int) {
s = 3 + 11 + msgp.TimeSize + 13 + msgp.Uint64Size + 17 + msgp.Uint64Size + 36 + msgp.Uint64Size + 35 + msgp.Uint64Size + 27 + msgp.Uint64Size + 24 + msgp.Uint64Size + 31 + msgp.Uint64Size + 30 + msgp.Uint64Size + 13 + msgp.Uint64Size + 17 + msgp.MapHeaderSize
if z.BucketsUsage != nil {
for za0001, za0002 := range z.BucketsUsage {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
}
}
s += 10 + msgp.MapHeaderSize
if z.TierStats != nil {
for za0003, za0004 := range z.TierStats {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + 1 + 10 + msgp.Uint64Size + 12 + msgp.IntSize + 11 + msgp.IntSize
}
}
s += 13 + msgp.MapHeaderSize
if z.BucketSizes != nil {
for za0005, za0006 := range z.BucketSizes {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + msgp.Uint64Size
}
}
s += 9 + msgp.Uint64Size + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
return
}

// DecodeMsg implements msgp.Decodable
func (z *DeleteMarkers) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
return
}

// EncodeMsg implements msgp.Encodable
func (z DeleteMarkers) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Count)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Error)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z DeleteMarkers) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Count)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Error)
}
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *DeleteMarkers) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z DeleteMarkers) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Error)
return
}

// DecodeMsg implements msgp.Decodable
func (z *Disk) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint32 /* 21 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
zb0001Mask |= 0x1
case "rootDisk":
z.RootDisk, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "RootDisk")
return
}
zb0001Mask |= 0x2
case "path":
z.DrivePath, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "DrivePath")
return
}
zb0001Mask |= 0x4
case "healing":
z.Healing, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Healing")
return
}
zb0001Mask |= 0x8
case "scanning":
z.Scanning, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Scanning")
return
}
zb0001Mask |= 0x10
case "state":
z.State, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "State")
return
}
zb0001Mask |= 0x20
case "uuid":
z.UUID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "UUID")
return
}
zb0001Mask |= 0x40
case "major":
z.Major, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "Major")
return
}
case "minor":
z.Minor, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "Minor")
return
}
case "model":
z.Model, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Model")
return
}
zb0001Mask |= 0x80
case "totalspace":
z.TotalSpace, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalSpace")
return
}
zb0001Mask |= 0x100
case "usedspace":
z.UsedSpace, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "UsedSpace")
return
}
zb0001Mask |= 0x200
case "availspace":
z.AvailableSpace, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "AvailableSpace")
return
}
zb0001Mask |= 0x400
case "readthroughput":
z.ReadThroughput, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "ReadThroughput")
return
}
zb0001Mask |= 0x800
case "writethroughput":
z.WriteThroughPut, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "WriteThroughPut")
return
}
zb0001Mask |= 0x1000
case "readlatency":
z.ReadLatency, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "ReadLatency")
return
}
zb0001Mask |= 0x2000
case "writelatency":
z.WriteLatency, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "WriteLatency")
return
}
zb0001Mask |= 0x4000
case "utilization":
z.Utilization, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Utilization")
return
}
zb0001Mask |= 0x8000
case "metrics":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
z.Metrics = nil
} else {
if z.Metrics == nil {
z.Metrics = new(DiskMetrics)
}
err = z.Metrics.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
}
zb0001Mask |= 0x10000
case "heal_info":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "HealInfo")
return
}
z.HealInfo = nil
} else {
if z.HealInfo == nil {
z.HealInfo = new(HealingDisk)
}
err = z.HealInfo.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "HealInfo")
return
}
}
zb0001Mask |= 0x20000
case "used_inodes":
z.UsedInodes, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "UsedInodes")
return
}
case "free_inodes":
z.FreeInodes, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "FreeInodes")
return
}
zb0001Mask |= 0x40000
case "local":
z.Local, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Local")
return
}
zb0001Mask |= 0x80000
case "cacheStats":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
z.Cache = nil
} else {
if z.Cache == nil {
z.Cache = new(CacheStats)
}
err = z.Cache.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
}
zb0001Mask |= 0x100000
case "pool_index":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "set_index":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "disk_index":
z.DiskIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1fffff {
if (zb0001Mask & 0x1) == 0 {
z.Endpoint = ""
}
if (zb0001Mask & 0x2) == 0 {
z.RootDisk = false
}
if (zb0001Mask & 0x4) == 0 {
z.DrivePath = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Healing = false
}
if (zb0001Mask & 0x10) == 0 {
z.Scanning = false
}
if (zb0001Mask & 0x20) == 0 {
z.State = ""
}
if (zb0001Mask & 0x40) == 0 {
z.UUID = ""
}
if (zb0001Mask & 0x80) == 0 {
z.Model = ""
}
if (zb0001Mask & 0x100) == 0 {
z.TotalSpace = 0
}
if (zb0001Mask & 0x200) == 0 {
z.UsedSpace = 0
}
if (zb0001Mask & 0x400) == 0 {
z.AvailableSpace = 0
}
if (zb0001Mask & 0x800) == 0 {
z.ReadThroughput = 0
}
if (zb0001Mask & 0x1000) == 0 {
z.WriteThroughPut = 0
}
if (zb0001Mask & 0x2000) == 0 {
z.ReadLatency = 0
}
if (zb0001Mask & 0x4000) == 0 {
z.WriteLatency = 0
}
if (zb0001Mask & 0x8000) == 0 {
z.Utilization = 0
}
if (zb0001Mask & 0x10000) == 0 {
z.Metrics = nil
}
if (zb0001Mask & 0x20000) == 0 {
z.HealInfo = nil
}
if (zb0001Mask & 0x40000) == 0 {
z.FreeInodes = 0
}
if (zb0001Mask & 0x80000) == 0 {
z.Local = false
}
if (zb0001Mask & 0x100000) == 0 {
z.Cache = nil
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *Disk) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(27)
var zb0001Mask uint32 /* 27 bits */
_ = zb0001Mask
if z.Endpoint == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.RootDisk == false {
zb0001Len--
zb0001Mask |= 0x2
}
if z.DrivePath == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Healing == false {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Scanning == false {
zb0001Len--
zb0001Mask |= 0x10
}
if z.State == "" {
zb0001Len--
zb0001Mask |= 0x20
}
if z.UUID == "" {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Model == "" {
zb0001Len--
zb0001Mask |= 0x200
}
if z.TotalSpace == 0 {
zb0001Len--
zb0001Mask |= 0x400
}
if z.UsedSpace == 0 {
zb0001Len--
zb0001Mask |= 0x800
}
if z.AvailableSpace == 0 {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.ReadThroughput == 0 {
zb0001Len--
zb0001Mask |= 0x2000
}
if z.WriteThroughPut == 0 {
zb0001Len--
zb0001Mask |= 0x4000
}
if z.ReadLatency == 0 {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.WriteLatency == 0 {
zb0001Len--
zb0001Mask |= 0x10000
}
if z.Utilization == 0 {
zb0001Len--
zb0001Mask |= 0x20000
}
if z.Metrics == nil {
zb0001Len--
zb0001Mask |= 0x40000
}
if z.HealInfo == nil {
zb0001Len--
zb0001Mask |= 0x80000
}
if z.FreeInodes == 0 {
zb0001Len--
zb0001Mask |= 0x200000
}
if z.Local == false {
zb0001Len--
zb0001Mask |= 0x400000
}
if z.Cache == nil {
zb0001Len--
zb0001Mask |= 0x800000
}
// variable map header, size zb0001Len
err = en.WriteMapHeader(zb0001Len)
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "rootDisk"
err = en.Append(0xa8, 0x72, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x73, 0x6b)
if err != nil {
return
}
err = en.WriteBool(z.RootDisk)
if err != nil {
err = msgp.WrapError(err, "RootDisk")
return
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "path"
err = en.Append(0xa4, 0x70, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.DrivePath)
if err != nil {
err = msgp.WrapError(err, "DrivePath")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "healing"
err = en.Append(0xa7, 0x68, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67)
if err != nil {
return
}
err = en.WriteBool(z.Healing)
if err != nil {
err = msgp.WrapError(err, "Healing")
return
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "scanning"
err = en.Append(0xa8, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x69, 0x6e, 0x67)
if err != nil {
return
}
err = en.WriteBool(z.Scanning)
if err != nil {
err = msgp.WrapError(err, "Scanning")
return
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "state"
err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteString(z.State)
if err != nil {
err = msgp.WrapError(err, "State")
return
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "uuid"
err = en.Append(0xa4, 0x75, 0x75, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteString(z.UUID)
if err != nil {
err = msgp.WrapError(err, "UUID")
return
}
}
// write "major"
err = en.Append(0xa5, 0x6d, 0x61, 0x6a, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteUint32(z.Major)
if err != nil {
err = msgp.WrapError(err, "Major")
return
}
// write "minor"
err = en.Append(0xa5, 0x6d, 0x69, 0x6e, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteUint32(z.Minor)
if err != nil {
err = msgp.WrapError(err, "Minor")
return
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "model"
err = en.Append(0xa5, 0x6d, 0x6f, 0x64, 0x65, 0x6c)
if err != nil {
return
}
err = en.WriteString(z.Model)
if err != nil {
err = msgp.WrapError(err, "Model")
return
}
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// write "totalspace"
err = en.Append(0xaa, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x70, 0x61, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.TotalSpace)
if err != nil {
err = msgp.WrapError(err, "TotalSpace")
return
}
}
if (zb0001Mask & 0x800) == 0 { // if not omitted
// write "usedspace"
err = en.Append(0xa9, 0x75, 0x73, 0x65, 0x64, 0x73, 0x70, 0x61, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.UsedSpace)
if err != nil {
err = msgp.WrapError(err, "UsedSpace")
return
}
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// write "availspace"
err = en.Append(0xaa, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x73, 0x70, 0x61, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.AvailableSpace)
if err != nil {
err = msgp.WrapError(err, "AvailableSpace")
return
}
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// write "readthroughput"
err = en.Append(0xae, 0x72, 0x65, 0x61, 0x64, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x70, 0x75, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.ReadThroughput)
if err != nil {
err = msgp.WrapError(err, "ReadThroughput")
return
}
}
if (zb0001Mask & 0x4000) == 0 { // if not omitted
// write "writethroughput"
err = en.Append(0xaf, 0x77, 0x72, 0x69, 0x74, 0x65, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x70, 0x75, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.WriteThroughPut)
if err != nil {
err = msgp.WrapError(err, "WriteThroughPut")
return
}
}
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// write "readlatency"
err = en.Append(0xab, 0x72, 0x65, 0x61, 0x64, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
if err != nil {
return
}
err = en.WriteFloat64(z.ReadLatency)
if err != nil {
err = msgp.WrapError(err, "ReadLatency")
return
}
}
if (zb0001Mask & 0x10000) == 0 { // if not omitted
// write "writelatency"
err = en.Append(0xac, 0x77, 0x72, 0x69, 0x74, 0x65, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
if err != nil {
return
}
err = en.WriteFloat64(z.WriteLatency)
if err != nil {
err = msgp.WrapError(err, "WriteLatency")
return
}
}
if (zb0001Mask & 0x20000) == 0 { // if not omitted
// write "utilization"
err = en.Append(0xab, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteFloat64(z.Utilization)
if err != nil {
err = msgp.WrapError(err, "Utilization")
return
}
}
if (zb0001Mask & 0x40000) == 0 { // if not omitted
// write "metrics"
err = en.Append(0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
if err != nil {
return
}
if z.Metrics == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Metrics.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
}
}
if (zb0001Mask & 0x80000) == 0 { // if not omitted
// write "heal_info"
err = en.Append(0xa9, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f)
if err != nil {
return
}
if z.HealInfo == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.HealInfo.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "HealInfo")
return
}
}
}
// write "used_inodes"
err = en.Append(0xab, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.UsedInodes)
if err != nil {
err = msgp.WrapError(err, "UsedInodes")
return
}
if (zb0001Mask & 0x200000) == 0 { // if not omitted
// write "free_inodes"
err = en.Append(0xab, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.FreeInodes)
if err != nil {
err = msgp.WrapError(err, "FreeInodes")
return
}
}
if (zb0001Mask & 0x400000) == 0 { // if not omitted
// write "local"
err = en.Append(0xa5, 0x6c, 0x6f, 0x63, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteBool(z.Local)
if err != nil {
err = msgp.WrapError(err, "Local")
return
}
}
if (zb0001Mask & 0x800000) == 0 { // if not omitted
// write "cacheStats"
err = en.Append(0xaa, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
if z.Cache == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Cache.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
}
}
// write "pool_index"
err = en.Append(0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "set_index"
err = en.Append(0xa9, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "disk_index"
err = en.Append(0xaa, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.DiskIndex)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *Disk) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(27)
var zb0001Mask uint32 /* 27 bits */
_ = zb0001Mask
if z.Endpoint == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.RootDisk == false {
zb0001Len--
zb0001Mask |= 0x2
}
if z.DrivePath == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Healing == false {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Scanning == false {
zb0001Len--
zb0001Mask |= 0x10
}
if z.State == "" {
zb0001Len--
zb0001Mask |= 0x20
}
if z.UUID == "" {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Model == "" {
zb0001Len--
zb0001Mask |= 0x200
}
if z.TotalSpace == 0 {
zb0001Len--
zb0001Mask |= 0x400
}
if z.UsedSpace == 0 {
zb0001Len--
zb0001Mask |= 0x800
}
if z.AvailableSpace == 0 {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.ReadThroughput == 0 {
zb0001Len--
zb0001Mask |= 0x2000
}
if z.WriteThroughPut == 0 {
zb0001Len--
zb0001Mask |= 0x4000
}
if z.ReadLatency == 0 {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.WriteLatency == 0 {
zb0001Len--
zb0001Mask |= 0x10000
}
if z.Utilization == 0 {
zb0001Len--
zb0001Mask |= 0x20000
}
if z.Metrics == nil {
zb0001Len--
zb0001Mask |= 0x40000
}
if z.HealInfo == nil {
zb0001Len--
zb0001Mask |= 0x80000
}
if z.FreeInodes == 0 {
zb0001Len--
zb0001Mask |= 0x200000
}
if z.Local == false {
zb0001Len--
zb0001Mask |= 0x400000
}
if z.Cache == nil {
zb0001Len--
zb0001Mask |= 0x800000
}
// variable map header, size zb0001Len
o = msgp.AppendMapHeader(o, zb0001Len)
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "rootDisk"
o = append(o, 0xa8, 0x72, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x73, 0x6b)
o = msgp.AppendBool(o, z.RootDisk)
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "path"
o = append(o, 0xa4, 0x70, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.DrivePath)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "healing"
o = append(o, 0xa7, 0x68, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67)
o = msgp.AppendBool(o, z.Healing)
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "scanning"
o = append(o, 0xa8, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x69, 0x6e, 0x67)
o = msgp.AppendBool(o, z.Scanning)
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "state"
o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
o = msgp.AppendString(o, z.State)
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "uuid"
o = append(o, 0xa4, 0x75, 0x75, 0x69, 0x64)
o = msgp.AppendString(o, z.UUID)
}
// string "major"
o = append(o, 0xa5, 0x6d, 0x61, 0x6a, 0x6f, 0x72)
o = msgp.AppendUint32(o, z.Major)
// string "minor"
o = append(o, 0xa5, 0x6d, 0x69, 0x6e, 0x6f, 0x72)
o = msgp.AppendUint32(o, z.Minor)
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "model"
o = append(o, 0xa5, 0x6d, 0x6f, 0x64, 0x65, 0x6c)
o = msgp.AppendString(o, z.Model)
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// string "totalspace"
o = append(o, 0xaa, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x70, 0x61, 0x63, 0x65)
o = msgp.AppendUint64(o, z.TotalSpace)
}
if (zb0001Mask & 0x800) == 0 { // if not omitted
// string "usedspace"
o = append(o, 0xa9, 0x75, 0x73, 0x65, 0x64, 0x73, 0x70, 0x61, 0x63, 0x65)
o = msgp.AppendUint64(o, z.UsedSpace)
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// string "availspace"
o = append(o, 0xaa, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x73, 0x70, 0x61, 0x63, 0x65)
o = msgp.AppendUint64(o, z.AvailableSpace)
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// string "readthroughput"
o = append(o, 0xae, 0x72, 0x65, 0x61, 0x64, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x70, 0x75, 0x74)
o = msgp.AppendFloat64(o, z.ReadThroughput)
}
if (zb0001Mask & 0x4000) == 0 { // if not omitted
// string "writethroughput"
o = append(o, 0xaf, 0x77, 0x72, 0x69, 0x74, 0x65, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x70, 0x75, 0x74)
o = msgp.AppendFloat64(o, z.WriteThroughPut)
}
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// string "readlatency"
o = append(o, 0xab, 0x72, 0x65, 0x61, 0x64, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
o = msgp.AppendFloat64(o, z.ReadLatency)
}
if (zb0001Mask & 0x10000) == 0 { // if not omitted
// string "writelatency"
o = append(o, 0xac, 0x77, 0x72, 0x69, 0x74, 0x65, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79)
o = msgp.AppendFloat64(o, z.WriteLatency)
}
if (zb0001Mask & 0x20000) == 0 { // if not omitted
// string "utilization"
o = append(o, 0xab, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e)
o = msgp.AppendFloat64(o, z.Utilization)
}
if (zb0001Mask & 0x40000) == 0 { // if not omitted
// string "metrics"
o = append(o, 0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
if z.Metrics == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Metrics.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
}
}
if (zb0001Mask & 0x80000) == 0 { // if not omitted
// string "heal_info"
o = append(o, 0xa9, 0x68, 0x65, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f)
if z.HealInfo == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.HealInfo.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "HealInfo")
return
}
}
}
// string "used_inodes"
o = append(o, 0xab, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73)
o = msgp.AppendUint64(o, z.UsedInodes)
if (zb0001Mask & 0x200000) == 0 { // if not omitted
// string "free_inodes"
o = append(o, 0xab, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73)
o = msgp.AppendUint64(o, z.FreeInodes)
}
if (zb0001Mask & 0x400000) == 0 { // if not omitted
// string "local"
o = append(o, 0xa5, 0x6c, 0x6f, 0x63, 0x61, 0x6c)
o = msgp.AppendBool(o, z.Local)
}
if (zb0001Mask & 0x800000) == 0 { // if not omitted
// string "cacheStats"
o = append(o, 0xaa, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73)
if z.Cache == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Cache.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
}
}
// string "pool_index"
o = append(o, 0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "set_index"
o = append(o, 0xa9, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "disk_index"
o = append(o, 0xaa, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.DiskIndex)
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *Disk) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint32 /* 21 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
zb0001Mask |= 0x1
case "rootDisk":
z.RootDisk, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RootDisk")
return
}
zb0001Mask |= 0x2
case "path":
z.DrivePath, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DrivePath")
return
}
zb0001Mask |= 0x4
case "healing":
z.Healing, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Healing")
return
}
zb0001Mask |= 0x8
case "scanning":
z.Scanning, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Scanning")
return
}
zb0001Mask |= 0x10
case "state":
z.State, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "State")
return
}
zb0001Mask |= 0x20
case "uuid":
z.UUID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UUID")
return
}
zb0001Mask |= 0x40
case "major":
z.Major, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Major")
return
}
case "minor":
z.Minor, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Minor")
return
}
case "model":
z.Model, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Model")
return
}
zb0001Mask |= 0x80
case "totalspace":
z.TotalSpace, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalSpace")
return
}
zb0001Mask |= 0x100
case "usedspace":
z.UsedSpace, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "UsedSpace")
return
}
zb0001Mask |= 0x200
case "availspace":
z.AvailableSpace, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "AvailableSpace")
return
}
zb0001Mask |= 0x400
case "readthroughput":
z.ReadThroughput, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReadThroughput")
return
}
zb0001Mask |= 0x800
case "writethroughput":
z.WriteThroughPut, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "WriteThroughPut")
return
}
zb0001Mask |= 0x1000
case "readlatency":
z.ReadLatency, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReadLatency")
return
}
zb0001Mask |= 0x2000
case "writelatency":
z.WriteLatency, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "WriteLatency")
return
}
zb0001Mask |= 0x4000
case "utilization":
z.Utilization, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Utilization")
return
}
zb0001Mask |= 0x8000
case "metrics":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Metrics = nil
} else {
if z.Metrics == nil {
z.Metrics = new(DiskMetrics)
}
bts, err = z.Metrics.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
}
zb0001Mask |= 0x10000
case "heal_info":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.HealInfo = nil
} else {
if z.HealInfo == nil {
z.HealInfo = new(HealingDisk)
}
bts, err = z.HealInfo.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "HealInfo")
return
}
}
zb0001Mask |= 0x20000
case "used_inodes":
z.UsedInodes, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "UsedInodes")
return
}
case "free_inodes":
z.FreeInodes, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FreeInodes")
return
}
zb0001Mask |= 0x40000
case "local":
z.Local, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Local")
return
}
zb0001Mask |= 0x80000
case "cacheStats":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Cache = nil
} else {
if z.Cache == nil {
z.Cache = new(CacheStats)
}
bts, err = z.Cache.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
}
zb0001Mask |= 0x100000
case "pool_index":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "set_index":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "disk_index":
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1fffff {
if (zb0001Mask & 0x1) == 0 {
z.Endpoint = ""
}
if (zb0001Mask & 0x2) == 0 {
z.RootDisk = false
}
if (zb0001Mask & 0x4) == 0 {
z.DrivePath = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Healing = false
}
if (zb0001Mask & 0x10) == 0 {
z.Scanning = false
}
if (zb0001Mask & 0x20) == 0 {
z.State = ""
}
if (zb0001Mask & 0x40) == 0 {
z.UUID = ""
}
if (zb0001Mask & 0x80) == 0 {
z.Model = ""
}
if (zb0001Mask & 0x100) == 0 {
z.TotalSpace = 0
}
if (zb0001Mask & 0x200) == 0 {
z.UsedSpace = 0
}
if (zb0001Mask & 0x400) == 0 {
z.AvailableSpace = 0
}
if (zb0001Mask & 0x800) == 0 {
z.ReadThroughput = 0
}
if (zb0001Mask & 0x1000) == 0 {
z.WriteThroughPut = 0
}
if (zb0001Mask & 0x2000) == 0 {
z.ReadLatency = 0
}
if (zb0001Mask & 0x4000) == 0 {
z.WriteLatency = 0
}
if (zb0001Mask & 0x8000) == 0 {
z.Utilization = 0
}
if (zb0001Mask & 0x10000) == 0 {
z.Metrics = nil
}
if (zb0001Mask & 0x20000) == 0 {
z.HealInfo = nil
}
if (zb0001Mask & 0x40000) == 0 {
z.FreeInodes = 0
}
if (zb0001Mask & 0x80000) == 0 {
z.Local = false
}
if (zb0001Mask & 0x100000) == 0 {
z.Cache = nil
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Disk) Msgsize() (s int) {
s = 3 + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 9 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.DrivePath) + 8 + msgp.BoolSize + 9 + msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.State) + 5 + msgp.StringPrefixSize + len(z.UUID) + 6 + msgp.Uint32Size + 6 + msgp.Uint32Size + 6 + msgp.StringPrefixSize + len(z.Model) + 11 + msgp.Uint64Size + 10 + msgp.Uint64Size + 11 + msgp.Uint64Size + 15 + msgp.Float64Size + 16 + msgp.Float64Size + 12 + msgp.Float64Size + 13 + msgp.Float64Size + 12 + msgp.Float64Size + 8
if z.Metrics == nil {
s += msgp.NilSize
} else {
s += z.Metrics.Msgsize()
}
s += 10
if z.HealInfo == nil {
s += msgp.NilSize
} else {
s += z.HealInfo.Msgsize()
}
s += 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 6 + msgp.BoolSize + 11
if z.Cache == nil {
s += msgp.NilSize
} else {
s += z.Cache.Msgsize()
}
s += 11 + msgp.IntSize + 10 + msgp.IntSize + 11 + msgp.IntSize
return
}

// DecodeMsg implements msgp.Decodable
func (z *DiskMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 9 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastMinute":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
if z.LastMinute == nil {
z.LastMinute = make(map[string]TimedAction, zb0002)
} else if len(z.LastMinute) > 0 {
for key := range z.LastMinute {
delete(z.LastMinute, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 TimedAction
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "LastMinute", za0001)
return
}
z.LastMinute[za0001] = za0002
}
zb0001Mask |= 0x1
case "apiCalls":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "APICalls")
return
}
if z.APICalls == nil {
z.APICalls = make(map[string]uint64, zb0003)
} else if len(z.APICalls) > 0 {
for key := range z.APICalls {
delete(z.APICalls, key)
}
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 uint64
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "APICalls")
return
}
za0004, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "APICalls", za0003)
return
}
z.APICalls[za0003] = za0004
}
zb0001Mask |= 0x2
case "totalTokens":
z.TotalTokens, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "TotalTokens")
return
}
zb0001Mask |= 0x4
case "totalWaiting":
z.TotalWaiting, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "TotalWaiting")
return
}
zb0001Mask |= 0x8
case "totalErrorsAvailability":
z.TotalErrorsAvailability, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalErrorsAvailability")
return
}
zb0001Mask |= 0x10
case "totalErrorsTimeout":
z.TotalErrorsTimeout, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalErrorsTimeout")
return
}
zb0001Mask |= 0x20
case "totalWrites":
z.TotalWrites, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalWrites")
return
}
zb0001Mask |= 0x40
case "totalDeletes":
z.TotalDeletes, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalDeletes")
return
}
zb0001Mask |= 0x80
case "apiLatencies":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "APILatencies")
return
}
if z.APILatencies == nil {
z.APILatencies = make(map[string]interface{}, zb0004)
} else if len(z.APILatencies) > 0 {
for key := range z.APILatencies {
delete(z.APILatencies, key)
}
}
for zb0004 > 0 {
zb0004--
var za0005 string
var za0006 interface{}
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "APILatencies")
return
}
za0006, err = dc.ReadIntf()
if err != nil {
err = msgp.WrapError(err, "APILatencies", za0005)
return
}
z.APILatencies[za0005] = za0006
}
zb0001Mask |= 0x100
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1ff {
if (zb0001Mask & 0x1) == 0 {
z.LastMinute = nil
}
if (zb0001Mask & 0x2) == 0 {
z.APICalls = nil
}
if (zb0001Mask & 0x4) == 0 {
z.TotalTokens = 0
}
if (zb0001Mask & 0x8) == 0 {
z.TotalWaiting = 0
}
if (zb0001Mask & 0x10) == 0 {
z.TotalErrorsAvailability = 0
}
if (zb0001Mask & 0x20) == 0 {
z.TotalErrorsTimeout = 0
}
if (zb0001Mask & 0x40) == 0 {
z.TotalWrites = 0
}
if (zb0001Mask & 0x80) == 0 {
z.TotalDeletes = 0
}
if (zb0001Mask & 0x100) == 0 {
z.APILatencies = nil
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *DiskMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(9)
var zb0001Mask uint16 /* 9 bits */
_ = zb0001Mask
if z.LastMinute == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.APICalls == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.TotalTokens == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.TotalWaiting == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.TotalErrorsAvailability == 0 {
zb0001Len--
zb0001Mask |= 0x10
}
if z.TotalErrorsTimeout == 0 {
zb0001Len--
zb0001Mask |= 0x20
}
if z.TotalWrites == 0 {
zb0001Len--
zb0001Mask |= 0x40
}
if z.TotalDeletes == 0 {
zb0001Len--
zb0001Mask |= 0x80
}
if z.APILatencies == nil {
zb0001Len--
zb0001Mask |= 0x100
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "lastMinute"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LastMinute)))
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
for za0001, za0002 := range z.LastMinute {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
err = za0002.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "LastMinute", za0001)
return
}
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "apiCalls"
err = en.Append(0xa8, 0x61, 0x70, 0x69, 0x43, 0x61, 0x6c, 0x6c, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.APICalls)))
if err != nil {
err = msgp.WrapError(err, "APICalls")
return
}
for za0003, za0004 := range z.APICalls {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "APICalls")
return
}
err = en.WriteUint64(za0004)
if err != nil {
err = msgp.WrapError(err, "APICalls", za0003)
return
}
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "totalTokens"
err = en.Append(0xab, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteUint32(z.TotalTokens)
if err != nil {
err = msgp.WrapError(err, "TotalTokens")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "totalWaiting"
err = en.Append(0xac, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67)
if err != nil {
return
}
err = en.WriteUint32(z.TotalWaiting)
if err != nil {
err = msgp.WrapError(err, "TotalWaiting")
return
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "totalErrorsAvailability"
err = en.Append(0xb7, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteUint64(z.TotalErrorsAvailability)
if err != nil {
err = msgp.WrapError(err, "TotalErrorsAvailability")
return
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "totalErrorsTimeout"
err = en.Append(0xb2, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.TotalErrorsTimeout)
if err != nil {
err = msgp.WrapError(err, "TotalErrorsTimeout")
return
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "totalWrites"
err = en.Append(0xab, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.TotalWrites)
if err != nil {
err = msgp.WrapError(err, "TotalWrites")
return
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "totalDeletes"
err = en.Append(0xac, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.TotalDeletes)
if err != nil {
err = msgp.WrapError(err, "TotalDeletes")
return
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// write "apiLatencies"
err = en.Append(0xac, 0x61, 0x70, 0x69, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.APILatencies)))
if err != nil {
err = msgp.WrapError(err, "APILatencies")
return
}
for za0005, za0006 := range z.APILatencies {
err = en.WriteString(za0005)
if err != nil {
err = msgp.WrapError(err, "APILatencies")
return
}
err = en.WriteIntf(za0006)
if err != nil {
err = msgp.WrapError(err, "APILatencies", za0005)
return
}
}
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *DiskMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(9)
var zb0001Mask uint16 /* 9 bits */
_ = zb0001Mask
if z.LastMinute == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.APICalls == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.TotalTokens == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.TotalWaiting == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.TotalErrorsAvailability == 0 {
zb0001Len--
zb0001Mask |= 0x10
}
if z.TotalErrorsTimeout == 0 {
zb0001Len--
zb0001Mask |= 0x20
}
if z.TotalWrites == 0 {
zb0001Len--
zb0001Mask |= 0x40
}
if z.TotalDeletes == 0 {
zb0001Len--
zb0001Mask |= 0x80
}
if z.APILatencies == nil {
zb0001Len--
zb0001Mask |= 0x100
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "lastMinute"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65)
o = msgp.AppendMapHeader(o, uint32(len(z.LastMinute)))
for za0001, za0002 := range z.LastMinute {
o = msgp.AppendString(o, za0001)
o, err = za0002.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "LastMinute", za0001)
return
}
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "apiCalls"
o = append(o, 0xa8, 0x61, 0x70, 0x69, 0x43, 0x61, 0x6c, 0x6c, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.APICalls)))
for za0003, za0004 := range z.APICalls {
o = msgp.AppendString(o, za0003)
o = msgp.AppendUint64(o, za0004)
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "totalTokens"
o = append(o, 0xab, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73)
o = msgp.AppendUint32(o, z.TotalTokens)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "totalWaiting"
o = append(o, 0xac, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67)
o = msgp.AppendUint32(o, z.TotalWaiting)
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "totalErrorsAvailability"
o = append(o, 0xb7, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79)
o = msgp.AppendUint64(o, z.TotalErrorsAvailability)
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "totalErrorsTimeout"
o = append(o, 0xb2, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74)
o = msgp.AppendUint64(o, z.TotalErrorsTimeout)
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "totalWrites"
o = append(o, 0xab, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73)
o = msgp.AppendUint64(o, z.TotalWrites)
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "totalDeletes"
o = append(o, 0xac, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x73)
o = msgp.AppendUint64(o, z.TotalDeletes)
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// string "apiLatencies"
o = append(o, 0xac, 0x61, 0x70, 0x69, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.APILatencies)))
for za0005, za0006 := range z.APILatencies {
o = msgp.AppendString(o, za0005)
o, err = msgp.AppendIntf(o, za0006)
if err != nil {
err = msgp.WrapError(err, "APILatencies", za0005)
return
}
}
}
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *DiskMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 9 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastMinute":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
if z.LastMinute == nil {
z.LastMinute = make(map[string]TimedAction, zb0002)
} else if len(z.LastMinute) > 0 {
for key := range z.LastMinute {
delete(z.LastMinute, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 TimedAction
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
bts, err = za0002.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", za0001)
return
}
z.LastMinute[za0001] = za0002
}
zb0001Mask |= 0x1
case "apiCalls":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APICalls")
return
}
if z.APICalls == nil {
z.APICalls = make(map[string]uint64, zb0003)
} else if len(z.APICalls) > 0 {
for key := range z.APICalls {
delete(z.APICalls, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 uint64
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APICalls")
return
}
za0004, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "APICalls", za0003)
return
}
z.APICalls[za0003] = za0004
}
zb0001Mask |= 0x2
case "totalTokens":
z.TotalTokens, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalTokens")
return
}
zb0001Mask |= 0x4
case "totalWaiting":
z.TotalWaiting, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalWaiting")
return
}
zb0001Mask |= 0x8
case "totalErrorsAvailability":
z.TotalErrorsAvailability, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalErrorsAvailability")
return
}
zb0001Mask |= 0x10
case "totalErrorsTimeout":
z.TotalErrorsTimeout, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalErrorsTimeout")
return
}
zb0001Mask |= 0x20
case "totalWrites":
z.TotalWrites, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalWrites")
return
}
zb0001Mask |= 0x40
case "totalDeletes":
z.TotalDeletes, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalDeletes")
return
}
zb0001Mask |= 0x80
case "apiLatencies":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APILatencies")
return
}
if z.APILatencies == nil {
z.APILatencies = make(map[string]interface{}, zb0004)
} else if len(z.APILatencies) > 0 {
for key := range z.APILatencies {
delete(z.APILatencies, key)
}
}
for zb0004 > 0 {
var za0005 string
var za0006 interface{}
zb0004--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APILatencies")
return
}
za0006, bts, err = msgp.ReadIntfBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APILatencies", za0005)
return
}
z.APILatencies[za0005] = za0006
}
zb0001Mask |= 0x100
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1ff {
if (zb0001Mask & 0x1) == 0 {
z.LastMinute = nil
}
if (zb0001Mask & 0x2) == 0 {
z.APICalls = nil
}
if (zb0001Mask & 0x4) == 0 {
z.TotalTokens = 0
}
if (zb0001Mask & 0x8) == 0 {
z.TotalWaiting = 0
}
if (zb0001Mask & 0x10) == 0 {
z.TotalErrorsAvailability = 0
}
if (zb0001Mask & 0x20) == 0 {
z.TotalErrorsTimeout = 0
}
if (zb0001Mask & 0x40) == 0 {
z.TotalWrites = 0
}
if (zb0001Mask & 0x80) == 0 {
z.TotalDeletes = 0
}
if (zb0001Mask & 0x100) == 0 {
z.APILatencies = nil
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DiskMetrics) Msgsize() (s int) {
s = 1 + 11 + msgp.MapHeaderSize
if z.LastMinute != nil {
for za0001, za0002 := range z.LastMinute {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
}
}
s += 9 + msgp.MapHeaderSize
if z.APICalls != nil {
for za0003, za0004 := range z.APICalls {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.Uint64Size
}
}
s += 12 + msgp.Uint32Size + 13 + msgp.Uint32Size + 24 + msgp.Uint64Size + 19 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 13 + msgp.MapHeaderSize
if z.APILatencies != nil {
for za0005, za0006 := range z.APILatencies {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + msgp.GuessSize(za0006)
}
}
return
}

// DecodeMsg implements msgp.Decodable
func (z *ErasureBackend) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "backendType":
{
var zb0002 string
zb0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = backendType(zb0002)
}
case "onlineDisks":
z.OnlineDisks, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
case "offlineDisks":
z.OfflineDisks, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
case "standardSCParity":
z.StandardSCParity, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "StandardSCParity")
return
}
case "rrSCParity":
z.RRSCParity, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RRSCParity")
return
}
case "totalSets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "TotalSets")
return
}
if cap(z.TotalSets) >= int(zb0003) {
z.TotalSets = (z.TotalSets)[:zb0003]
} else {
z.TotalSets = make([]int, zb0003)
}
for za0001 := range z.TotalSets {
z.TotalSets[za0001], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "TotalSets", za0001)
return
}
}
case "totalDrivesPerSet":
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet")
return
}
if cap(z.DrivesPerSet) >= int(zb0004) {
z.DrivesPerSet = (z.DrivesPerSet)[:zb0004]
} else {
z.DrivesPerSet = make([]int, zb0004)
}
for za0002 := range z.DrivesPerSet {
z.DrivesPerSet[za0002], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet", za0002)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *ErasureBackend) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 7
// write "backendType"
err = en.Append(0x87, 0xab, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(string(z.Type))
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "onlineDisks"
err = en.Append(0xab, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.OnlineDisks)
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
// write "offlineDisks"
err = en.Append(0xac, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.OfflineDisks)
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
// write "standardSCParity"
err = en.Append(0xb0, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteInt(z.StandardSCParity)
if err != nil {
err = msgp.WrapError(err, "StandardSCParity")
return
}
// write "rrSCParity"
err = en.Append(0xaa, 0x72, 0x72, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteInt(z.RRSCParity)
if err != nil {
err = msgp.WrapError(err, "RRSCParity")
return
}
// write "totalSets"
err = en.Append(0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.TotalSets)))
if err != nil {
err = msgp.WrapError(err, "TotalSets")
return
}
for za0001 := range z.TotalSets {
err = en.WriteInt(z.TotalSets[za0001])
if err != nil {
err = msgp.WrapError(err, "TotalSets", za0001)
return
}
}
// write "totalDrivesPerSet"
err = en.Append(0xb1, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x72, 0x69, 0x76, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.DrivesPerSet)))
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet")
return
}
for za0002 := range z.DrivesPerSet {
err = en.WriteInt(z.DrivesPerSet[za0002])
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet", za0002)
return
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *ErasureBackend) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 7
// string "backendType"
o = append(o, 0x87, 0xab, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, string(z.Type))
// string "onlineDisks"
o = append(o, 0xab, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendInt(o, z.OnlineDisks)
// string "offlineDisks"
o = append(o, 0xac, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendInt(o, z.OfflineDisks)
// string "standardSCParity"
o = append(o, 0xb0, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
o = msgp.AppendInt(o, z.StandardSCParity)
// string "rrSCParity"
o = append(o, 0xaa, 0x72, 0x72, 0x53, 0x43, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79)
o = msgp.AppendInt(o, z.RRSCParity)
// string "totalSets"
o = append(o, 0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.TotalSets)))
for za0001 := range z.TotalSets {
o = msgp.AppendInt(o, z.TotalSets[za0001])
}
// string "totalDrivesPerSet"
o = append(o, 0xb1, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x72, 0x69, 0x76, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x74)
o = msgp.AppendArrayHeader(o, uint32(len(z.DrivesPerSet)))
for za0002 := range z.DrivesPerSet {
o = msgp.AppendInt(o, z.DrivesPerSet[za0002])
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *ErasureBackend) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "backendType":
{
var zb0002 string
zb0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = backendType(zb0002)
}
case "onlineDisks":
z.OnlineDisks, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnlineDisks")
return
}
case "offlineDisks":
z.OfflineDisks, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OfflineDisks")
return
}
case "standardSCParity":
z.StandardSCParity, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StandardSCParity")
return
}
case "rrSCParity":
z.RRSCParity, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RRSCParity")
return
}
case "totalSets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalSets")
return
}
if cap(z.TotalSets) >= int(zb0003) {
z.TotalSets = (z.TotalSets)[:zb0003]
} else {
z.TotalSets = make([]int, zb0003)
}
for za0001 := range z.TotalSets {
z.TotalSets[za0001], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalSets", za0001)
return
}
}
case "totalDrivesPerSet":
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet")
return
}
if cap(z.DrivesPerSet) >= int(zb0004) {
z.DrivesPerSet = (z.DrivesPerSet)[:zb0004]
} else {
z.DrivesPerSet = make([]int, zb0004)
}
for za0002 := range z.DrivesPerSet {
z.DrivesPerSet[za0002], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DrivesPerSet", za0002)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ErasureBackend) Msgsize() (s int) {
s = 1 + 12 + msgp.StringPrefixSize + len(string(z.Type)) + 12 + msgp.IntSize + 13 + msgp.IntSize + 17 + msgp.IntSize + 11 + msgp.IntSize + 10 + msgp.ArrayHeaderSize + (len(z.TotalSets) * (msgp.IntSize)) + 18 + msgp.ArrayHeaderSize + (len(z.DrivesPerSet) * (msgp.IntSize))
return
}
// DecodeMsg implements msgp.Decodable
func (z *ErasureSetInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.ID, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "rawUsage":
z.RawUsage, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RawUsage")
return
}
case "rawCapacity":
z.RawCapacity, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RawCapacity")
return
}
case "usage":
z.Usage, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
case "objectsCount":
z.ObjectsCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsCount")
return
}
case "versionsCount":
z.VersionsCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "VersionsCount")
return
}
case "deleteMarkersCount":
z.DeleteMarkersCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersCount")
return
}
case "healDisks":
z.HealDisks, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "HealDisks")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ErasureSetInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "id"
err = en.Append(0x88, 0xa2, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteInt(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "rawUsage"
err = en.Append(0xa8, 0x72, 0x61, 0x77, 0x55, 0x73, 0x61, 0x67, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.RawUsage)
if err != nil {
err = msgp.WrapError(err, "RawUsage")
return
}
// write "rawCapacity"
err = en.Append(0xab, 0x72, 0x61, 0x77, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
if err != nil {
return
}
err = en.WriteUint64(z.RawCapacity)
if err != nil {
err = msgp.WrapError(err, "RawCapacity")
return
}
// write "usage"
err = en.Append(0xa5, 0x75, 0x73, 0x61, 0x67, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Usage)
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
// write "objectsCount"
err = en.Append(0xac, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsCount)
if err != nil {
err = msgp.WrapError(err, "ObjectsCount")
return
}
// write "versionsCount"
err = en.Append(0xad, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.VersionsCount)
if err != nil {
err = msgp.WrapError(err, "VersionsCount")
return
}
// write "deleteMarkersCount"
err = en.Append(0xb2, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.DeleteMarkersCount)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersCount")
return
}
// write "healDisks"
err = en.Append(0xa9, 0x68, 0x65, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.HealDisks)
if err != nil {
err = msgp.WrapError(err, "HealDisks")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ErasureSetInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "id"
o = append(o, 0x88, 0xa2, 0x69, 0x64)
o = msgp.AppendInt(o, z.ID)
// string "rawUsage"
o = append(o, 0xa8, 0x72, 0x61, 0x77, 0x55, 0x73, 0x61, 0x67, 0x65)
o = msgp.AppendUint64(o, z.RawUsage)
// string "rawCapacity"
o = append(o, 0xab, 0x72, 0x61, 0x77, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79)
o = msgp.AppendUint64(o, z.RawCapacity)
// string "usage"
o = append(o, 0xa5, 0x75, 0x73, 0x61, 0x67, 0x65)
o = msgp.AppendUint64(o, z.Usage)
// string "objectsCount"
o = append(o, 0xac, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ObjectsCount)
// string "versionsCount"
o = append(o, 0xad, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.VersionsCount)
// string "deleteMarkersCount"
o = append(o, 0xb2, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.DeleteMarkersCount)
// string "healDisks"
o = append(o, 0xa9, 0x68, 0x65, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendInt(o, z.HealDisks)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ErasureSetInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.ID, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "rawUsage":
z.RawUsage, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RawUsage")
return
}
case "rawCapacity":
z.RawCapacity, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RawCapacity")
return
}
case "usage":
z.Usage, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
case "objectsCount":
z.ObjectsCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsCount")
return
}
case "versionsCount":
z.VersionsCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "VersionsCount")
return
}
case "deleteMarkersCount":
z.DeleteMarkersCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkersCount")
return
}
case "healDisks":
z.HealDisks, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealDisks")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ErasureSetInfo) Msgsize() (s int) {
s = 1 + 3 + msgp.IntSize + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 6 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 19 + msgp.Uint64Size + 10 + msgp.IntSize
return
}
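// exampleErasureSetInfoRoundTrip is an illustrative sketch, not produced by
// msgp: it shows how the generated MarshalMsg/UnmarshalMsg pair round-trips an
// ErasureSetInfo through its MessagePack form. Only fields visible in the
// accessors above are used, and the values are hypothetical.
func exampleErasureSetInfoRoundTrip() (ErasureSetInfo, error) {
	in := ErasureSetInfo{ID: 1, RawUsage: 1 << 20, RawCapacity: 1 << 30, ObjectsCount: 42}
	// MarshalMsg appends to the supplied slice; passing nil allocates a new one.
	buf, err := in.MarshalMsg(nil)
	if err != nil {
		return ErasureSetInfo{}, err
	}
	var out ErasureSetInfo
	// UnmarshalMsg returns any bytes left over after the decoded message.
	if _, err := out.UnmarshalMsg(buf); err != nil {
		return ErasureSetInfo{}, err
	}
	return out, nil
}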
// DecodeMsg implements msgp.Decodable
func (z *FSBackend) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "backendType":
{
var zb0002 string
zb0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = backendType(zb0002)
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z FSBackend) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "backendType"
err = en.Append(0x81, 0xab, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(string(z.Type))
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z FSBackend) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "backendType"
o = append(o, 0x81, 0xab, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, string(z.Type))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *FSBackend) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "backendType":
{
var zb0002 string
zb0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = backendType(zb0002)
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z FSBackend) Msgsize() (s int) {
s = 1 + 12 + msgp.StringPrefixSize + len(string(z.Type))
return
}
// DecodeMsg implements msgp.Decodable
func (z *GCStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "last_gc":
z.LastGC, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastGC")
return
}
case "num_gc":
z.NumGC, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "NumGC")
return
}
case "pause_total":
z.PauseTotal, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "PauseTotal")
return
}
case "pause":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Pause")
return
}
if cap(z.Pause) >= int(zb0002) {
z.Pause = (z.Pause)[:zb0002]
} else {
z.Pause = make([]time.Duration, zb0002)
}
for za0001 := range z.Pause {
z.Pause[za0001], err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "Pause", za0001)
return
}
}
case "pause_end":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "PauseEnd")
return
}
if cap(z.PauseEnd) >= int(zb0003) {
z.PauseEnd = (z.PauseEnd)[:zb0003]
} else {
z.PauseEnd = make([]time.Time, zb0003)
}
for za0002 := range z.PauseEnd {
z.PauseEnd[za0002], err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "PauseEnd", za0002)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *GCStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 5
// write "last_gc"
err = en.Append(0x85, 0xa7, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x67, 0x63)
if err != nil {
return
}
err = en.WriteTime(z.LastGC)
if err != nil {
err = msgp.WrapError(err, "LastGC")
return
}
// write "num_gc"
err = en.Append(0xa6, 0x6e, 0x75, 0x6d, 0x5f, 0x67, 0x63)
if err != nil {
return
}
err = en.WriteInt64(z.NumGC)
if err != nil {
err = msgp.WrapError(err, "NumGC")
return
}
// write "pause_total"
err = en.Append(0xab, 0x70, 0x61, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteDuration(z.PauseTotal)
if err != nil {
err = msgp.WrapError(err, "PauseTotal")
return
}
// write "pause"
err = en.Append(0xa5, 0x70, 0x61, 0x75, 0x73, 0x65)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Pause)))
if err != nil {
err = msgp.WrapError(err, "Pause")
return
}
for za0001 := range z.Pause {
err = en.WriteDuration(z.Pause[za0001])
if err != nil {
err = msgp.WrapError(err, "Pause", za0001)
return
}
}
// write "pause_end"
err = en.Append(0xa9, 0x70, 0x61, 0x75, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x64)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.PauseEnd)))
if err != nil {
err = msgp.WrapError(err, "PauseEnd")
return
}
for za0002 := range z.PauseEnd {
err = en.WriteTime(z.PauseEnd[za0002])
if err != nil {
err = msgp.WrapError(err, "PauseEnd", za0002)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *GCStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 5
// string "last_gc"
o = append(o, 0x85, 0xa7, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x67, 0x63)
o = msgp.AppendTime(o, z.LastGC)
// string "num_gc"
o = append(o, 0xa6, 0x6e, 0x75, 0x6d, 0x5f, 0x67, 0x63)
o = msgp.AppendInt64(o, z.NumGC)
// string "pause_total"
o = append(o, 0xab, 0x70, 0x61, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendDuration(o, z.PauseTotal)
// string "pause"
o = append(o, 0xa5, 0x70, 0x61, 0x75, 0x73, 0x65)
o = msgp.AppendArrayHeader(o, uint32(len(z.Pause)))
for za0001 := range z.Pause {
o = msgp.AppendDuration(o, z.Pause[za0001])
}
// string "pause_end"
o = append(o, 0xa9, 0x70, 0x61, 0x75, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x64)
o = msgp.AppendArrayHeader(o, uint32(len(z.PauseEnd)))
for za0002 := range z.PauseEnd {
o = msgp.AppendTime(o, z.PauseEnd[za0002])
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *GCStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "last_gc":
z.LastGC, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastGC")
return
}
case "num_gc":
z.NumGC, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "NumGC")
return
}
case "pause_total":
z.PauseTotal, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PauseTotal")
return
}
case "pause":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Pause")
return
}
if cap(z.Pause) >= int(zb0002) {
z.Pause = (z.Pause)[:zb0002]
} else {
z.Pause = make([]time.Duration, zb0002)
}
for za0001 := range z.Pause {
z.Pause[za0001], bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Pause", za0001)
return
}
}
case "pause_end":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PauseEnd")
return
}
if cap(z.PauseEnd) >= int(zb0003) {
z.PauseEnd = (z.PauseEnd)[:zb0003]
} else {
z.PauseEnd = make([]time.Time, zb0003)
}
for za0002 := range z.PauseEnd {
z.PauseEnd[za0002], bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PauseEnd", za0002)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *GCStats) Msgsize() (s int) {
s = 1 + 8 + msgp.TimeSize + 7 + msgp.Int64Size + 12 + msgp.DurationSize + 6 + msgp.ArrayHeaderSize + (len(z.Pause) * (msgp.DurationSize)) + 10 + msgp.ArrayHeaderSize + (len(z.PauseEnd) * (msgp.TimeSize))
return
}
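// exampleGCStatsMarshal is an illustrative sketch, not produced by msgp: it
// shows the intended use of Msgsize as an upper-bound hint, pre-sizing the
// destination slice before MarshalMsg appends to it. Values are hypothetical.
func exampleGCStatsMarshal() ([]byte, error) {
	st := GCStats{
		NumGC:      3,
		PauseTotal: 5 * time.Millisecond,
		Pause:      []time.Duration{time.Millisecond, 2 * time.Millisecond, 2 * time.Millisecond},
	}
	// Msgsize is an upper bound, so the append inside MarshalMsg should not
	// need to grow the buffer again.
	buf := make([]byte, 0, st.Msgsize())
	return st.MarshalMsg(buf)
}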
// DecodeMsg implements msgp.Decodable
func (z *InfoMessage) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 13 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "mode":
z.Mode, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Mode")
return
}
zb0001Mask |= 0x1
case "domain":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Domain")
return
}
if cap(z.Domain) >= int(zb0002) {
z.Domain = (z.Domain)[:zb0002]
} else {
z.Domain = make([]string, zb0002)
}
for za0001 := range z.Domain {
z.Domain[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Domain", za0001)
return
}
}
zb0001Mask |= 0x2
case "region":
z.Region, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
zb0001Mask |= 0x4
case "sqsARN":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "SQSARN")
return
}
if cap(z.SQSARN) >= int(zb0003) {
z.SQSARN = (z.SQSARN)[:zb0003]
} else {
z.SQSARN = make([]string, zb0003)
}
for za0002 := range z.SQSARN {
z.SQSARN[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SQSARN", za0002)
return
}
}
zb0001Mask |= 0x8
case "deploymentID":
z.DeploymentID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "DeploymentID")
return
}
zb0001Mask |= 0x10
case "buckets":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Buckets.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Buckets", "Count")
return
}
case "error":
z.Buckets.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Buckets", "Error")
return
}
zb0004Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
z.Buckets.Error = ""
}
zb0001Mask |= 0x20
case "objects":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
var zb0005Mask uint8 /* 1 bits */
_ = zb0005Mask
for zb0005 > 0 {
zb0005--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Objects.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Objects", "Count")
return
}
case "error":
z.Objects.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Objects", "Error")
return
}
zb0005Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
}
}
// Clear omitted fields.
if (zb0005Mask & 0x1) == 0 {
z.Objects.Error = ""
}
zb0001Mask |= 0x40
case "versions":
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Versions")
return
}
var zb0006Mask uint8 /* 1 bits */
_ = zb0006Mask
for zb0006 > 0 {
zb0006--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Versions")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Versions.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Versions", "Count")
return
}
case "error":
z.Versions.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Versions", "Error")
return
}
zb0006Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Versions")
return
}
}
}
// Clear omitted fields.
if (zb0006Mask & 0x1) == 0 {
z.Versions.Error = ""
}
zb0001Mask |= 0x80
case "deletemarkers":
var zb0007 uint32
zb0007, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
var zb0007Mask uint8 /* 1 bits */
_ = zb0007Mask
for zb0007 > 0 {
zb0007--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.DeleteMarkers.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers", "Count")
return
}
case "error":
z.DeleteMarkers.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers", "Error")
return
}
zb0007Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
}
}
// Clear omitted fields.
if (zb0007Mask & 0x1) == 0 {
z.DeleteMarkers.Error = ""
}
zb0001Mask |= 0x100
case "usage":
var zb0008 uint32
zb0008, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
var zb0008Mask uint8 /* 1 bits */
_ = zb0008Mask
for zb0008 > 0 {
zb0008--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
switch msgp.UnsafeString(field) {
case "size":
z.Usage.Size, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Usage", "Size")
return
}
case "error":
z.Usage.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Usage", "Error")
return
}
zb0008Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
}
}
// Clear omitted fields.
if (zb0008Mask & 0x1) == 0 {
z.Usage.Error = ""
}
zb0001Mask |= 0x200
case "services":
err = z.Services.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Services")
return
}
zb0001Mask |= 0x400
case "backend":
err = z.Backend.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
zb0001Mask |= 0x800
case "servers":
var zb0009 uint32
zb0009, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Servers")
return
}
if cap(z.Servers) >= int(zb0009) {
z.Servers = (z.Servers)[:zb0009]
} else {
z.Servers = make([]ServerProperties, zb0009)
}
for za0003 := range z.Servers {
err = z.Servers[za0003].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Servers", za0003)
return
}
}
zb0001Mask |= 0x1000
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1fff {
if (zb0001Mask & 0x1) == 0 {
z.Mode = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Domain = nil
}
if (zb0001Mask & 0x4) == 0 {
z.Region = ""
}
if (zb0001Mask & 0x8) == 0 {
z.SQSARN = nil
}
if (zb0001Mask & 0x10) == 0 {
z.DeploymentID = ""
}
if (zb0001Mask & 0x20) == 0 {
z.Buckets = (Buckets{})
}
if (zb0001Mask & 0x40) == 0 {
z.Objects = (Objects{})
}
if (zb0001Mask & 0x80) == 0 {
z.Versions = (Versions{})
}
if (zb0001Mask & 0x100) == 0 {
z.DeleteMarkers = (DeleteMarkers{})
}
if (zb0001Mask & 0x200) == 0 {
z.Usage = (Usage{})
}
if (zb0001Mask & 0x400) == 0 {
z.Services = Services{}
}
if (zb0001Mask & 0x800) == 0 {
z.Backend = ErasureBackend{}
}
if (zb0001Mask & 0x1000) == 0 {
z.Servers = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *InfoMessage) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(13)
var zb0001Mask uint16 /* 13 bits */
_ = zb0001Mask
if z.Mode == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Domain == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Region == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.SQSARN == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.DeploymentID == "" {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Buckets == (Buckets{}) {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Objects == (Objects{}) {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Versions == (Versions{}) {
zb0001Len--
zb0001Mask |= 0x80
}
if z.DeleteMarkers == (DeleteMarkers{}) {
zb0001Len--
zb0001Mask |= 0x100
}
if z.Usage == (Usage{}) {
zb0001Len--
zb0001Mask |= 0x200
}
if z.Servers == nil {
zb0001Len--
zb0001Mask |= 0x1000
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "mode"
err = en.Append(0xa4, 0x6d, 0x6f, 0x64, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Mode)
if err != nil {
err = msgp.WrapError(err, "Mode")
return
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "domain"
err = en.Append(0xa6, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Domain)))
if err != nil {
err = msgp.WrapError(err, "Domain")
return
}
for za0001 := range z.Domain {
err = en.WriteString(z.Domain[za0001])
if err != nil {
err = msgp.WrapError(err, "Domain", za0001)
return
}
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "region"
err = en.Append(0xa6, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Region)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "sqsARN"
err = en.Append(0xa6, 0x73, 0x71, 0x73, 0x41, 0x52, 0x4e)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.SQSARN)))
if err != nil {
err = msgp.WrapError(err, "SQSARN")
return
}
for za0002 := range z.SQSARN {
err = en.WriteString(z.SQSARN[za0002])
if err != nil {
err = msgp.WrapError(err, "SQSARN", za0002)
return
}
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "deploymentID"
err = en.Append(0xac, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.DeploymentID)
if err != nil {
err = msgp.WrapError(err, "DeploymentID")
return
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "buckets"
err = en.Append(0xa7, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
// check for omitted fields
zb0002Len := uint32(2)
var zb0002Mask uint8 /* 2 bits */
_ = zb0002Mask
if z.Buckets.Error == "" {
zb0002Len--
zb0002Mask |= 0x2
}
// variable map header, size zb0002Len
err = en.Append(0x80 | uint8(zb0002Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0002Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Buckets.Count)
if err != nil {
err = msgp.WrapError(err, "Buckets", "Count")
return
}
if (zb0002Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Buckets.Error)
if err != nil {
err = msgp.WrapError(err, "Buckets", "Error")
return
}
}
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "objects"
err = en.Append(0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
if err != nil {
return
}
// check for omitted fields
zb0003Len := uint32(2)
var zb0003Mask uint8 /* 2 bits */
_ = zb0003Mask
if z.Objects.Error == "" {
zb0003Len--
zb0003Mask |= 0x2
}
// variable map header, size zb0003Len
err = en.Append(0x80 | uint8(zb0003Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0003Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Objects.Count)
if err != nil {
err = msgp.WrapError(err, "Objects", "Count")
return
}
if (zb0003Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Objects.Error)
if err != nil {
err = msgp.WrapError(err, "Objects", "Error")
return
}
}
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "versions"
err = en.Append(0xa8, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
// check for omitted fields
zb0004Len := uint32(2)
var zb0004Mask uint8 /* 2 bits */
_ = zb0004Mask
if z.Versions.Error == "" {
zb0004Len--
zb0004Mask |= 0x2
}
// variable map header, size zb0004Len
err = en.Append(0x80 | uint8(zb0004Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0004Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Versions.Count)
if err != nil {
err = msgp.WrapError(err, "Versions", "Count")
return
}
if (zb0004Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Versions.Error)
if err != nil {
err = msgp.WrapError(err, "Versions", "Error")
return
}
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// write "deletemarkers"
err = en.Append(0xad, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73)
if err != nil {
return
}
// check for omitted fields
zb0005Len := uint32(2)
var zb0005Mask uint8 /* 2 bits */
_ = zb0005Mask
if z.DeleteMarkers.Error == "" {
zb0005Len--
zb0005Mask |= 0x2
}
// variable map header, size zb0005Len
err = en.Append(0x80 | uint8(zb0005Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0005Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.DeleteMarkers.Count)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers", "Count")
return
}
if (zb0005Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.DeleteMarkers.Error)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers", "Error")
return
}
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "usage"
err = en.Append(0xa5, 0x75, 0x73, 0x61, 0x67, 0x65)
if err != nil {
return
}
// check for omitted fields
zb0006Len := uint32(2)
var zb0006Mask uint8 /* 2 bits */
_ = zb0006Mask
if z.Usage.Error == "" {
zb0006Len--
zb0006Mask |= 0x2
}
// variable map header, size zb0006Len
err = en.Append(0x80 | uint8(zb0006Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0006Len != 0 {
// write "size"
err = en.Append(0xa4, 0x73, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Usage.Size)
if err != nil {
err = msgp.WrapError(err, "Usage", "Size")
return
}
if (zb0006Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Usage.Error)
if err != nil {
err = msgp.WrapError(err, "Usage", "Error")
return
}
}
}
}
// write "services"
err = en.Append(0xa8, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73)
if err != nil {
return
}
err = z.Services.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Services")
return
}
// write "backend"
err = en.Append(0xa7, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64)
if err != nil {
return
}
err = z.Backend.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// write "servers"
err = en.Append(0xa7, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Servers)))
if err != nil {
err = msgp.WrapError(err, "Servers")
return
}
for za0003 := range z.Servers {
err = z.Servers[za0003].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Servers", za0003)
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *InfoMessage) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(13)
var zb0001Mask uint16 /* 13 bits */
_ = zb0001Mask
if z.Mode == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Domain == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Region == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.SQSARN == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.DeploymentID == "" {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Buckets == (Buckets{}) {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Objects == (Objects{}) {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Versions == (Versions{}) {
zb0001Len--
zb0001Mask |= 0x80
}
if z.DeleteMarkers == (DeleteMarkers{}) {
zb0001Len--
zb0001Mask |= 0x100
}
if z.Usage == (Usage{}) {
zb0001Len--
zb0001Mask |= 0x200
}
if z.Servers == nil {
zb0001Len--
zb0001Mask |= 0x1000
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "mode"
o = append(o, 0xa4, 0x6d, 0x6f, 0x64, 0x65)
o = msgp.AppendString(o, z.Mode)
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "domain"
o = append(o, 0xa6, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e)
o = msgp.AppendArrayHeader(o, uint32(len(z.Domain)))
for za0001 := range z.Domain {
o = msgp.AppendString(o, z.Domain[za0001])
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "region"
o = append(o, 0xa6, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Region)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "sqsARN"
o = append(o, 0xa6, 0x73, 0x71, 0x73, 0x41, 0x52, 0x4e)
o = msgp.AppendArrayHeader(o, uint32(len(z.SQSARN)))
for za0002 := range z.SQSARN {
o = msgp.AppendString(o, z.SQSARN[za0002])
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "deploymentID"
o = append(o, 0xac, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.DeploymentID)
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "buckets"
o = append(o, 0xa7, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
// check for omitted fields
zb0002Len := uint32(2)
var zb0002Mask uint8 /* 2 bits */
_ = zb0002Mask
if z.Buckets.Error == "" {
zb0002Len--
zb0002Mask |= 0x2
}
// variable map header, size zb0002Len
o = append(o, 0x80|uint8(zb0002Len))
// skip if no fields are to be emitted
if zb0002Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Buckets.Count)
if (zb0002Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Buckets.Error)
}
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "objects"
o = append(o, 0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
// check for omitted fields
zb0003Len := uint32(2)
var zb0003Mask uint8 /* 2 bits */
_ = zb0003Mask
if z.Objects.Error == "" {
zb0003Len--
zb0003Mask |= 0x2
}
// variable map header, size zb0003Len
o = append(o, 0x80|uint8(zb0003Len))
// skip if no fields are to be emitted
if zb0003Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Objects.Count)
if (zb0003Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Objects.Error)
}
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "versions"
o = append(o, 0xa8, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
// check for omitted fields
zb0004Len := uint32(2)
var zb0004Mask uint8 /* 2 bits */
_ = zb0004Mask
if z.Versions.Error == "" {
zb0004Len--
zb0004Mask |= 0x2
}
// variable map header, size zb0004Len
o = append(o, 0x80|uint8(zb0004Len))
// skip if no fields are to be emitted
if zb0004Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Versions.Count)
if (zb0004Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Versions.Error)
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// string "deletemarkers"
o = append(o, 0xad, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x72, 0x73)
// check for omitted fields
zb0005Len := uint32(2)
var zb0005Mask uint8 /* 2 bits */
_ = zb0005Mask
if z.DeleteMarkers.Error == "" {
zb0005Len--
zb0005Mask |= 0x2
}
// variable map header, size zb0005Len
o = append(o, 0x80|uint8(zb0005Len))
// skip if no fields are to be emitted
if zb0005Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.DeleteMarkers.Count)
if (zb0005Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.DeleteMarkers.Error)
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "usage"
o = append(o, 0xa5, 0x75, 0x73, 0x61, 0x67, 0x65)
// check for omitted fields
zb0006Len := uint32(2)
var zb0006Mask uint8 /* 2 bits */
_ = zb0006Mask
if z.Usage.Error == "" {
zb0006Len--
zb0006Mask |= 0x2
}
// variable map header, size zb0006Len
o = append(o, 0x80|uint8(zb0006Len))
// skip if no fields are to be emitted
if zb0006Len != 0 {
// string "size"
o = append(o, 0xa4, 0x73, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.Usage.Size)
if (zb0006Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Usage.Error)
}
}
}
// string "services"
o = append(o, 0xa8, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73)
o, err = z.Services.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Services")
return
}
// string "backend"
o = append(o, 0xa7, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64)
o, err = z.Backend.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// string "servers"
o = append(o, 0xa7, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Servers)))
for za0003 := range z.Servers {
o, err = z.Servers[za0003].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Servers", za0003)
return
}
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *InfoMessage) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 13 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "mode":
z.Mode, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Mode")
return
}
zb0001Mask |= 0x1
case "domain":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Domain")
return
}
if cap(z.Domain) >= int(zb0002) {
z.Domain = (z.Domain)[:zb0002]
} else {
z.Domain = make([]string, zb0002)
}
for za0001 := range z.Domain {
z.Domain[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Domain", za0001)
return
}
}
zb0001Mask |= 0x2
case "region":
z.Region, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
zb0001Mask |= 0x4
case "sqsARN":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SQSARN")
return
}
if cap(z.SQSARN) >= int(zb0003) {
z.SQSARN = (z.SQSARN)[:zb0003]
} else {
z.SQSARN = make([]string, zb0003)
}
for za0002 := range z.SQSARN {
z.SQSARN[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SQSARN", za0002)
return
}
}
zb0001Mask |= 0x8
case "deploymentID":
z.DeploymentID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeploymentID")
return
}
zb0001Mask |= 0x10
case "buckets":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Buckets.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Buckets", "Count")
return
}
case "error":
z.Buckets.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Buckets", "Error")
return
}
zb0004Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
z.Buckets.Error = ""
}
zb0001Mask |= 0x20
case "objects":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
var zb0005Mask uint8 /* 1 bits */
_ = zb0005Mask
for zb0005 > 0 {
zb0005--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Objects.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects", "Count")
return
}
case "error":
z.Objects.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects", "Error")
return
}
zb0005Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
}
}
// Clear omitted fields.
if (zb0005Mask & 0x1) == 0 {
z.Objects.Error = ""
}
zb0001Mask |= 0x40
case "versions":
var zb0006 uint32
zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Versions")
return
}
var zb0006Mask uint8 /* 1 bits */
_ = zb0006Mask
for zb0006 > 0 {
zb0006--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Versions")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Versions.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Versions", "Count")
return
}
case "error":
z.Versions.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Versions", "Error")
return
}
zb0006Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Versions")
return
}
}
}
// Clear omitted fields.
if (zb0006Mask & 0x1) == 0 {
z.Versions.Error = ""
}
zb0001Mask |= 0x80
case "deletemarkers":
var zb0007 uint32
zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
var zb0007Mask uint8 /* 1 bits */
_ = zb0007Mask
for zb0007 > 0 {
zb0007--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
switch msgp.UnsafeString(field) {
case "count":
z.DeleteMarkers.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers", "Count")
return
}
case "error":
z.DeleteMarkers.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers", "Error")
return
}
zb0007Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "DeleteMarkers")
return
}
}
}
// Clear omitted fields.
if (zb0007Mask & 0x1) == 0 {
z.DeleteMarkers.Error = ""
}
zb0001Mask |= 0x100
case "usage":
var zb0008 uint32
zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
var zb0008Mask uint8 /* 1 bits */
_ = zb0008Mask
for zb0008 > 0 {
zb0008--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
switch msgp.UnsafeString(field) {
case "size":
z.Usage.Size, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Usage", "Size")
return
}
case "error":
z.Usage.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Usage", "Error")
return
}
zb0008Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Usage")
return
}
}
}
// Clear omitted fields.
if (zb0008Mask & 0x1) == 0 {
z.Usage.Error = ""
}
zb0001Mask |= 0x200
case "services":
bts, err = z.Services.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Services")
return
}
zb0001Mask |= 0x400
case "backend":
bts, err = z.Backend.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
zb0001Mask |= 0x800
case "servers":
var zb0009 uint32
zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Servers")
return
}
if cap(z.Servers) >= int(zb0009) {
z.Servers = (z.Servers)[:zb0009]
} else {
z.Servers = make([]ServerProperties, zb0009)
}
for za0003 := range z.Servers {
bts, err = z.Servers[za0003].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Servers", za0003)
return
}
}
zb0001Mask |= 0x1000
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1fff {
if (zb0001Mask & 0x1) == 0 {
z.Mode = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Domain = nil
}
if (zb0001Mask & 0x4) == 0 {
z.Region = ""
}
if (zb0001Mask & 0x8) == 0 {
z.SQSARN = nil
}
if (zb0001Mask & 0x10) == 0 {
z.DeploymentID = ""
}
if (zb0001Mask & 0x20) == 0 {
z.Buckets = (Buckets{})
}
if (zb0001Mask & 0x40) == 0 {
z.Objects = (Objects{})
}
if (zb0001Mask & 0x80) == 0 {
z.Versions = (Versions{})
}
if (zb0001Mask & 0x100) == 0 {
z.DeleteMarkers = (DeleteMarkers{})
}
if (zb0001Mask & 0x200) == 0 {
z.Usage = (Usage{})
}
if (zb0001Mask & 0x400) == 0 {
z.Services = Services{}
}
if (zb0001Mask & 0x800) == 0 {
z.Backend = ErasureBackend{}
}
if (zb0001Mask & 0x1000) == 0 {
z.Servers = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *InfoMessage) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Mode) + 7 + msgp.ArrayHeaderSize
for za0001 := range z.Domain {
s += msgp.StringPrefixSize + len(z.Domain[za0001])
}
s += 7 + msgp.StringPrefixSize + len(z.Region) + 7 + msgp.ArrayHeaderSize
for za0002 := range z.SQSARN {
s += msgp.StringPrefixSize + len(z.SQSARN[za0002])
}
s += 13 + msgp.StringPrefixSize + len(z.DeploymentID) + 8 + 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Buckets.Error) + 8 + 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Objects.Error) + 9 + 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Versions.Error) + 14 + 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.DeleteMarkers.Error) + 6 + 1 + 5 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Usage.Error) + 9 + z.Services.Msgsize() + 8 + z.Backend.Msgsize() + 8 + msgp.ArrayHeaderSize
for za0003 := range z.Servers {
s += z.Servers[za0003].Msgsize()
}
return
}
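// exampleInfoMessageRoundTrip is an illustrative sketch, not produced by msgp:
// empty fields such as Domain, SQSARN and Servers carry omit masks above, so
// they are skipped on encode and reset on decode. Only fields visible in the
// accessors above are used; the values are hypothetical.
func exampleInfoMessageRoundTrip() (InfoMessage, error) {
	in := InfoMessage{
		Mode:         "online",
		Region:       "us-east-1",
		DeploymentID: "00000000-0000-0000-0000-000000000000",
	}
	buf, err := in.MarshalMsg(nil)
	if err != nil {
		return InfoMessage{}, err
	}
	var out InfoMessage
	if _, err := out.UnmarshalMsg(buf); err != nil {
		return InfoMessage{}, err
	}
	// out.Domain, out.SQSARN and out.Servers remain nil here because those
	// keys were omitted from the encoded map.
	return out, nil
}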
// DecodeMsg implements msgp.Decodable
func (z *ItemState) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 string
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = ItemState(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z ItemState) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteString(string(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z ItemState) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ItemState) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = ItemState(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z ItemState) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// DecodeMsg implements msgp.Decodable
func (z *KMS) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "status":
z.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
zb0001Mask |= 0x1
case "encrypt":
z.Encrypt, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Encrypt")
return
}
zb0001Mask |= 0x2
case "decrypt":
z.Decrypt, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Decrypt")
return
}
zb0001Mask |= 0x4
case "endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
zb0001Mask |= 0x8
case "version":
z.Version, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
zb0001Mask |= 0x10
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1f {
if (zb0001Mask & 0x1) == 0 {
z.Status = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Encrypt = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Decrypt = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Endpoint = ""
}
if (zb0001Mask & 0x10) == 0 {
z.Version = ""
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *KMS) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(5)
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
if z.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Encrypt == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Decrypt == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Endpoint == "" {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Version == "" {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(z.Status)
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "encrypt"
err = en.Append(0xa7, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Encrypt)
if err != nil {
err = msgp.WrapError(err, "Encrypt")
return
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "decrypt"
err = en.Append(0xa7, 0x64, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Decrypt)
if err != nil {
err = msgp.WrapError(err, "Decrypt")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "version"
err = en.Append(0xa7, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Version)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *KMS) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(5)
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
if z.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Encrypt == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Decrypt == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Endpoint == "" {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Version == "" {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, z.Status)
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "encrypt"
o = append(o, 0xa7, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74)
o = msgp.AppendString(o, z.Encrypt)
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "decrypt"
o = append(o, 0xa7, 0x64, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74)
o = msgp.AppendString(o, z.Decrypt)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "version"
o = append(o, 0xa7, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Version)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *KMS) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "status":
z.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
zb0001Mask |= 0x1
case "encrypt":
z.Encrypt, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Encrypt")
return
}
zb0001Mask |= 0x2
case "decrypt":
z.Decrypt, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Decrypt")
return
}
zb0001Mask |= 0x4
case "endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
zb0001Mask |= 0x8
case "version":
z.Version, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
zb0001Mask |= 0x10
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x1f {
if (zb0001Mask & 0x1) == 0 {
z.Status = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Encrypt = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Decrypt = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Endpoint = ""
}
if (zb0001Mask & 0x10) == 0 {
z.Version = ""
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *KMS) Msgsize() (s int) {
s = 1 + 7 + msgp.StringPrefixSize + len(z.Status) + 8 + msgp.StringPrefixSize + len(z.Encrypt) + 8 + msgp.StringPrefixSize + len(z.Decrypt) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.StringPrefixSize + len(z.Version)
return
}
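// exampleKMSZeroValue is an illustrative sketch, not produced by msgp: every
// KMS field participates in the omit mask above, so a zero value should
// marshal to a single empty-fixmap byte (0x80).
func exampleKMSZeroValue() (int, error) {
	buf, err := (&KMS{}).MarshalMsg(nil)
	if err != nil {
		return 0, err
	}
	return len(buf), nil // expected to be 1 for the zero value
}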
// DecodeMsg implements msgp.Decodable
func (z *LDAP) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "status":
z.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Status = ""
}
return
}
// EncodeMsg implements msgp.Encodable
func (z LDAP) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if z.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(z.Status)
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z LDAP) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if z.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, z.Status)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *LDAP) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "status":
z.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Status = ""
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z LDAP) Msgsize() (s int) {
s = 1 + 7 + msgp.StringPrefixSize + len(z.Status)
return
}
// DecodeMsg implements msgp.Decodable
func (z *Logger) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(Logger, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
zb0003--
var zb0001 string
var zb0002 Status
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "status":
zb0002.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, zb0001, "Status")
return
}
zb0004Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
zb0002.Status = ""
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Logger) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0005, zb0006 := range z {
err = en.WriteString(zb0005)
if err != nil {
err = msgp.WrapError(err)
return
}
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if zb0006.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(zb0006.Status)
if err != nil {
err = msgp.WrapError(err, zb0005, "Status")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Logger) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0005, zb0006 := range z {
o = msgp.AppendString(o, zb0005)
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if zb0006.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, zb0006.Status)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Logger) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(Logger, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
var zb0001 string
var zb0002 Status
zb0003--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "status":
zb0002.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "Status")
return
}
zb0004Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
zb0002.Status = ""
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Logger) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0005, zb0006 := range z {
_ = zb0006
s += msgp.StringPrefixSize + len(zb0005) + 1 + 7 + msgp.StringPrefixSize + len(zb0006.Status)
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *MemStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Alloc":
z.Alloc, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Alloc")
return
}
case "TotalAlloc":
z.TotalAlloc, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalAlloc")
return
}
case "Mallocs":
z.Mallocs, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Mallocs")
return
}
case "Frees":
z.Frees, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Frees")
return
}
case "HeapAlloc":
z.HeapAlloc, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "HeapAlloc")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *MemStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 5
// write "Alloc"
err = en.Append(0x85, 0xa5, 0x41, 0x6c, 0x6c, 0x6f, 0x63)
if err != nil {
return
}
err = en.WriteUint64(z.Alloc)
if err != nil {
err = msgp.WrapError(err, "Alloc")
return
}
// write "TotalAlloc"
err = en.Append(0xaa, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6c, 0x6c, 0x6f, 0x63)
if err != nil {
return
}
err = en.WriteUint64(z.TotalAlloc)
if err != nil {
err = msgp.WrapError(err, "TotalAlloc")
return
}
// write "Mallocs"
err = en.Append(0xa7, 0x4d, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.Mallocs)
if err != nil {
err = msgp.WrapError(err, "Mallocs")
return
}
// write "Frees"
err = en.Append(0xa5, 0x46, 0x72, 0x65, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.Frees)
if err != nil {
err = msgp.WrapError(err, "Frees")
return
}
// write "HeapAlloc"
err = en.Append(0xa9, 0x48, 0x65, 0x61, 0x70, 0x41, 0x6c, 0x6c, 0x6f, 0x63)
if err != nil {
return
}
err = en.WriteUint64(z.HeapAlloc)
if err != nil {
err = msgp.WrapError(err, "HeapAlloc")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MemStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 5
// string "Alloc"
o = append(o, 0x85, 0xa5, 0x41, 0x6c, 0x6c, 0x6f, 0x63)
o = msgp.AppendUint64(o, z.Alloc)
// string "TotalAlloc"
o = append(o, 0xaa, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6c, 0x6c, 0x6f, 0x63)
o = msgp.AppendUint64(o, z.TotalAlloc)
// string "Mallocs"
o = append(o, 0xa7, 0x4d, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x73)
o = msgp.AppendUint64(o, z.Mallocs)
// string "Frees"
o = append(o, 0xa5, 0x46, 0x72, 0x65, 0x65, 0x73)
o = msgp.AppendUint64(o, z.Frees)
// string "HeapAlloc"
o = append(o, 0xa9, 0x48, 0x65, 0x61, 0x70, 0x41, 0x6c, 0x6c, 0x6f, 0x63)
o = msgp.AppendUint64(o, z.HeapAlloc)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MemStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Alloc":
z.Alloc, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Alloc")
return
}
case "TotalAlloc":
z.TotalAlloc, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalAlloc")
return
}
case "Mallocs":
z.Mallocs, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Mallocs")
return
}
case "Frees":
z.Frees, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Frees")
return
}
case "HeapAlloc":
z.HeapAlloc, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "HeapAlloc")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MemStats) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint64Size + 11 + msgp.Uint64Size + 8 + msgp.Uint64Size + 6 + msgp.Uint64Size + 10 + msgp.Uint64Size
return
}
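// exampleMemStatsStream is an illustrative sketch, not part of the generated
// code: EncodeMsg/DecodeMsg above operate on streaming writers and readers,
// typically obtained by wrapping an io.Writer/io.Reader in msgp.NewWriter and
// msgp.NewReader at the call site. The function name is hypothetical.
func exampleMemStatsStream(in MemStats, w *msgp.Writer, r *msgp.Reader) (out MemStats, err error) {
	if err = in.EncodeMsg(w); err != nil {
		return out, err
	}
	// msgp.Writer buffers internally; flush before the peer starts decoding.
	if err = w.Flush(); err != nil {
		return out, err
	}
	err = out.DecodeMsg(r)
	return out, err
}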
// DecodeMsg implements msgp.Decodable
func (z *Objects) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Objects) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Count)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Error)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Objects) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Count)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Error)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Objects) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Objects) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Error)
return
}
// DecodeMsg implements msgp.Decodable
func (z *ServerInfoOpts) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Metrics":
z.Metrics, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z ServerInfoOpts) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "Metrics"
err = en.Append(0x81, 0xa7, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
if err != nil {
return
}
err = en.WriteBool(z.Metrics)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z ServerInfoOpts) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "Metrics"
o = append(o, 0x81, 0xa7, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
o = msgp.AppendBool(o, z.Metrics)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ServerInfoOpts) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Metrics":
z.Metrics, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z ServerInfoOpts) Msgsize() (s int) {
s = 1 + 8 + msgp.BoolSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *ServerProperties) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 16 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "state":
z.State, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "State")
return
}
zb0001Mask |= 0x1
case "endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
zb0001Mask |= 0x2
case "scheme":
z.Scheme, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Scheme")
return
}
zb0001Mask |= 0x4
case "uptime":
z.Uptime, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Uptime")
return
}
zb0001Mask |= 0x8
case "version":
z.Version, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
zb0001Mask |= 0x10
case "commitID":
z.CommitID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "CommitID")
return
}
zb0001Mask |= 0x20
case "network":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Network")
return
}
if z.Network == nil {
z.Network = make(map[string]string, zb0002)
} else if len(z.Network) > 0 {
for key := range z.Network {
delete(z.Network, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 string
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Network")
return
}
za0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Network", za0001)
return
}
z.Network[za0001] = za0002
}
zb0001Mask |= 0x40
case "drives":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0003) {
z.Disks = (z.Disks)[:zb0003]
} else {
z.Disks = make([]Disk, zb0003)
}
for za0003 := range z.Disks {
err = z.Disks[za0003].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Disks", za0003)
return
}
}
zb0001Mask |= 0x80
case "poolNumber":
z.PoolNumber, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolNumber")
return
}
zb0001Mask |= 0x100
case "poolNumbers":
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "PoolNumbers")
return
}
if cap(z.PoolNumbers) >= int(zb0004) {
z.PoolNumbers = (z.PoolNumbers)[:zb0004]
} else {
z.PoolNumbers = make([]int, zb0004)
}
for za0004 := range z.PoolNumbers {
z.PoolNumbers[za0004], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolNumbers", za0004)
return
}
}
zb0001Mask |= 0x200
case "mem_stats":
err = z.MemStats.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "MemStats")
return
}
case "go_max_procs":
z.GoMaxProcs, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "GoMaxProcs")
return
}
zb0001Mask |= 0x400
case "num_cpu":
z.NumCPU, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "NumCPU")
return
}
zb0001Mask |= 0x800
case "runtime_version":
z.RuntimeVersion, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "RuntimeVersion")
return
}
zb0001Mask |= 0x1000
case "gc_stats":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "GCStats")
return
}
z.GCStats = nil
} else {
if z.GCStats == nil {
z.GCStats = new(GCStats)
}
err = z.GCStats.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "GCStats")
return
}
}
zb0001Mask |= 0x2000
case "minio_env_vars":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars")
return
}
if z.MinioEnvVars == nil {
z.MinioEnvVars = make(map[string]string, zb0005)
} else if len(z.MinioEnvVars) > 0 {
for key := range z.MinioEnvVars {
delete(z.MinioEnvVars, key)
}
}
for zb0005 > 0 {
zb0005--
var za0005 string
var za0006 string
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars")
return
}
za0006, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars", za0005)
return
}
z.MinioEnvVars[za0005] = za0006
}
zb0001Mask |= 0x4000
case "edition":
z.Edition, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Edition")
return
}
case "license":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "License")
return
}
z.License = nil
} else {
if z.License == nil {
z.License = new(LicenseInfo)
}
err = z.License.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "License")
return
}
}
zb0001Mask |= 0x8000
case "is_leader":
z.IsLeader, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "IsLeader")
return
}
case "ilm_expiry_in_progress":
z.ILMExpiryInProgress, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "ILMExpiryInProgress")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xffff {
if (zb0001Mask & 0x1) == 0 {
z.State = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Endpoint = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Scheme = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Uptime = 0
}
if (zb0001Mask & 0x10) == 0 {
z.Version = ""
}
if (zb0001Mask & 0x20) == 0 {
z.CommitID = ""
}
if (zb0001Mask & 0x40) == 0 {
z.Network = nil
}
if (zb0001Mask & 0x80) == 0 {
z.Disks = nil
}
if (zb0001Mask & 0x100) == 0 {
z.PoolNumber = 0
}
if (zb0001Mask & 0x200) == 0 {
z.PoolNumbers = nil
}
if (zb0001Mask & 0x400) == 0 {
z.GoMaxProcs = 0
}
if (zb0001Mask & 0x800) == 0 {
z.NumCPU = 0
}
if (zb0001Mask & 0x1000) == 0 {
z.RuntimeVersion = ""
}
if (zb0001Mask & 0x2000) == 0 {
z.GCStats = nil
}
if (zb0001Mask & 0x4000) == 0 {
z.MinioEnvVars = nil
}
if (zb0001Mask & 0x8000) == 0 {
z.License = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ServerProperties) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(20)
var zb0001Mask uint32 /* 20 bits */
_ = zb0001Mask
if z.State == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Endpoint == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Scheme == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Uptime == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Version == "" {
zb0001Len--
zb0001Mask |= 0x10
}
if z.CommitID == "" {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Network == nil {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Disks == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.PoolNumber == 0 {
zb0001Len--
zb0001Mask |= 0x100
}
if z.PoolNumbers == nil {
zb0001Len--
zb0001Mask |= 0x200
}
if z.GoMaxProcs == 0 {
zb0001Len--
zb0001Mask |= 0x800
}
if z.NumCPU == 0 {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.RuntimeVersion == "" {
zb0001Len--
zb0001Mask |= 0x2000
}
if z.GCStats == nil {
zb0001Len--
zb0001Mask |= 0x4000
}
if z.MinioEnvVars == nil {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.License == nil {
zb0001Len--
zb0001Mask |= 0x20000
}
// variable map header, size zb0001Len
err = en.WriteMapHeader(zb0001Len)
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "state"
err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteString(z.State)
if err != nil {
err = msgp.WrapError(err, "State")
return
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "endpoint"
err = en.Append(0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "scheme"
err = en.Append(0xa6, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Scheme)
if err != nil {
err = msgp.WrapError(err, "Scheme")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "uptime"
err = en.Append(0xa6, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.Uptime)
if err != nil {
err = msgp.WrapError(err, "Uptime")
return
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "version"
err = en.Append(0xa7, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Version)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "commitID"
err = en.Append(0xa8, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.CommitID)
if err != nil {
err = msgp.WrapError(err, "CommitID")
return
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "network"
err = en.Append(0xa7, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.Network)))
if err != nil {
err = msgp.WrapError(err, "Network")
return
}
for za0001, za0002 := range z.Network {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "Network")
return
}
err = en.WriteString(za0002)
if err != nil {
err = msgp.WrapError(err, "Network", za0001)
return
}
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "drives"
err = en.Append(0xa6, 0x64, 0x72, 0x69, 0x76, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Disks)))
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
for za0003 := range z.Disks {
err = z.Disks[za0003].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Disks", za0003)
return
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// write "poolNumber"
err = en.Append(0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteInt(z.PoolNumber)
if err != nil {
err = msgp.WrapError(err, "PoolNumber")
return
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "poolNumbers"
err = en.Append(0xab, 0x70, 0x6f, 0x6f, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.PoolNumbers)))
if err != nil {
err = msgp.WrapError(err, "PoolNumbers")
return
}
for za0004 := range z.PoolNumbers {
err = en.WriteInt(z.PoolNumbers[za0004])
if err != nil {
err = msgp.WrapError(err, "PoolNumbers", za0004)
return
}
}
}
// write "mem_stats"
err = en.Append(0xa9, 0x6d, 0x65, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = z.MemStats.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "MemStats")
return
}
if (zb0001Mask & 0x800) == 0 { // if not omitted
// write "go_max_procs"
err = en.Append(0xac, 0x67, 0x6f, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.GoMaxProcs)
if err != nil {
err = msgp.WrapError(err, "GoMaxProcs")
return
}
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// write "num_cpu"
err = en.Append(0xa7, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x70, 0x75)
if err != nil {
return
}
err = en.WriteInt(z.NumCPU)
if err != nil {
err = msgp.WrapError(err, "NumCPU")
return
}
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// write "runtime_version"
err = en.Append(0xaf, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.RuntimeVersion)
if err != nil {
err = msgp.WrapError(err, "RuntimeVersion")
return
}
}
if (zb0001Mask & 0x4000) == 0 { // if not omitted
// write "gc_stats"
err = en.Append(0xa8, 0x67, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
if z.GCStats == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.GCStats.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "GCStats")
return
}
}
}
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// write "minio_env_vars"
err = en.Append(0xae, 0x6d, 0x69, 0x6e, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x76, 0x5f, 0x76, 0x61, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.MinioEnvVars)))
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars")
return
}
for za0005, za0006 := range z.MinioEnvVars {
err = en.WriteString(za0005)
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars")
return
}
err = en.WriteString(za0006)
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars", za0005)
return
}
}
}
// write "edition"
err = en.Append(0xa7, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Edition)
if err != nil {
err = msgp.WrapError(err, "Edition")
return
}
if (zb0001Mask & 0x20000) == 0 { // if not omitted
// write "license"
err = en.Append(0xa7, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65)
if err != nil {
return
}
if z.License == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.License.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "License")
return
}
}
}
// write "is_leader"
err = en.Append(0xa9, 0x69, 0x73, 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteBool(z.IsLeader)
if err != nil {
err = msgp.WrapError(err, "IsLeader")
return
}
// write "ilm_expiry_in_progress"
err = en.Append(0xb6, 0x69, 0x6c, 0x6d, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73)
if err != nil {
return
}
err = en.WriteBool(z.ILMExpiryInProgress)
if err != nil {
err = msgp.WrapError(err, "ILMExpiryInProgress")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ServerProperties) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(20)
var zb0001Mask uint32 /* 20 bits */
_ = zb0001Mask
if z.State == "" {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Endpoint == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Scheme == "" {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Uptime == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Version == "" {
zb0001Len--
zb0001Mask |= 0x10
}
if z.CommitID == "" {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Network == nil {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Disks == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.PoolNumber == 0 {
zb0001Len--
zb0001Mask |= 0x100
}
if z.PoolNumbers == nil {
zb0001Len--
zb0001Mask |= 0x200
}
if z.GoMaxProcs == 0 {
zb0001Len--
zb0001Mask |= 0x800
}
if z.NumCPU == 0 {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.RuntimeVersion == "" {
zb0001Len--
zb0001Mask |= 0x2000
}
if z.GCStats == nil {
zb0001Len--
zb0001Mask |= 0x4000
}
if z.MinioEnvVars == nil {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.License == nil {
zb0001Len--
zb0001Mask |= 0x20000
}
// variable map header, size zb0001Len
o = msgp.AppendMapHeader(o, zb0001Len)
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "state"
o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x65)
o = msgp.AppendString(o, z.State)
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "endpoint"
o = append(o, 0xa8, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "scheme"
o = append(o, 0xa6, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65)
o = msgp.AppendString(o, z.Scheme)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "uptime"
o = append(o, 0xa6, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65)
o = msgp.AppendInt64(o, z.Uptime)
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "version"
o = append(o, 0xa7, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Version)
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "commitID"
o = append(o, 0xa8, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.CommitID)
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "network"
o = append(o, 0xa7, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b)
o = msgp.AppendMapHeader(o, uint32(len(z.Network)))
for za0001, za0002 := range z.Network {
o = msgp.AppendString(o, za0001)
o = msgp.AppendString(o, za0002)
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "drives"
o = append(o, 0xa6, 0x64, 0x72, 0x69, 0x76, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
for za0003 := range z.Disks {
o, err = z.Disks[za0003].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Disks", za0003)
return
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// string "poolNumber"
o = append(o, 0xaa, 0x70, 0x6f, 0x6f, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72)
o = msgp.AppendInt(o, z.PoolNumber)
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "poolNumbers"
o = append(o, 0xab, 0x70, 0x6f, 0x6f, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.PoolNumbers)))
for za0004 := range z.PoolNumbers {
o = msgp.AppendInt(o, z.PoolNumbers[za0004])
}
}
// string "mem_stats"
o = append(o, 0xa9, 0x6d, 0x65, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73)
o, err = z.MemStats.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "MemStats")
return
}
if (zb0001Mask & 0x800) == 0 { // if not omitted
// string "go_max_procs"
o = append(o, 0xac, 0x67, 0x6f, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x73)
o = msgp.AppendInt(o, z.GoMaxProcs)
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// string "num_cpu"
o = append(o, 0xa7, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x70, 0x75)
o = msgp.AppendInt(o, z.NumCPU)
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// string "runtime_version"
o = append(o, 0xaf, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.RuntimeVersion)
}
if (zb0001Mask & 0x4000) == 0 { // if not omitted
// string "gc_stats"
o = append(o, 0xa8, 0x67, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73)
if z.GCStats == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.GCStats.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "GCStats")
return
}
}
}
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// string "minio_env_vars"
o = append(o, 0xae, 0x6d, 0x69, 0x6e, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x76, 0x5f, 0x76, 0x61, 0x72, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.MinioEnvVars)))
for za0005, za0006 := range z.MinioEnvVars {
o = msgp.AppendString(o, za0005)
o = msgp.AppendString(o, za0006)
}
}
// string "edition"
o = append(o, 0xa7, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Edition)
if (zb0001Mask & 0x20000) == 0 { // if not omitted
// string "license"
o = append(o, 0xa7, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65)
if z.License == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.License.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "License")
return
}
}
}
// string "is_leader"
o = append(o, 0xa9, 0x69, 0x73, 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72)
o = msgp.AppendBool(o, z.IsLeader)
// string "ilm_expiry_in_progress"
o = append(o, 0xb6, 0x69, 0x6c, 0x6d, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73)
o = msgp.AppendBool(o, z.ILMExpiryInProgress)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ServerProperties) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 16 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "state":
z.State, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "State")
return
}
zb0001Mask |= 0x1
case "endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
zb0001Mask |= 0x2
case "scheme":
z.Scheme, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Scheme")
return
}
zb0001Mask |= 0x4
case "uptime":
z.Uptime, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Uptime")
return
}
zb0001Mask |= 0x8
case "version":
z.Version, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
zb0001Mask |= 0x10
case "commitID":
z.CommitID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CommitID")
return
}
zb0001Mask |= 0x20
case "network":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Network")
return
}
if z.Network == nil {
z.Network = make(map[string]string, zb0002)
} else if len(z.Network) > 0 {
for key := range z.Network {
delete(z.Network, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Network")
return
}
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Network", za0001)
return
}
z.Network[za0001] = za0002
}
zb0001Mask |= 0x40
case "drives":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0003) {
z.Disks = (z.Disks)[:zb0003]
} else {
z.Disks = make([]Disk, zb0003)
}
for za0003 := range z.Disks {
bts, err = z.Disks[za0003].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0003)
return
}
}
zb0001Mask |= 0x80
case "poolNumber":
z.PoolNumber, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolNumber")
return
}
zb0001Mask |= 0x100
case "poolNumbers":
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolNumbers")
return
}
if cap(z.PoolNumbers) >= int(zb0004) {
z.PoolNumbers = (z.PoolNumbers)[:zb0004]
} else {
z.PoolNumbers = make([]int, zb0004)
}
for za0004 := range z.PoolNumbers {
z.PoolNumbers[za0004], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolNumbers", za0004)
return
}
}
zb0001Mask |= 0x200
case "mem_stats":
bts, err = z.MemStats.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "MemStats")
return
}
case "go_max_procs":
z.GoMaxProcs, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GoMaxProcs")
return
}
zb0001Mask |= 0x400
case "num_cpu":
z.NumCPU, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NumCPU")
return
}
zb0001Mask |= 0x800
case "runtime_version":
z.RuntimeVersion, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RuntimeVersion")
return
}
zb0001Mask |= 0x1000
case "gc_stats":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.GCStats = nil
} else {
if z.GCStats == nil {
z.GCStats = new(GCStats)
}
bts, err = z.GCStats.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "GCStats")
return
}
}
zb0001Mask |= 0x2000
case "minio_env_vars":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars")
return
}
if z.MinioEnvVars == nil {
z.MinioEnvVars = make(map[string]string, zb0005)
} else if len(z.MinioEnvVars) > 0 {
for key := range z.MinioEnvVars {
delete(z.MinioEnvVars, key)
}
}
for zb0005 > 0 {
var za0005 string
var za0006 string
zb0005--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars")
return
}
za0006, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MinioEnvVars", za0005)
return
}
z.MinioEnvVars[za0005] = za0006
}
zb0001Mask |= 0x4000
case "edition":
z.Edition, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Edition")
return
}
case "license":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.License = nil
} else {
if z.License == nil {
z.License = new(LicenseInfo)
}
bts, err = z.License.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "License")
return
}
}
zb0001Mask |= 0x8000
case "is_leader":
z.IsLeader, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "IsLeader")
return
}
case "ilm_expiry_in_progress":
z.ILMExpiryInProgress, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ILMExpiryInProgress")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xffff {
if (zb0001Mask & 0x1) == 0 {
z.State = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Endpoint = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Scheme = ""
}
if (zb0001Mask & 0x8) == 0 {
z.Uptime = 0
}
if (zb0001Mask & 0x10) == 0 {
z.Version = ""
}
if (zb0001Mask & 0x20) == 0 {
z.CommitID = ""
}
if (zb0001Mask & 0x40) == 0 {
z.Network = nil
}
if (zb0001Mask & 0x80) == 0 {
z.Disks = nil
}
if (zb0001Mask & 0x100) == 0 {
z.PoolNumber = 0
}
if (zb0001Mask & 0x200) == 0 {
z.PoolNumbers = nil
}
if (zb0001Mask & 0x400) == 0 {
z.GoMaxProcs = 0
}
if (zb0001Mask & 0x800) == 0 {
z.NumCPU = 0
}
if (zb0001Mask & 0x1000) == 0 {
z.RuntimeVersion = ""
}
if (zb0001Mask & 0x2000) == 0 {
z.GCStats = nil
}
if (zb0001Mask & 0x4000) == 0 {
z.MinioEnvVars = nil
}
if (zb0001Mask & 0x8000) == 0 {
z.License = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ServerProperties) Msgsize() (s int) {
s = 3 + 6 + msgp.StringPrefixSize + len(z.State) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 7 + msgp.StringPrefixSize + len(z.Scheme) + 7 + msgp.Int64Size + 8 + msgp.StringPrefixSize + len(z.Version) + 9 + msgp.StringPrefixSize + len(z.CommitID) + 8 + msgp.MapHeaderSize
if z.Network != nil {
for za0001, za0002 := range z.Network {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
}
}
s += 7 + msgp.ArrayHeaderSize
for za0003 := range z.Disks {
s += z.Disks[za0003].Msgsize()
}
s += 11 + msgp.IntSize + 12 + msgp.ArrayHeaderSize + (len(z.PoolNumbers) * (msgp.IntSize)) + 10 + z.MemStats.Msgsize() + 13 + msgp.IntSize + 8 + msgp.IntSize + 16 + msgp.StringPrefixSize + len(z.RuntimeVersion) + 9
if z.GCStats == nil {
s += msgp.NilSize
} else {
s += z.GCStats.Msgsize()
}
s += 15 + msgp.MapHeaderSize
if z.MinioEnvVars != nil {
for za0005, za0006 := range z.MinioEnvVars {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + msgp.StringPrefixSize + len(za0006)
}
}
s += 8 + msgp.StringPrefixSize + len(z.Edition) + 8
if z.License == nil {
s += msgp.NilSize
} else {
s += z.License.Msgsize()
}
s += 10 + msgp.BoolSize + 23 + msgp.BoolSize
return
}
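// exampleServerPropertiesMarshal is an illustrative sketch, not part of the
// generated code: the MarshalMsg method above omits fields that are at their
// zero value (state, endpoint, network, and so on), and the matching
// UnmarshalMsg clears those fields again, so zero values survive a round
// trip. The function name is hypothetical.
func exampleServerPropertiesMarshal(p *ServerProperties) ([]byte, error) {
	// Msgsize gives an upper bound, so the append inside MarshalMsg
	// normally stays within this single allocation.
	return p.MarshalMsg(make([]byte, 0, p.Msgsize()))
}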
// DecodeMsg implements msgp.Decodable
func (z *Services) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 6 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "kms":
err = z.KMS.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "KMS")
return
}
zb0001Mask |= 0x1
case "kmsStatus":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "KMSStatus")
return
}
if cap(z.KMSStatus) >= int(zb0002) {
z.KMSStatus = (z.KMSStatus)[:zb0002]
} else {
z.KMSStatus = make([]KMS, zb0002)
}
for za0001 := range z.KMSStatus {
err = z.KMSStatus[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "KMSStatus", za0001)
return
}
}
zb0001Mask |= 0x2
case "ldap":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LDAP")
return
}
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
for zb0003 > 0 {
zb0003--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "LDAP")
return
}
switch msgp.UnsafeString(field) {
case "status":
z.LDAP.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LDAP", "Status")
return
}
zb0003Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "LDAP")
return
}
}
}
// Clear omitted fields.
if (zb0003Mask & 0x1) == 0 {
z.LDAP.Status = ""
}
zb0001Mask |= 0x4
case "logger":
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Logger")
return
}
if cap(z.Logger) >= int(zb0004) {
z.Logger = (z.Logger)[:zb0004]
} else {
z.Logger = make([]Logger, zb0004)
}
for za0002 := range z.Logger {
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Logger", za0002)
return
}
if z.Logger[za0002] == nil {
z.Logger[za0002] = make(Logger, zb0005)
} else if len(z.Logger[za0002]) > 0 {
for key := range z.Logger[za0002] {
delete(z.Logger[za0002], key)
}
}
for zb0005 > 0 {
zb0005--
var za0003 string
var za0004 Status
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Logger", za0002)
return
}
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003)
return
}
var zb0006Mask uint8 /* 1 bits */
_ = zb0006Mask
for zb0006 > 0 {
zb0006--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003)
return
}
switch msgp.UnsafeString(field) {
case "status":
za0004.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003, "Status")
return
}
zb0006Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003)
return
}
}
}
// Clear omitted fields.
if (zb0006Mask & 0x1) == 0 {
za0004.Status = ""
}
z.Logger[za0002][za0003] = za0004
}
}
zb0001Mask |= 0x8
case "audit":
var zb0007 uint32
zb0007, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Audit")
return
}
if cap(z.Audit) >= int(zb0007) {
z.Audit = (z.Audit)[:zb0007]
} else {
z.Audit = make([]Audit, zb0007)
}
for za0005 := range z.Audit {
var zb0008 uint32
zb0008, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Audit", za0005)
return
}
if z.Audit[za0005] == nil {
z.Audit[za0005] = make(Audit, zb0008)
} else if len(z.Audit[za0005]) > 0 {
for key := range z.Audit[za0005] {
delete(z.Audit[za0005], key)
}
}
for zb0008 > 0 {
zb0008--
var za0006 string
var za0007 Status
za0006, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Audit", za0005)
return
}
var zb0009 uint32
zb0009, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006)
return
}
var zb0009Mask uint8 /* 1 bits */
_ = zb0009Mask
for zb0009 > 0 {
zb0009--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006)
return
}
switch msgp.UnsafeString(field) {
case "status":
za0007.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006, "Status")
return
}
zb0009Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006)
return
}
}
}
// Clear omitted fields.
if (zb0009Mask & 0x1) == 0 {
za0007.Status = ""
}
z.Audit[za0005][za0006] = za0007
}
}
zb0001Mask |= 0x10
case "notifications":
var zb0010 uint32
zb0010, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Notifications")
return
}
if cap(z.Notifications) >= int(zb0010) {
z.Notifications = (z.Notifications)[:zb0010]
} else {
z.Notifications = make([]map[string][]TargetIDStatus, zb0010)
}
for za0008 := range z.Notifications {
var zb0011 uint32
zb0011, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008)
return
}
if z.Notifications[za0008] == nil {
z.Notifications[za0008] = make(map[string][]TargetIDStatus, zb0011)
} else if len(z.Notifications[za0008]) > 0 {
for key := range z.Notifications[za0008] {
delete(z.Notifications[za0008], key)
}
}
for zb0011 > 0 {
zb0011--
var za0009 string
var za0010 []TargetIDStatus
za0009, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008)
return
}
var zb0012 uint32
zb0012, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009)
return
}
if cap(za0010) >= int(zb0012) {
za0010 = (za0010)[:zb0012]
} else {
za0010 = make([]TargetIDStatus, zb0012)
}
for za0011 := range za0010 {
var zb0013 uint32
zb0013, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011)
return
}
if za0010[za0011] == nil {
za0010[za0011] = make(TargetIDStatus, zb0013)
} else if len(za0010[za0011]) > 0 {
for key := range za0010[za0011] {
delete(za0010[za0011], key)
}
}
for zb0013 > 0 {
zb0013--
var za0012 string
var za0013 Status
za0012, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011)
return
}
var zb0014 uint32
zb0014, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012)
return
}
var zb0014Mask uint8 /* 1 bits */
_ = zb0014Mask
for zb0014 > 0 {
zb0014--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012)
return
}
switch msgp.UnsafeString(field) {
case "status":
za0013.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012, "Status")
return
}
zb0014Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012)
return
}
}
}
// Clear omitted fields.
if (zb0014Mask & 0x1) == 0 {
za0013.Status = ""
}
za0010[za0011][za0012] = za0013
}
}
z.Notifications[za0008][za0009] = za0010
}
}
zb0001Mask |= 0x20
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3f {
if (zb0001Mask & 0x1) == 0 {
z.KMS = KMS{}
}
if (zb0001Mask & 0x2) == 0 {
z.KMSStatus = nil
}
if (zb0001Mask & 0x4) == 0 {
z.LDAP = (LDAP{})
}
if (zb0001Mask & 0x8) == 0 {
z.Logger = nil
}
if (zb0001Mask & 0x10) == 0 {
z.Audit = nil
}
if (zb0001Mask & 0x20) == 0 {
z.Notifications = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Services) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(6)
var zb0001Mask uint8 /* 6 bits */
_ = zb0001Mask
if z.KMSStatus == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.LDAP == (LDAP{}) {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Logger == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Audit == nil {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Notifications == nil {
zb0001Len--
zb0001Mask |= 0x20
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "kms"
err = en.Append(0xa3, 0x6b, 0x6d, 0x73)
if err != nil {
return
}
err = z.KMS.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "KMS")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "kmsStatus"
err = en.Append(0xa9, 0x6b, 0x6d, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.KMSStatus)))
if err != nil {
err = msgp.WrapError(err, "KMSStatus")
return
}
for za0001 := range z.KMSStatus {
err = z.KMSStatus[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "KMSStatus", za0001)
return
}
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "ldap"
err = en.Append(0xa4, 0x6c, 0x64, 0x61, 0x70)
if err != nil {
return
}
// check for omitted fields
zb0002Len := uint32(1)
var zb0002Mask uint8 /* 1 bits */
_ = zb0002Mask
if z.LDAP.Status == "" {
zb0002Len--
zb0002Mask |= 0x1
}
// variable map header, size zb0002Len
err = en.Append(0x80 | uint8(zb0002Len))
if err != nil {
return
}
if (zb0002Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(z.LDAP.Status)
if err != nil {
err = msgp.WrapError(err, "LDAP", "Status")
return
}
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "logger"
err = en.Append(0xa6, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Logger)))
if err != nil {
err = msgp.WrapError(err, "Logger")
return
}
for za0002 := range z.Logger {
err = en.WriteMapHeader(uint32(len(z.Logger[za0002])))
if err != nil {
err = msgp.WrapError(err, "Logger", za0002)
return
}
for za0003, za0004 := range z.Logger[za0002] {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002)
return
}
// check for omitted fields
zb0003Len := uint32(1)
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
if za0004.Status == "" {
zb0003Len--
zb0003Mask |= 0x1
}
// variable map header, size zb0003Len
err = en.Append(0x80 | uint8(zb0003Len))
if err != nil {
return
}
if (zb0003Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(za0004.Status)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003, "Status")
return
}
}
}
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "audit"
err = en.Append(0xa5, 0x61, 0x75, 0x64, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Audit)))
if err != nil {
err = msgp.WrapError(err, "Audit")
return
}
for za0005 := range z.Audit {
err = en.WriteMapHeader(uint32(len(z.Audit[za0005])))
if err != nil {
err = msgp.WrapError(err, "Audit", za0005)
return
}
for za0006, za0007 := range z.Audit[za0005] {
err = en.WriteString(za0006)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005)
return
}
// check for omitted fields
zb0004Len := uint32(1)
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
if za0007.Status == "" {
zb0004Len--
zb0004Mask |= 0x1
}
// variable map header, size zb0004Len
err = en.Append(0x80 | uint8(zb0004Len))
if err != nil {
return
}
if (zb0004Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(za0007.Status)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006, "Status")
return
}
}
}
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "notifications"
err = en.Append(0xad, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Notifications)))
if err != nil {
err = msgp.WrapError(err, "Notifications")
return
}
for za0008 := range z.Notifications {
err = en.WriteMapHeader(uint32(len(z.Notifications[za0008])))
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008)
return
}
for za0009, za0010 := range z.Notifications[za0008] {
err = en.WriteString(za0009)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008)
return
}
err = en.WriteArrayHeader(uint32(len(za0010)))
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009)
return
}
for za0011 := range za0010 {
err = en.WriteMapHeader(uint32(len(za0010[za0011])))
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011)
return
}
for za0012, za0013 := range za0010[za0011] {
err = en.WriteString(za0012)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011)
return
}
// check for omitted fields
zb0005Len := uint32(1)
var zb0005Mask uint8 /* 1 bits */
_ = zb0005Mask
if za0013.Status == "" {
zb0005Len--
zb0005Mask |= 0x1
}
// variable map header, size zb0005Len
err = en.Append(0x80 | uint8(zb0005Len))
if err != nil {
return
}
if (zb0005Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(za0013.Status)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012, "Status")
return
}
}
}
}
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Services) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(6)
var zb0001Mask uint8 /* 6 bits */
_ = zb0001Mask
if z.KMSStatus == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.LDAP == (LDAP{}) {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Logger == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Audit == nil {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Notifications == nil {
zb0001Len--
zb0001Mask |= 0x20
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "kms"
o = append(o, 0xa3, 0x6b, 0x6d, 0x73)
o, err = z.KMS.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "KMS")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "kmsStatus"
o = append(o, 0xa9, 0x6b, 0x6d, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.KMSStatus)))
for za0001 := range z.KMSStatus {
o, err = z.KMSStatus[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "KMSStatus", za0001)
return
}
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "ldap"
o = append(o, 0xa4, 0x6c, 0x64, 0x61, 0x70)
// check for omitted fields
zb0002Len := uint32(1)
var zb0002Mask uint8 /* 1 bits */
_ = zb0002Mask
if z.LDAP.Status == "" {
zb0002Len--
zb0002Mask |= 0x1
}
// variable map header, size zb0002Len
o = append(o, 0x80|uint8(zb0002Len))
if (zb0002Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, z.LDAP.Status)
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "logger"
o = append(o, 0xa6, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72)
o = msgp.AppendArrayHeader(o, uint32(len(z.Logger)))
for za0002 := range z.Logger {
o = msgp.AppendMapHeader(o, uint32(len(z.Logger[za0002])))
for za0003, za0004 := range z.Logger[za0002] {
o = msgp.AppendString(o, za0003)
// check for omitted fields
zb0003Len := uint32(1)
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
if za0004.Status == "" {
zb0003Len--
zb0003Mask |= 0x1
}
// variable map header, size zb0003Len
o = append(o, 0x80|uint8(zb0003Len))
if (zb0003Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, za0004.Status)
}
}
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "audit"
o = append(o, 0xa5, 0x61, 0x75, 0x64, 0x69, 0x74)
o = msgp.AppendArrayHeader(o, uint32(len(z.Audit)))
for za0005 := range z.Audit {
o = msgp.AppendMapHeader(o, uint32(len(z.Audit[za0005])))
for za0006, za0007 := range z.Audit[za0005] {
o = msgp.AppendString(o, za0006)
// check for omitted fields
zb0004Len := uint32(1)
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
if za0007.Status == "" {
zb0004Len--
zb0004Mask |= 0x1
}
// variable map header, size zb0004Len
o = append(o, 0x80|uint8(zb0004Len))
if (zb0004Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, za0007.Status)
}
}
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "notifications"
o = append(o, 0xad, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Notifications)))
for za0008 := range z.Notifications {
o = msgp.AppendMapHeader(o, uint32(len(z.Notifications[za0008])))
for za0009, za0010 := range z.Notifications[za0008] {
o = msgp.AppendString(o, za0009)
o = msgp.AppendArrayHeader(o, uint32(len(za0010)))
for za0011 := range za0010 {
o = msgp.AppendMapHeader(o, uint32(len(za0010[za0011])))
for za0012, za0013 := range za0010[za0011] {
o = msgp.AppendString(o, za0012)
// check for omitted fields
zb0005Len := uint32(1)
var zb0005Mask uint8 /* 1 bits */
_ = zb0005Mask
if za0013.Status == "" {
zb0005Len--
zb0005Mask |= 0x1
}
// variable map header, size zb0005Len
o = append(o, 0x80|uint8(zb0005Len))
if (zb0005Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, za0013.Status)
}
}
}
}
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Services) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 6 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "kms":
bts, err = z.KMS.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "KMS")
return
}
zb0001Mask |= 0x1
case "kmsStatus":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "KMSStatus")
return
}
if cap(z.KMSStatus) >= int(zb0002) {
z.KMSStatus = (z.KMSStatus)[:zb0002]
} else {
z.KMSStatus = make([]KMS, zb0002)
}
for za0001 := range z.KMSStatus {
bts, err = z.KMSStatus[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "KMSStatus", za0001)
return
}
}
zb0001Mask |= 0x2
case "ldap":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LDAP")
return
}
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
for zb0003 > 0 {
zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "LDAP")
return
}
switch msgp.UnsafeString(field) {
case "status":
z.LDAP.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LDAP", "Status")
return
}
zb0003Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "LDAP")
return
}
}
}
// Clear omitted fields.
if (zb0003Mask & 0x1) == 0 {
z.LDAP.Status = ""
}
zb0001Mask |= 0x4
case "logger":
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Logger")
return
}
if cap(z.Logger) >= int(zb0004) {
z.Logger = (z.Logger)[:zb0004]
} else {
z.Logger = make([]Logger, zb0004)
}
for za0002 := range z.Logger {
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002)
return
}
if z.Logger[za0002] == nil {
z.Logger[za0002] = make(Logger, zb0005)
} else if len(z.Logger[za0002]) > 0 {
for key := range z.Logger[za0002] {
delete(z.Logger[za0002], key)
}
}
for zb0005 > 0 {
var za0003 string
var za0004 Status
zb0005--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002)
return
}
var zb0006 uint32
zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003)
return
}
var zb0006Mask uint8 /* 1 bits */
_ = zb0006Mask
for zb0006 > 0 {
zb0006--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003)
return
}
switch msgp.UnsafeString(field) {
case "status":
za0004.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003, "Status")
return
}
zb0006Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Logger", za0002, za0003)
return
}
}
}
// Clear omitted fields.
if (zb0006Mask & 0x1) == 0 {
za0004.Status = ""
}
z.Logger[za0002][za0003] = za0004
}
}
zb0001Mask |= 0x8
case "audit":
var zb0007 uint32
zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Audit")
return
}
if cap(z.Audit) >= int(zb0007) {
z.Audit = (z.Audit)[:zb0007]
} else {
z.Audit = make([]Audit, zb0007)
}
for za0005 := range z.Audit {
var zb0008 uint32
zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005)
return
}
if z.Audit[za0005] == nil {
z.Audit[za0005] = make(Audit, zb0008)
} else if len(z.Audit[za0005]) > 0 {
for key := range z.Audit[za0005] {
delete(z.Audit[za0005], key)
}
}
for zb0008 > 0 {
var za0006 string
var za0007 Status
zb0008--
za0006, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005)
return
}
var zb0009 uint32
zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006)
return
}
var zb0009Mask uint8 /* 1 bits */
_ = zb0009Mask
for zb0009 > 0 {
zb0009--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006)
return
}
switch msgp.UnsafeString(field) {
case "status":
za0007.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006, "Status")
return
}
zb0009Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Audit", za0005, za0006)
return
}
}
}
// Clear omitted fields.
if (zb0009Mask & 0x1) == 0 {
za0007.Status = ""
}
z.Audit[za0005][za0006] = za0007
}
}
zb0001Mask |= 0x10
case "notifications":
var zb0010 uint32
zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications")
return
}
if cap(z.Notifications) >= int(zb0010) {
z.Notifications = (z.Notifications)[:zb0010]
} else {
z.Notifications = make([]map[string][]TargetIDStatus, zb0010)
}
for za0008 := range z.Notifications {
var zb0011 uint32
zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008)
return
}
if z.Notifications[za0008] == nil {
z.Notifications[za0008] = make(map[string][]TargetIDStatus, zb0011)
} else if len(z.Notifications[za0008]) > 0 {
for key := range z.Notifications[za0008] {
delete(z.Notifications[za0008], key)
}
}
for zb0011 > 0 {
var za0009 string
var za0010 []TargetIDStatus
zb0011--
za0009, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008)
return
}
var zb0012 uint32
zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009)
return
}
if cap(za0010) >= int(zb0012) {
za0010 = (za0010)[:zb0012]
} else {
za0010 = make([]TargetIDStatus, zb0012)
}
for za0011 := range za0010 {
var zb0013 uint32
zb0013, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011)
return
}
if za0010[za0011] == nil {
za0010[za0011] = make(TargetIDStatus, zb0013)
} else if len(za0010[za0011]) > 0 {
for key := range za0010[za0011] {
delete(za0010[za0011], key)
}
}
for zb0013 > 0 {
var za0012 string
var za0013 Status
zb0013--
za0012, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011)
return
}
var zb0014 uint32
zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012)
return
}
var zb0014Mask uint8 /* 1 bits */
_ = zb0014Mask
for zb0014 > 0 {
zb0014--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012)
return
}
switch msgp.UnsafeString(field) {
case "status":
za0013.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012, "Status")
return
}
zb0014Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Notifications", za0008, za0009, za0011, za0012)
return
}
}
}
// Clear omitted fields.
if (zb0014Mask & 0x1) == 0 {
za0013.Status = ""
}
za0010[za0011][za0012] = za0013
}
}
z.Notifications[za0008][za0009] = za0010
}
}
zb0001Mask |= 0x20
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3f {
if (zb0001Mask & 0x1) == 0 {
z.KMS = KMS{}
}
if (zb0001Mask & 0x2) == 0 {
z.KMSStatus = nil
}
if (zb0001Mask & 0x4) == 0 {
z.LDAP = (LDAP{})
}
if (zb0001Mask & 0x8) == 0 {
z.Logger = nil
}
if (zb0001Mask & 0x10) == 0 {
z.Audit = nil
}
if (zb0001Mask & 0x20) == 0 {
z.Notifications = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Services) Msgsize() (s int) {
s = 1 + 4 + z.KMS.Msgsize() + 10 + msgp.ArrayHeaderSize
for za0001 := range z.KMSStatus {
s += z.KMSStatus[za0001].Msgsize()
}
s += 5 + 1 + 7 + msgp.StringPrefixSize + len(z.LDAP.Status) + 7 + msgp.ArrayHeaderSize
for za0002 := range z.Logger {
s += msgp.MapHeaderSize
if z.Logger[za0002] != nil {
for za0003, za0004 := range z.Logger[za0002] {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + 1 + 7 + msgp.StringPrefixSize + len(za0004.Status)
}
}
}
s += 6 + msgp.ArrayHeaderSize
for za0005 := range z.Audit {
s += msgp.MapHeaderSize
if z.Audit[za0005] != nil {
for za0006, za0007 := range z.Audit[za0005] {
_ = za0007
s += msgp.StringPrefixSize + len(za0006) + 1 + 7 + msgp.StringPrefixSize + len(za0007.Status)
}
}
}
s += 14 + msgp.ArrayHeaderSize
for za0008 := range z.Notifications {
s += msgp.MapHeaderSize
if z.Notifications[za0008] != nil {
for za0009, za0010 := range z.Notifications[za0008] {
_ = za0010
s += msgp.StringPrefixSize + len(za0009) + msgp.ArrayHeaderSize
for za0011 := range za0010 {
s += msgp.MapHeaderSize
if za0010[za0011] != nil {
for za0012, za0013 := range za0010[za0011] {
_ = za0013
s += msgp.StringPrefixSize + len(za0012) + 1 + 7 + msgp.StringPrefixSize + len(za0013.Status)
}
}
}
}
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *Status) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "status":
z.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Status = ""
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Status) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if z.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(z.Status)
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Status) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if z.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, z.Status)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Status) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "status":
z.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Status")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Status = ""
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Status) Msgsize() (s int) {
s = 1 + 7 + msgp.StringPrefixSize + len(z.Status)
return
}
// DecodeMsg implements msgp.Decodable
func (z *StorageInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Disks":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]Disk, zb0002)
}
for za0001 := range z.Disks {
err = z.Disks[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Backend":
err = z.Backend.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *StorageInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "Disks"
err = en.Append(0x82, 0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Disks)))
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
for za0001 := range z.Disks {
err = z.Disks[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
// write "Backend"
err = en.Append(0xa7, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64)
if err != nil {
return
}
err = z.Backend.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *StorageInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "Disks"
o = append(o, 0x82, 0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
for za0001 := range z.Disks {
o, err = z.Disks[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
// string "Backend"
o = append(o, 0xa7, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64)
o, err = z.Backend.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *StorageInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Disks":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]Disk, zb0002)
}
for za0001 := range z.Disks {
bts, err = z.Disks[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Backend":
bts, err = z.Backend.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Backend")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *StorageInfo) Msgsize() (s int) {
s = 1 + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Disks {
s += z.Disks[za0001].Msgsize()
}
s += 8 + z.Backend.Msgsize()
return
}
// DecodeMsg implements msgp.Decodable
func (z *TargetIDStatus) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(TargetIDStatus, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
zb0003--
var zb0001 string
var zb0002 Status
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "status":
zb0002.Status, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, zb0001, "Status")
return
}
zb0004Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
zb0002.Status = ""
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z TargetIDStatus) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0005, zb0006 := range z {
err = en.WriteString(zb0005)
if err != nil {
err = msgp.WrapError(err)
return
}
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if zb0006.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "status"
err = en.Append(0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(zb0006.Status)
if err != nil {
err = msgp.WrapError(err, zb0005, "Status")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z TargetIDStatus) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0005, zb0006 := range z {
o = msgp.AppendString(o, zb0005)
// check for omitted fields
zb0001Len := uint32(1)
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
if zb0006.Status == "" {
zb0001Len--
zb0001Mask |= 0x1
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "status"
o = append(o, 0xa6, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, zb0006.Status)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TargetIDStatus) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(TargetIDStatus, zb0003)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
var field []byte
_ = field
for zb0003 > 0 {
var zb0001 string
var zb0002 Status
zb0003--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
var zb0004Mask uint8 /* 1 bits */
_ = zb0004Mask
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "status":
zb0002.Status, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "Status")
return
}
zb0004Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
// Clear omitted fields.
if (zb0004Mask & 0x1) == 0 {
zb0002.Status = ""
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z TargetIDStatus) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0005, zb0006 := range z {
_ = zb0006
s += msgp.StringPrefixSize + len(zb0005) + 1 + 7 + msgp.StringPrefixSize + len(zb0006.Status)
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *TierStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "totalSize":
z.TotalSize, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalSize")
return
}
case "numVersions":
z.NumVersions, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "NumVersions")
return
}
case "numObjects":
z.NumObjects, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "NumObjects")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z TierStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "totalSize"
err = en.Append(0x83, 0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.TotalSize)
if err != nil {
err = msgp.WrapError(err, "TotalSize")
return
}
// write "numVersions"
err = en.Append(0xab, 0x6e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.NumVersions)
if err != nil {
err = msgp.WrapError(err, "NumVersions")
return
}
// write "numObjects"
err = en.Append(0xaa, 0x6e, 0x75, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.NumObjects)
if err != nil {
err = msgp.WrapError(err, "NumObjects")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z TierStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "totalSize"
o = append(o, 0x83, 0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.TotalSize)
// string "numVersions"
o = append(o, 0xab, 0x6e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendInt(o, z.NumVersions)
// string "numObjects"
o = append(o, 0xaa, 0x6e, 0x75, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
o = msgp.AppendInt(o, z.NumObjects)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "totalSize":
z.TotalSize, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalSize")
return
}
case "numVersions":
z.NumVersions, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NumVersions")
return
}
case "numObjects":
z.NumObjects, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NumObjects")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z TierStats) Msgsize() (s int) {
s = 1 + 10 + msgp.Uint64Size + 12 + msgp.IntSize + 11 + msgp.IntSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *Usage) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "size":
z.Size, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "error":
z.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Usage) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "size"
err = en.Append(0xa4, 0x73, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Size)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Error)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Usage) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "size"
o = append(o, 0xa4, 0x73, 0x69, 0x7a, 0x65)
o = msgp.AppendUint64(o, z.Size)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Error)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Usage) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "size":
z.Size, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "error":
z.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Usage) Msgsize() (s int) {
s = 1 + 5 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Error)
return
}
// DecodeMsg implements msgp.Decodable
func (z *Versions) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Versions) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Count)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Error)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Versions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Count)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Error)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Versions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "error":
z.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Versions) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint64Size + 6 + msgp.StringPrefixSize + len(z.Error)
return
}
golang-github-minio-madmin-go-3.0.104/info-commands_gen_test.go 0000664 0000000 0000000 00000174045 14774251704 0024364 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalAudit(t *testing.T) {
v := Audit{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgAudit(b *testing.B) {
v := Audit{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgAudit(b *testing.B) {
v := Audit{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalAudit(b *testing.B) {
v := Audit{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeAudit(t *testing.T) {
v := Audit{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeAudit Msgsize() is inaccurate")
}
vn := Audit{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeAudit(b *testing.B) {
v := Audit{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeAudit(b *testing.B) {
v := Audit{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBackendDisks(t *testing.T) {
v := BackendDisks{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBackendDisks(b *testing.B) {
v := BackendDisks{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBackendDisks(b *testing.B) {
v := BackendDisks{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBackendDisks(b *testing.B) {
v := BackendDisks{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBackendDisks(t *testing.T) {
v := BackendDisks{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBackendDisks Msgsize() is inaccurate")
}
vn := BackendDisks{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBackendDisks(b *testing.B) {
v := BackendDisks{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBackendDisks(b *testing.B) {
v := BackendDisks{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBackendInfo(t *testing.T) {
v := BackendInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBackendInfo(b *testing.B) {
v := BackendInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBackendInfo(b *testing.B) {
v := BackendInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBackendInfo(b *testing.B) {
v := BackendInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBackendInfo(t *testing.T) {
v := BackendInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBackendInfo Msgsize() is inaccurate")
}
vn := BackendInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBackendInfo(b *testing.B) {
v := BackendInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBackendInfo(b *testing.B) {
v := BackendInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBucketUsageInfo(t *testing.T) {
v := BucketUsageInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketUsageInfo(b *testing.B) {
v := BucketUsageInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketUsageInfo(b *testing.B) {
v := BucketUsageInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketUsageInfo(b *testing.B) {
v := BucketUsageInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketUsageInfo(t *testing.T) {
v := BucketUsageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketUsageInfo Msgsize() is inaccurate")
}
vn := BucketUsageInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketUsageInfo(b *testing.B) {
v := BucketUsageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketUsageInfo(b *testing.B) {
v := BucketUsageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalBuckets(t *testing.T) {
v := Buckets{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBuckets(b *testing.B) {
v := Buckets{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBuckets(b *testing.B) {
v := Buckets{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBuckets(b *testing.B) {
v := Buckets{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBuckets(t *testing.T) {
v := Buckets{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBuckets Msgsize() is inaccurate")
}
vn := Buckets{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBuckets(b *testing.B) {
v := Buckets{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBuckets(b *testing.B) {
v := Buckets{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalCacheStats(t *testing.T) {
v := CacheStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgCacheStats(b *testing.B) {
v := CacheStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgCacheStats(b *testing.B) {
v := CacheStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalCacheStats(b *testing.B) {
v := CacheStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeCacheStats(t *testing.T) {
v := CacheStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeCacheStats Msgsize() is inaccurate")
}
vn := CacheStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeCacheStats(b *testing.B) {
v := CacheStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeCacheStats(b *testing.B) {
v := CacheStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDataUsageInfo(t *testing.T) {
v := DataUsageInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDataUsageInfo(b *testing.B) {
v := DataUsageInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDataUsageInfo(b *testing.B) {
v := DataUsageInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDataUsageInfo(b *testing.B) {
v := DataUsageInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDataUsageInfo(t *testing.T) {
v := DataUsageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDataUsageInfo Msgsize() is inaccurate")
}
vn := DataUsageInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDataUsageInfo(b *testing.B) {
v := DataUsageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDataUsageInfo(b *testing.B) {
v := DataUsageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDeleteMarkers(t *testing.T) {
v := DeleteMarkers{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDeleteMarkers(b *testing.B) {
v := DeleteMarkers{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDeleteMarkers(b *testing.B) {
v := DeleteMarkers{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDeleteMarkers(b *testing.B) {
v := DeleteMarkers{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDeleteMarkers(t *testing.T) {
v := DeleteMarkers{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDeleteMarkers Msgsize() is inaccurate")
}
vn := DeleteMarkers{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDeleteMarkers(b *testing.B) {
v := DeleteMarkers{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDeleteMarkers(b *testing.B) {
v := DeleteMarkers{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDisk(t *testing.T) {
v := Disk{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDisk(b *testing.B) {
v := Disk{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDisk(b *testing.B) {
v := Disk{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDisk(b *testing.B) {
v := Disk{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDisk(t *testing.T) {
v := Disk{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDisk Msgsize() is inaccurate")
}
vn := Disk{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDisk(b *testing.B) {
v := Disk{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDisk(b *testing.B) {
v := Disk{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDiskMetrics(t *testing.T) {
v := DiskMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDiskMetrics(b *testing.B) {
v := DiskMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDiskMetrics(b *testing.B) {
v := DiskMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDiskMetrics(b *testing.B) {
v := DiskMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDiskMetrics(t *testing.T) {
v := DiskMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDiskMetrics Msgsize() is inaccurate")
}
vn := DiskMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDiskMetrics(b *testing.B) {
v := DiskMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDiskMetrics(b *testing.B) {
v := DiskMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalErasureBackend(t *testing.T) {
v := ErasureBackend{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgErasureBackend(b *testing.B) {
v := ErasureBackend{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgErasureBackend(b *testing.B) {
v := ErasureBackend{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalErasureBackend(b *testing.B) {
v := ErasureBackend{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeErasureBackend(t *testing.T) {
v := ErasureBackend{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeErasureBackend Msgsize() is inaccurate")
}
vn := ErasureBackend{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeErasureBackend(b *testing.B) {
v := ErasureBackend{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeErasureBackend(b *testing.B) {
v := ErasureBackend{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalErasureSetInfo(t *testing.T) {
v := ErasureSetInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgErasureSetInfo(b *testing.B) {
v := ErasureSetInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgErasureSetInfo(b *testing.B) {
v := ErasureSetInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalErasureSetInfo(b *testing.B) {
v := ErasureSetInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeErasureSetInfo(t *testing.T) {
v := ErasureSetInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeErasureSetInfo Msgsize() is inaccurate")
}
vn := ErasureSetInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeErasureSetInfo(b *testing.B) {
v := ErasureSetInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeErasureSetInfo(b *testing.B) {
v := ErasureSetInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalFSBackend(t *testing.T) {
v := FSBackend{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgFSBackend(b *testing.B) {
v := FSBackend{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgFSBackend(b *testing.B) {
v := FSBackend{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalFSBackend(b *testing.B) {
v := FSBackend{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeFSBackend(t *testing.T) {
v := FSBackend{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeFSBackend Msgsize() is inaccurate")
}
vn := FSBackend{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeFSBackend(b *testing.B) {
v := FSBackend{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeFSBackend(b *testing.B) {
v := FSBackend{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalGCStats(t *testing.T) {
v := GCStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgGCStats(b *testing.B) {
v := GCStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgGCStats(b *testing.B) {
v := GCStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalGCStats(b *testing.B) {
v := GCStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeGCStats(t *testing.T) {
v := GCStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeGCStats Msgsize() is inaccurate")
}
vn := GCStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeGCStats(b *testing.B) {
v := GCStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeGCStats(b *testing.B) {
v := GCStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalInfoMessage(t *testing.T) {
v := InfoMessage{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgInfoMessage(b *testing.B) {
v := InfoMessage{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgInfoMessage(b *testing.B) {
v := InfoMessage{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalInfoMessage(b *testing.B) {
v := InfoMessage{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeInfoMessage(t *testing.T) {
v := InfoMessage{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeInfoMessage Msgsize() is inaccurate")
}
vn := InfoMessage{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeInfoMessage(b *testing.B) {
v := InfoMessage{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeInfoMessage(b *testing.B) {
v := InfoMessage{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalKMS(t *testing.T) {
v := KMS{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgKMS(b *testing.B) {
v := KMS{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgKMS(b *testing.B) {
v := KMS{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalKMS(b *testing.B) {
v := KMS{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeKMS(t *testing.T) {
v := KMS{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeKMS Msgsize() is inaccurate")
}
vn := KMS{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeKMS(b *testing.B) {
v := KMS{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeKMS(b *testing.B) {
v := KMS{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalLDAP(t *testing.T) {
v := LDAP{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgLDAP(b *testing.B) {
v := LDAP{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgLDAP(b *testing.B) {
v := LDAP{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalLDAP(b *testing.B) {
v := LDAP{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeLDAP(t *testing.T) {
v := LDAP{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeLDAP Msgsize() is inaccurate")
}
vn := LDAP{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeLDAP(b *testing.B) {
v := LDAP{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeLDAP(b *testing.B) {
v := LDAP{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalLogger(t *testing.T) {
v := Logger{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgLogger(b *testing.B) {
v := Logger{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgLogger(b *testing.B) {
v := Logger{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalLogger(b *testing.B) {
v := Logger{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeLogger(t *testing.T) {
v := Logger{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeLogger Msgsize() is inaccurate")
}
vn := Logger{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeLogger(b *testing.B) {
v := Logger{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeLogger(b *testing.B) {
v := Logger{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMemStats(t *testing.T) {
v := MemStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMemStats(b *testing.B) {
v := MemStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMemStats(b *testing.B) {
v := MemStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMemStats(b *testing.B) {
v := MemStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMemStats(t *testing.T) {
v := MemStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMemStats Msgsize() is inaccurate")
}
vn := MemStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMemStats(b *testing.B) {
v := MemStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMemStats(b *testing.B) {
v := MemStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalObjects(t *testing.T) {
v := Objects{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgObjects(b *testing.B) {
v := Objects{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgObjects(b *testing.B) {
v := Objects{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalObjects(b *testing.B) {
v := Objects{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeObjects(t *testing.T) {
v := Objects{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeObjects Msgsize() is inaccurate")
}
vn := Objects{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeObjects(b *testing.B) {
v := Objects{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeObjects(b *testing.B) {
v := Objects{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalServerInfoOpts(t *testing.T) {
v := ServerInfoOpts{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgServerInfoOpts(b *testing.B) {
v := ServerInfoOpts{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgServerInfoOpts(b *testing.B) {
v := ServerInfoOpts{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalServerInfoOpts(b *testing.B) {
v := ServerInfoOpts{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeServerInfoOpts(t *testing.T) {
v := ServerInfoOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeServerInfoOpts Msgsize() is inaccurate")
}
vn := ServerInfoOpts{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeServerInfoOpts(b *testing.B) {
v := ServerInfoOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeServerInfoOpts(b *testing.B) {
v := ServerInfoOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalServerProperties(t *testing.T) {
v := ServerProperties{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgServerProperties(b *testing.B) {
v := ServerProperties{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgServerProperties(b *testing.B) {
v := ServerProperties{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalServerProperties(b *testing.B) {
v := ServerProperties{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeServerProperties(t *testing.T) {
v := ServerProperties{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeServerProperties Msgsize() is inaccurate")
}
vn := ServerProperties{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeServerProperties(b *testing.B) {
v := ServerProperties{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeServerProperties(b *testing.B) {
v := ServerProperties{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalServices(t *testing.T) {
v := Services{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgServices(b *testing.B) {
v := Services{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgServices(b *testing.B) {
v := Services{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalServices(b *testing.B) {
v := Services{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeServices(t *testing.T) {
v := Services{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeServices Msgsize() is inaccurate")
}
vn := Services{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeServices(b *testing.B) {
v := Services{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeServices(b *testing.B) {
v := Services{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalStatus(t *testing.T) {
v := Status{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgStatus(b *testing.B) {
v := Status{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgStatus(b *testing.B) {
v := Status{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalStatus(b *testing.B) {
v := Status{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeStatus(t *testing.T) {
v := Status{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeStatus Msgsize() is inaccurate")
}
vn := Status{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeStatus(b *testing.B) {
v := Status{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeStatus(b *testing.B) {
v := Status{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalStorageInfo(t *testing.T) {
v := StorageInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgStorageInfo(b *testing.B) {
v := StorageInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgStorageInfo(b *testing.B) {
v := StorageInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalStorageInfo(b *testing.B) {
v := StorageInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeStorageInfo(t *testing.T) {
v := StorageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeStorageInfo Msgsize() is inaccurate")
}
vn := StorageInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeStorageInfo(b *testing.B) {
v := StorageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeStorageInfo(b *testing.B) {
v := StorageInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalTargetIDStatus(t *testing.T) {
v := TargetIDStatus{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTargetIDStatus(b *testing.B) {
v := TargetIDStatus{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTargetIDStatus(b *testing.B) {
v := TargetIDStatus{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTargetIDStatus(b *testing.B) {
v := TargetIDStatus{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTargetIDStatus(t *testing.T) {
v := TargetIDStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTargetIDStatus Msgsize() is inaccurate")
}
vn := TargetIDStatus{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTargetIDStatus(b *testing.B) {
v := TargetIDStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTargetIDStatus(b *testing.B) {
v := TargetIDStatus{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalTierStats(t *testing.T) {
v := TierStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTierStats(b *testing.B) {
v := TierStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTierStats(b *testing.B) {
v := TierStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTierStats(b *testing.B) {
v := TierStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTierStats(t *testing.T) {
v := TierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTierStats Msgsize() is inaccurate")
}
vn := TierStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTierStats(b *testing.B) {
v := TierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTierStats(b *testing.B) {
v := TierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalUsage(t *testing.T) {
v := Usage{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgUsage(b *testing.B) {
v := Usage{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgUsage(b *testing.B) {
v := Usage{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalUsage(b *testing.B) {
v := Usage{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeUsage(t *testing.T) {
v := Usage{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeUsage Msgsize() is inaccurate")
}
vn := Usage{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeUsage(b *testing.B) {
v := Usage{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeUsage(b *testing.B) {
v := Usage{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalVersions(t *testing.T) {
v := Versions{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgVersions(b *testing.B) {
v := Versions{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgVersions(b *testing.B) {
v := Versions{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalVersions(b *testing.B) {
v := Versions{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeVersions(t *testing.T) {
v := Versions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeVersions Msgsize() is inaccurate")
}
vn := Versions{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeVersions(b *testing.B) {
v := Versions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeVersions(b *testing.B) {
v := Versions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/inspect.go 0000664 0000000 0000000 00000005546 14774251704 0021406 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"bufio"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"net/url"
)
// InspectOptions provides options to Inspect.
type InspectOptions struct {
Volume, File string
PublicKey []byte // PublicKey to use for inspected data.
}
// Inspect makes an admin call to download raw files from disk.
// If Inspect is called with a public key, no key is returned and
// the data is returned encrypted with that public key.
func (adm *AdminClient) Inspect(ctx context.Context, d InspectOptions) (key []byte, c io.ReadCloser, err error) {
// Add form key/values in the body
form := make(url.Values)
form.Set("volume", d.Volume)
form.Set("file", d.File)
if d.PublicKey != nil {
form.Set("public-key", base64.StdEncoding.EncodeToString(d.PublicKey))
}
method := ""
reqData := requestData{
relPath: adminAPIPrefix + "/inspect-data",
}
// If the public-key is specified, create a POST request and send
// parameters as multipart-form instead of query values
if d.PublicKey != nil {
method = http.MethodPost
reqData.customHeaders = make(http.Header)
reqData.customHeaders.Set("Content-Type", "application/x-www-form-urlencoded")
reqData.content = []byte(form.Encode())
} else {
method = http.MethodGet
reqData.queryValues = form
}
resp, err := adm.executeMethod(ctx, method, reqData)
if err != nil {
return nil, nil, err
}
if resp.StatusCode != http.StatusOK {
closeResponse(resp)
return nil, nil, httpRespToErrorResponse(resp)
}
bior := bufio.NewReaderSize(resp.Body, 4<<10)
format, err := bior.ReadByte()
if err != nil {
closeResponse(resp)
return nil, nil, err
}
switch format {
case 1:
key = make([]byte, 32)
// Read key...
_, err = io.ReadFull(bior, key[:])
if err != nil {
closeResponse(resp)
return nil, nil, err
}
case 2:
if err := bior.UnreadByte(); err != nil {
return nil, nil, err
}
default:
closeResponse(resp)
return nil, nil, errors.New("unknown data version")
}
// Return body
return key, &closeWrapper{Reader: bior, Closer: resp.Body}, nil
}
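// The snippet below is a minimal usage sketch for Inspect, for illustration
// only: the endpoint, credentials and volume/file values are hypothetical, the
// package-level New constructor is assumed, and error handling is reduced to
// panics for brevity.
//
//	client, err := madmin.New("minio.example.net:9000", "ACCESSKEY", "SECRETKEY", true)
//	if err != nil {
//		panic(err)
//	}
//	key, rd, err := client.Inspect(context.Background(), madmin.InspectOptions{
//		Volume: "mybucket",
//		File:   "path/to/object/xl.meta",
//	})
//	if err != nil {
//		panic(err)
//	}
//	defer rd.Close()
//	// With no PublicKey set, key holds the 32-byte decryption key for the
//	// returned stream; keep both to decrypt the downloaded data later.
//	_ = key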
type closeWrapper struct {
io.Reader
io.Closer
}
golang-github-minio-madmin-go-3.0.104/jobs/ 0000775 0000000 0000000 00000000000 14774251704 0020335 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/jobs/batch.go 0000664 0000000 0000000 00000022654 14774251704 0021756 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// Package jobs consists of all structs related to batch job requests and related functionality.
package jobs
import (
"context"
"time"
"github.com/minio/madmin-go/v3/xtime"
miniogo "github.com/minio/minio-go/v7"
)
// BatchJobRequest to start batch job
type BatchJobRequest struct {
ID string `yaml:"-" json:"name"`
User string `yaml:"-" json:"user"`
Started time.Time `yaml:"-" json:"started"`
Replicate *BatchJobReplicateV1 `yaml:"replicate" json:"replicate"`
KeyRotate *BatchJobKeyRotateV1 `yaml:"keyrotate" json:"keyrotate"`
Expire *BatchJobExpire `yaml:"expire" json:"expire"`
Ctx context.Context `msg:"-"`
}
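// For illustration, a replication request unmarshals from YAML shaped roughly as
// below. Only field names defined by the yaml tags in this file are shown; the
// values (and any additional fields enforced server-side) are placeholders.
//
//	replicate:
//	  apiVersion: v1
//	  source:
//	    type: <resource-type>
//	    bucket: source-bucket
//	    prefix: prefix/
//	  target:
//	    type: <resource-type>
//	    bucket: target-bucket
//	    endpoint: https://replica.example.net
//	    credentials:
//	      accessKey: ACCESSKEY
//	      secretKey: SECRETKEY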
// BatchJobReplicateV1 v1 of batch job replication
type BatchJobReplicateV1 struct {
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Flags BatchJobReplicateFlags `yaml:"flags" json:"flags"`
Target BatchJobReplicateTarget `yaml:"target" json:"target"`
Source BatchJobReplicateSource `yaml:"source" json:"source"`
Clnt *miniogo.Core `msg:"-"`
}
// BatchJobReplicateFlags holds the various configuration flags currently supported for a replication job definition
type BatchJobReplicateFlags struct {
Filter BatchReplicateFilter `yaml:"filter" json:"filter"`
Notify BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
}
// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
NewerThan xtime.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
}
// BatchJobKV is a key-value data type which supports wildcard matching
type BatchJobKV struct {
Line, Col int
Key string `yaml:"key" json:"key"`
Value string `yaml:"value" json:"value"`
}
// BatchJobNotification stores notification endpoint and token information.
// Used by batch jobs to notify of their status.
type BatchJobNotification struct {
Line, Col int
Endpoint string `yaml:"endpoint" json:"endpoint"`
Token string `yaml:"token" json:"token"`
}
// BatchJobRetry stores retry configuration used in the event of failures.
type BatchJobRetry struct {
Line, Col int
Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts
Delay time.Duration `yaml:"delay" json:"delay"` // delay between each retry
}
// BatchJobReplicateTarget describes the target element of the replication job that receives
// the filtered data from the source
type BatchJobReplicateTarget struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
}
// BatchJobReplicateResourceType defines the type of batch jobs
type BatchJobReplicateResourceType string
// BatchJobReplicateCredentials holds the access credentials for batch replication; they
// may be for either the target or the source.
type BatchJobReplicateCredentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"`
}
// BatchJobReplicateSource describes the source element of the replication job that is
// the source of the data for the target
type BatchJobReplicateSource struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
Snowball BatchJobSnowball `yaml:"snowball" json:"snowball"`
}
// BatchJobPrefix supports unmarshalling the YAML prefix field from either a single string or a slice of strings, as sketched below
type BatchJobPrefix []string
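// Both YAML forms below populate a BatchJobPrefix (the values are illustrative):
//
//	prefix: "object-prefix1/"
//
// or
//
//	prefix:
//	  - "object-prefix1/"
//	  - "object-prefix2/"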
// BatchJobSnowball describes the snowball feature when replicating objects from a local source to a remote target
type BatchJobSnowball struct {
Line, Col int
Disable *bool `yaml:"disable" json:"disable"`
Batch *int `yaml:"batch" json:"batch"`
InMemory *bool `yaml:"inmemory" json:"inmemory"`
Compress *bool `yaml:"compress" json:"compress"`
SmallerThan *string `yaml:"smallerThan" json:"smallerThan"`
SkipErrs *bool `yaml:"skipErrs" json:"skipErrs"`
}
// BatchJobKeyRotateV1 v1 of batch key rotation job
type BatchJobKeyRotateV1 struct {
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Flags BatchJobKeyRotateFlags `yaml:"flags" json:"flags"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Encryption BatchJobKeyRotateEncryption `yaml:"encryption" json:"encryption"`
}
// BatchJobKeyRotateFlags holds the various configuration flags currently supported for a key rotation job definition
type BatchJobKeyRotateFlags struct {
Filter BatchKeyRotateFilter `yaml:"filter" json:"filter"`
Notify BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
}
// BatchKeyRotateFilter holds all the filters currently supported for batch key rotation
type BatchKeyRotateFilter struct {
NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
KMSKeyID string `yaml:"kmskeyid" json:"kmskey"`
}
// BatchJobKeyRotateEncryption defines the key rotation encryption options passed with the job
type BatchJobKeyRotateEncryption struct {
Type BatchKeyRotationType `yaml:"type" json:"type"`
Key string `yaml:"key" json:"key"`
Context string `yaml:"context" json:"context"`
KmsContext map[string]string `msg:"-"`
}
// BatchKeyRotationType defines key rotation type
type BatchKeyRotationType string
// BatchJobExpire represents configuration parameters for a batch expiration
// job, typically supplied in YAML form
type BatchJobExpire struct {
Line, Col int
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"`
}
// BatchJobExpireFilter holds all the filters currently supported for batch expiration
type BatchJobExpireFilter struct {
Line, Col int
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
Size BatchJobSizeFilter `yaml:"size" json:"size"`
Type string `yaml:"type" json:"type"`
Name string `yaml:"name" json:"name"`
Purge BatchJobExpirePurge `yaml:"purge" json:"purge"`
}
// BatchJobSizeFilter supports size-based filters - lessThan and greaterThan
type BatchJobSizeFilter struct {
Line, Col int
UpperBound BatchJobSize `yaml:"lessThan" json:"lessThan"`
LowerBound BatchJobSize `yaml:"greaterThan" json:"greaterThan"`
}
// BatchJobExpirePurge accepts a non-negative number of versions to be retained
type BatchJobExpirePurge struct {
Line, Col int
RetainVersions int `yaml:"retainVersions" json:"retainVersions"`
}
// BatchJobSize supports humanized byte values in yaml files
type BatchJobSize int64
golang-github-minio-madmin-go-3.0.104/kernel/ 0000775 0000000 0000000 00000000000 14774251704 0020660 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/kernel/kernel.go 0000664 0000000 0000000 00000010030 14774251704 0022461 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
//go:build linux
// +build linux
package kernel
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"syscall"
)
var versionRegex = regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+).*$`)
// VersionFromRelease converts a release string with format
// 4.4.2[-1] to a kernel version number in LINUX_VERSION_CODE format.
// That is, for kernel "a.b.c", the version number will be (a<<16 + b<<8 + c)
func VersionFromRelease(releaseString string) (uint32, error) {
versionParts := versionRegex.FindStringSubmatch(releaseString)
if len(versionParts) != 4 {
return 0, fmt.Errorf("got invalid release version %q (expected format '4.3.2-1')", releaseString)
}
major, err := strconv.Atoi(versionParts[1])
if err != nil {
return 0, err
}
minor, err := strconv.Atoi(versionParts[2])
if err != nil {
return 0, err
}
patch, err := strconv.Atoi(versionParts[3])
if err != nil {
return 0, err
}
return Version(major, minor, patch), nil
}
// Version implements KERNEL_VERSION equivalent macro
// #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
func Version(major, minor, patch int) uint32 {
if patch > 255 {
patch = 255
}
out := major<<16 + minor<<8 + patch
return uint32(out)
}
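// For example (matching the data in kernel_test.go), release "4.9.168" maps to
// Version(4, 9, 168) = 4<<16 + 9<<8 + 168 = 264616, while a patch level above
// 255 is clamped, so Version(4, 9, 300) = 4<<16 + 9<<8 + 255 = 264703.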
func currentReleaseUname() (string, error) {
var buf syscall.Utsname
if err := syscall.Uname(&buf); err != nil {
return "", err
}
releaseString := strings.Trim(utsnameStr(buf.Release[:]), "\x00")
return releaseString, nil
}
func currentReleaseUbuntu() (string, error) {
procVersion, err := os.ReadFile("/proc/version_signature")
if err != nil {
return "", err
}
var u1, u2, releaseString string
_, err = fmt.Sscanf(string(procVersion), "%s %s %s", &u1, &u2, &releaseString)
if err != nil {
return "", err
}
return releaseString, nil
}
var debianVersionRegex = regexp.MustCompile(`.* SMP Debian (\d+\.\d+.\d+-\d+)(?:\+[[:alnum:]]*)?.*`)
func parseDebianRelease(str string) (string, error) {
match := debianVersionRegex.FindStringSubmatch(str)
if len(match) != 2 {
return "", fmt.Errorf("failed to parse kernel version from /proc/version: %s", str)
}
return match[1], nil
}
func currentReleaseDebian() (string, error) {
procVersion, err := os.ReadFile("/proc/version")
if err != nil {
return "", fmt.Errorf("error reading /proc/version: %s", err)
}
return parseDebianRelease(string(procVersion))
}
// CurrentRelease returns the current kernel release ensuring that
// Ubuntu and Debian release numbers are accurate.
func CurrentRelease() (string, error) {
// We need extra checks for Debian and Ubuntu as they modify
// the kernel version patch number for compatibility with
// out-of-tree modules. Linux perf tools do the same for Ubuntu
// systems: https://github.com/torvalds/linux/commit/d18acd15c
//
// See also:
// https://kernel-team.pages.debian.net/kernel-handbook/ch-versions.html
// https://wiki.ubuntu.com/Kernel/FAQ
version, err := currentReleaseUbuntu()
if err == nil {
return version, nil
}
version, err = currentReleaseDebian()
if err == nil {
return version, nil
}
return currentReleaseUname()
}
// CurrentVersion returns the current kernel version in
// LINUX_VERSION_CODE format (see VersionFromRelease())
func CurrentVersion() (uint32, error) {
release, err := CurrentRelease()
if err == nil {
return VersionFromRelease(release)
}
return 0, err
}
golang-github-minio-madmin-go-3.0.104/kernel/kernel_other.go 0000664 0000000 0000000 00000002320 14774251704 0023665 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
//go:build !linux
// +build !linux
package kernel
// VersionFromRelease only implemented on Linux.
func VersionFromRelease(_ string) (uint32, error) {
return 0, nil
}
// Version only implemented on Linux.
func Version(_, _, _ int) uint32 {
return 0
}
// CurrentRelease only implemented on Linux.
func CurrentRelease() (string, error) {
return "", nil
}
// CurrentVersion only implemented on Linux.
func CurrentVersion() (uint32, error) {
return 0, nil
}
golang-github-minio-madmin-go-3.0.104/kernel/kernel_test.go 0000664 0000000 0000000 00000006145 14774251704 0023534 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
//go:build linux
// +build linux
package kernel
import "testing"
var testData = []struct {
success bool
releaseString string
kernelVersion uint32
}{
{true, "4.1.2-3", 262402},
{true, "4.8.14-200.fc24.x86_64", 264206},
{true, "4.1.2-3foo", 262402},
{true, "4.1.2foo-1", 262402},
{true, "4.1.2-rkt-v1", 262402},
{true, "4.1.2rkt-v1", 262402},
{true, "4.1.2-3 foo", 262402},
{true, "3.10.0-1062.el7.x86_64", 199168},
{true, "3.0.0", 196608},
{true, "2.6.32", 132640},
{true, "5.13.0-30-generic", 331008},
{true, "5.10.0-1052-oem", 330240},
{false, "foo 4.1.2-3", 0},
{true, "4.1.2", 262402},
{false, ".4.1.2", 0},
{false, "4.1.", 0},
{false, "4.1", 0},
}
func TestVersionFromRelease(t *testing.T) {
for _, test := range testData {
version, err := VersionFromRelease(test.releaseString)
if err != nil && test.success {
t.Errorf("expected %q to success: %s", test.releaseString, err)
} else if err == nil && !test.success {
t.Errorf("expected %q to fail", test.releaseString)
}
if version != test.kernelVersion {
t.Errorf("expected kernel version %d, got %d", test.kernelVersion, version)
}
}
}
func TestParseDebianVersion(t *testing.T) {
for _, tc := range []struct {
success bool
releaseString string
kernelVersion uint32
}{
// 4.9.168
{true, "Linux version 4.9.0-9-amd64 (debian-kernel@lists.debian.org) (gcc version 6.3.0 20170516 (Debian 6.3.0-18+deb9u1) ) #1 SMP Debian 4.9.168-1+deb9u3 (2019-06-16)", 264616},
// 4.9.88
{true, "Linux ip-10-0-75-49 4.9.0-6-amd64 #1 SMP Debian 4.9.88-1+deb9u1 (2018-05-07) x86_64 GNU/Linux", 264536},
// 3.16.68
{true, "Linux version 3.16.0-9-amd64 (debian-kernel@lists.debian.org) (gcc version 4.9.2 (Debian 4.9.2-10+deb8u2) ) #1 SMP Debian 3.16.68-1 (2019-05-22)", 200772},
// Invalid
{false, "Linux version 4.9.125-linuxkit (root@659b6d51c354) (gcc version 6.4.0 (Alpine 6.4.0) ) #1 SMP Fri Sep 7 08:20:28 UTC 2018", 0},
} {
var version uint32
release, err := parseDebianRelease(tc.releaseString)
if err == nil {
version, err = VersionFromRelease(release)
}
if err != nil && tc.success {
t.Errorf("expected %q to success: %s", tc.releaseString, err)
} else if err == nil && !tc.success {
t.Errorf("expected %q to fail", tc.releaseString)
}
if version != tc.kernelVersion {
t.Errorf("expected kernel version %d, got %d", tc.kernelVersion, version)
}
}
}
golang-github-minio-madmin-go-3.0.104/kernel/kernel_utsname_int8.go 0000664 0000000 0000000 00000002313 14774251704 0025164 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
//go:build (linux && 386) || (linux && amd64) || (linux && arm64) || (linux && loong64) || (linux && mips64) || (linux && mips64le) || (linux && mips)
// +build linux,386 linux,amd64 linux,arm64 linux,loong64 linux,mips64 linux,mips64le linux,mips
package kernel
func utsnameStr(in []int8) string {
out := make([]byte, 0, len(in))
for i := 0; i < len(in); i++ {
if in[i] == 0x00 {
break
}
out = append(out, byte(in[i]))
}
return string(out)
}
golang-github-minio-madmin-go-3.0.104/kernel/kernel_utsname_uint8.go 0000664 0000000 0000000 00000002212 14774251704 0025347 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
//go:build (linux && arm) || (linux && ppc64) || (linux && ppc64le) || (linux && s390x) || (linux && riscv64)
// +build linux,arm linux,ppc64 linux,ppc64le linux,s390x linux,riscv64
package kernel
func utsnameStr(in []uint8) string {
out := make([]byte, 0, len(in))
for i := 0; i < len(in); i++ {
if in[i] == 0x00 {
break
}
out = append(out, byte(in[i]))
}
return string(out)
}
golang-github-minio-madmin-go-3.0.104/kms-commands.go 0000664 0000000 0000000 00000037607 14774251704 0022335 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"time"
)
// KMSStatus contains various information about
// the KMS connected to a MinIO server, such as
// the KMS endpoints and the default key ID.
type KMSStatus struct {
Name string `json:"name"` // Name or type of the KMS
DefaultKeyID string `json:"default-key-id"` // The key ID used when no explicit key is specified
Endpoints map[string]ItemState `json:"endpoints"` // List of KMS endpoints and their status (online/offline)
State KMSState `json:"state"` // Current KMS server state
}
// KMSState is a KES server status snapshot.
type KMSState struct {
Version string
KeyStoreLatency time.Duration
KeyStoreReachable bool
KeystoreAvailable bool
OS string
Arch string
UpTime time.Duration
CPUs int
UsableCPUs int
HeapAlloc uint64
StackAlloc uint64
}
// KMSKeyInfo contains key metadata
type KMSKeyInfo struct {
CreatedAt time.Time `json:"createdAt"`
CreatedBy string `json:"createdBy"`
Name string `json:"name"`
}
// KMSPolicyInfo contains policy metadata
type KMSPolicyInfo struct {
CreatedAt time.Time `json:"created_at"`
CreatedBy string `json:"created_by"`
Name string `json:"name"`
}
// KMSIdentityInfo contains identity metadata
type KMSIdentityInfo struct {
CreatedAt time.Time `json:"createdAt"`
CreatedBy string `json:"createdBy"`
Identity string `json:"identity"`
Policy string `json:"policy"`
Error string `json:"error"`
}
// KMSDescribePolicy contains policy metadata
type KMSDescribePolicy struct {
Name string `json:"name"`
CreatedAt time.Time `json:"created_at"`
CreatedBy string `json:"created_by"`
}
// KMSPolicy represents a KMS policy
type KMSPolicy struct {
Allow []string `json:"allow"`
Deny []string `json:"deny"`
}
// KMSDescribeIdentity contains identity metadata
type KMSDescribeIdentity struct {
Policy string `json:"policy"`
Identity string `json:"identity"`
IsAdmin bool `json:"isAdmin"`
CreatedAt time.Time `json:"createdAt"`
CreatedBy string `json:"createdBy"`
}
// KMSDescribeSelfIdentity describes the identity issuing the request
type KMSDescribeSelfIdentity struct {
Policy *KMSPolicy `json:"policy"`
PolicyName string `json:"policyName"`
Identity string `json:"identity"`
IsAdmin bool `json:"isAdmin"`
CreatedAt string `json:"createdAt"`
CreatedBy string `json:"createdBy"`
}
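// KMSMetrics is a snapshot of KES server metrics reported by the KMS
// connected to the MinIO server.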
type KMSMetrics struct {
RequestOK int64 `json:"kes_http_request_success"`
RequestErr int64 `json:"kes_http_request_error"`
RequestFail int64 `json:"kes_http_request_failure"`
RequestActive int64 `json:"kes_http_request_active"`
AuditEvents int64 `json:"kes_log_audit_events"`
ErrorEvents int64 `json:"kes_log_error_events"`
LatencyHistogram map[int64]int64 `json:"kes_http_response_time"`
UpTime int64 `json:"kes_system_up_time"`
CPUs int64 `json:"kes_system_num_cpu"`
UsableCPUs int64 `json:"kes_system_num_cpu_used"`
Threads int64 `json:"kes_system_num_threads"`
HeapAlloc int64 `json:"kes_system_mem_heap_used"`
HeapObjects int64 `json:"kes_system_mem_heap_objects"`
StackAlloc int64 `json:"kes_system_mem_stack_used"`
}
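// KMSAPI describes a single API endpoint exposed by the KMS (KES) server.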
type KMSAPI struct {
Method string
Path string
MaxBody int64
Timeout int64
}
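// KMSVersion holds the version reported by the KMS (KES) server.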
type KMSVersion struct {
Version string `json:"version"`
}
// KMSStatus returns status information about the KMS connected
// to the MinIO server, if configured.
func (adm *AdminClient) KMSStatus(ctx context.Context) (KMSStatus, error) {
// GET /minio/kms/v1/status
resp, err := adm.doKMSRequest(ctx, "/status", http.MethodGet, nil, map[string]string{})
if err != nil {
return KMSStatus{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return KMSStatus{}, httpRespToErrorResponse(resp)
}
var status KMSStatus
if err = json.NewDecoder(resp.Body).Decode(&status); err != nil {
return KMSStatus{}, err
}
return status, nil
}
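// A minimal usage sketch (illustrative only; it assumes an already
// initialized *AdminClient named madmClnt and the usual "context", "fmt"
// and "log" imports in the calling package):
//
//    status, err := madmClnt.KMSStatus(context.Background())
//    if err != nil {
//        log.Fatalln(err)
//    }
//    fmt.Println("KMS:", status.Name, "default key:", status.DefaultKeyID)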
// KMSMetrics returns metrics about the KMS connected
// to the MinIO server, if configured.
func (adm *AdminClient) KMSMetrics(ctx context.Context) (*KMSMetrics, error) {
// GET /minio/kms/v1/metrics
resp, err := adm.doKMSRequest(ctx, "/metrics", http.MethodGet, nil, map[string]string{})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var metrics KMSMetrics
if err = json.NewDecoder(resp.Body).Decode(&metrics); err != nil {
return nil, err
}
return &metrics, nil
}
// KMSAPIs returns a list of supported API endpoints in the KMS connected
// to the MinIO server, if configured.
func (adm *AdminClient) KMSAPIs(ctx context.Context) ([]KMSAPI, error) {
// GET /minio/kms/v1/apis
resp, err := adm.doKMSRequest(ctx, "/apis", http.MethodGet, nil, map[string]string{})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var apis []KMSAPI
if err = json.NewDecoder(resp.Body).Decode(&apis); err != nil {
return nil, err
}
return apis, nil
}
// KMSVersion returns the version of the KMS connected
// to the MinIO server, if configured.
func (adm *AdminClient) KMSVersion(ctx context.Context) (*KMSVersion, error) {
// GET /minio/kms/v1/version
resp, err := adm.doKMSRequest(ctx, "/version", http.MethodGet, nil, map[string]string{})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var version KMSVersion
if err = json.NewDecoder(resp.Body).Decode(&version); err != nil {
return nil, err
}
return &version, nil
}
// CreateKey tries to create a new master key with the given keyID
// at the KMS connected to a MinIO server.
func (adm *AdminClient) CreateKey(ctx context.Context, keyID string) error {
// POST /minio/kms/v1/key/create?key-id=<key-id>
resp, err := adm.doKMSRequest(ctx, "/key/create", http.MethodPost, nil, map[string]string{"key-id": keyID})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
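// Key creation sketch (illustrative only; "my-minio-key" is a placeholder
// key name and madmClnt an already constructed *AdminClient):
//
//    if err := madmClnt.CreateKey(context.Background(), "my-minio-key"); err != nil {
//        log.Fatalln(err)
//    }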
// DeleteKey tries to delete a key with the given keyID
// at the KMS connected to a MinIO server.
func (adm *AdminClient) DeleteKey(ctx context.Context, keyID string) error {
// DELETE /minio/kms/v1/key/delete?key-id=<key-id>
resp, err := adm.doKMSRequest(ctx, "/key/delete", http.MethodDelete, nil, map[string]string{"key-id": keyID})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// ImportKey tries to import a cryptographic key
// at the KMS connected to a MinIO server.
func (adm *AdminClient) ImportKey(ctx context.Context, keyID string, content []byte) error {
// POST /minio/kms/v1/key/import?key-id=<key-id>
resp, err := adm.doKMSRequest(ctx, "/key/import", http.MethodPost, content, map[string]string{"key-id": keyID})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// ListKeys tries to get all key names that match the specified pattern
func (adm *AdminClient) ListKeys(ctx context.Context, pattern string) ([]KMSKeyInfo, error) {
// GET /minio/kms/v1/key/list?pattern=<pattern>
resp, err := adm.doKMSRequest(ctx, "/key/list", http.MethodGet, nil, map[string]string{"pattern": pattern})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var results []KMSKeyInfo
if err = json.NewDecoder(resp.Body).Decode(&results); err != nil {
return nil, err
}
return results, nil
}
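// Listing sketch (illustrative only; the "*" pattern is assumed to match
// every key visible to the requesting identity):
//
//    keys, err := madmClnt.ListKeys(context.Background(), "*")
//    if err != nil {
//        log.Fatalln(err)
//    }
//    for _, k := range keys {
//        fmt.Println(k.Name, k.CreatedAt)
//    }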
// GetKeyStatus requests status information about the key referenced by keyID
// from the KMS connected to a MinIO server by performing an Admin-API request.
// It hits the `/minio/admin/v3/kms/key/status` API endpoint.
func (adm *AdminClient) GetKeyStatus(ctx context.Context, keyID string) (*KMSKeyStatus, error) {
// GET /minio/kms/v1/key/status?key-id=<key-id>
resp, err := adm.doKMSRequest(ctx, "/key/status", http.MethodGet, nil, map[string]string{"key-id": keyID})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var keyInfo KMSKeyStatus
if err = json.NewDecoder(resp.Body).Decode(&keyInfo); err != nil {
return nil, err
}
return &keyInfo, nil
}
// KMSKeyStatus contains some status information about a KMS master key.
// The MinIO server tries to access the KMS and perform encryption and
// decryption operations. If the MinIO server can access the KMS and
// all master key operations succeed it returns a status containing only
// the master key ID but no error.
type KMSKeyStatus struct {
KeyID string `json:"key-id"`
EncryptionErr string `json:"encryption-error,omitempty"` // An empty error == success
DecryptionErr string `json:"decryption-error,omitempty"` // An empty error == success
}
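// Interpreting the status (illustrative only; empty error strings mean the
// round-trip encryption/decryption checks passed; "my-minio-key" is a
// placeholder key name):
//
//    st, err := madmClnt.GetKeyStatus(context.Background(), "my-minio-key")
//    if err != nil {
//        log.Fatalln(err)
//    }
//    if st.EncryptionErr == "" && st.DecryptionErr == "" {
//        fmt.Println("key", st.KeyID, "is usable")
//    }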
// SetKMSPolicy tries to create or update a policy
// at the KMS connected to a MinIO server.
func (adm *AdminClient) SetKMSPolicy(ctx context.Context, policy string, content []byte) error {
// POST /minio/kms/v1/policy/set?policy=<policy>
resp, err := adm.doKMSRequest(ctx, "/policy/set", http.MethodPost, content, map[string]string{"policy": policy})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
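// Policy content sketch (illustrative only; the allow rules below are
// placeholder KES API path patterns, not a recommended policy, and
// "app-policy" is a placeholder name):
//
//    p := KMSPolicy{Allow: []string{"/v1/key/create/app-*", "/v1/key/generate/app-*"}}
//    content, err := json.Marshal(p)
//    if err != nil {
//        log.Fatalln(err)
//    }
//    if err := madmClnt.SetKMSPolicy(context.Background(), "app-policy", content); err != nil {
//        log.Fatalln(err)
//    }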
// AssignPolicy tries to assign a policy to an identity
// at the KMS connected to a MinIO server.
func (adm *AdminClient) AssignPolicy(ctx context.Context, policy string, content []byte) error {
// POST /minio/kms/v1/policy/assign?policy=<policy>
resp, err := adm.doKMSRequest(ctx, "/policy/assign", http.MethodPost, content, map[string]string{"policy": policy})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// DescribePolicy tries to describe a KMS policy
func (adm *AdminClient) DescribePolicy(ctx context.Context, policy string) (*KMSDescribePolicy, error) {
// GET /minio/kms/v1/policy/describe?policy=<policy>
resp, err := adm.doKMSRequest(ctx, "/policy/describe", http.MethodGet, nil, map[string]string{"policy": policy})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var dp KMSDescribePolicy
if err = json.NewDecoder(resp.Body).Decode(&dp); err != nil {
return nil, err
}
return &dp, nil
}
// GetPolicy tries to get a KMS policy
func (adm *AdminClient) GetPolicy(ctx context.Context, policy string) (*KMSPolicy, error) {
// GET /minio/kms/v1/policy/get?policy=<policy>
resp, err := adm.doKMSRequest(ctx, "/policy/get", http.MethodGet, nil, map[string]string{"policy": policy})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var p KMSPolicy
if err = json.NewDecoder(resp.Body).Decode(&p); err != nil {
return nil, err
}
return &p, nil
}
// ListPolicies tries to get all policies that match the specified pattern
func (adm *AdminClient) ListPolicies(ctx context.Context, pattern string) ([]KMSPolicyInfo, error) {
// GET /minio/kms/v1/policy/list?pattern=<pattern>
resp, err := adm.doKMSRequest(ctx, "/policy/list", http.MethodGet, nil, map[string]string{"pattern": pattern})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var results []KMSPolicyInfo
if err = json.NewDecoder(resp.Body).Decode(&results); err != nil {
return nil, err
}
return results, nil
}
// DeletePolicy tries to delete a policy
// at the KMS connected to a MinIO server.
func (adm *AdminClient) DeletePolicy(ctx context.Context, policy string) error {
// DELETE /minio/kms/v1/policy/delete?policy=<policy>
resp, err := adm.doKMSRequest(ctx, "/policy/delete", http.MethodDelete, nil, map[string]string{"policy": policy})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// DescribeIdentity tries to describe a KMS identity
func (adm *AdminClient) DescribeIdentity(ctx context.Context, identity string) (*KMSDescribeIdentity, error) {
// GET /minio/kms/v1/identity/describe?identity=<identity>
resp, err := adm.doKMSRequest(ctx, "/identity/describe", http.MethodGet, nil, map[string]string{"identity": identity})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var i KMSDescribeIdentity
if err = json.NewDecoder(resp.Body).Decode(&i); err != nil {
return nil, err
}
return &i, nil
}
// DescribeSelfIdentity tries to describe the identity issuing the request.
func (adm *AdminClient) DescribeSelfIdentity(ctx context.Context) (*KMSDescribeSelfIdentity, error) {
// GET /minio/kms/v1/identity/describe-self
resp, err := adm.doKMSRequest(ctx, "/identity/describe-self", http.MethodGet, nil, map[string]string{})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var si KMSDescribeSelfIdentity
if err = json.NewDecoder(resp.Body).Decode(&si); err != nil {
return nil, err
}
return &si, nil
}
// ListIdentities tries to get all identities that match the specified pattern
func (adm *AdminClient) ListIdentities(ctx context.Context, pattern string) ([]KMSIdentityInfo, error) {
// GET /minio/kms/v1/identity/list?pattern=<pattern>
if pattern == "" { // list identities does not default to *
pattern = "*"
}
resp, err := adm.doKMSRequest(ctx, "/identity/list", http.MethodGet, nil, map[string]string{"pattern": pattern})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var results []KMSIdentityInfo
if err = json.NewDecoder(resp.Body).Decode(&results); err != nil {
return nil, err
}
return results, nil
}
// DeleteIdentity tries to delete an identity
// at the KMS connected to a MinIO server.
func (adm *AdminClient) DeleteIdentity(ctx context.Context, identity string) error {
// DELETE /minio/kms/v1/identity/delete?identity=<identity>
resp, err := adm.doKMSRequest(ctx, "/identity/delete", http.MethodDelete, nil, map[string]string{"identity": identity})
if err != nil {
return err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
func (adm *AdminClient) doKMSRequest(ctx context.Context, path, method string, content []byte, values map[string]string) (*http.Response, error) {
qv := url.Values{}
for key, value := range values {
qv.Set(key, value)
}
reqData := requestData{
relPath: kmsAPIPrefix + path,
queryValues: qv,
isKMS: true,
content: content,
}
return adm.executeMethod(ctx, method, reqData)
}
golang-github-minio-madmin-go-3.0.104/license.go 0000664 0000000 0000000 00000004164 14774251704 0021356 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"time"
)
//msgp:clearomitted
//msgp:tag json
//go:generate msgp
// LicenseInfo is a structure containing MinIO license information.
type LicenseInfo struct {
ID string `json:"ID"` // The license ID
Organization string `json:"Organization"` // Name of the organization using the license
Plan string `json:"Plan"` // License plan. E.g. "ENTERPRISE-PLUS"
IssuedAt time.Time `json:"IssuedAt"` // Point in time when the license was issued
ExpiresAt time.Time `json:"ExpiresAt"` // Point in time when the license expires
Trial bool `json:"Trial"` // Whether the license is on trial
APIKey string `json:"APIKey"` // Subnet account API Key
}
// GetLicenseInfo - returns the license info
func (adm *AdminClient) GetLicenseInfo(ctx context.Context) (*LicenseInfo, error) {
// Execute GET on /minio/admin/v3/license-info to get license info.
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/license-info",
})
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
l := LicenseInfo{}
err = json.NewDecoder(resp.Body).Decode(&l)
if err != nil {
return nil, err
}
return &l, nil
}
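// Usage sketch (illustrative only; assumes an initialized *AdminClient
// named madmClnt and the usual "context", "fmt" and "log" imports):
//
//    li, err := madmClnt.GetLicenseInfo(context.Background())
//    if err != nil {
//        log.Fatalln(err)
//    }
//    fmt.Println("plan:", li.Plan, "expires:", li.ExpiresAt)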
golang-github-minio-madmin-go-3.0.104/license_gen.go 0000664 0000000 0000000 00000013734 14774251704 0022212 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *LicenseInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "Organization":
z.Organization, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Organization")
return
}
case "Plan":
z.Plan, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Plan")
return
}
case "IssuedAt":
z.IssuedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "IssuedAt")
return
}
case "ExpiresAt":
z.ExpiresAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "ExpiresAt")
return
}
case "Trial":
z.Trial, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Trial")
return
}
case "APIKey":
z.APIKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "APIKey")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *LicenseInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 7
// write "ID"
err = en.Append(0x87, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "Organization"
err = en.Append(0xac, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Organization)
if err != nil {
err = msgp.WrapError(err, "Organization")
return
}
// write "Plan"
err = en.Append(0xa4, 0x50, 0x6c, 0x61, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Plan)
if err != nil {
err = msgp.WrapError(err, "Plan")
return
}
// write "IssuedAt"
err = en.Append(0xa8, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.IssuedAt)
if err != nil {
err = msgp.WrapError(err, "IssuedAt")
return
}
// write "ExpiresAt"
err = en.Append(0xa9, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.ExpiresAt)
if err != nil {
err = msgp.WrapError(err, "ExpiresAt")
return
}
// write "Trial"
err = en.Append(0xa5, 0x54, 0x72, 0x69, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteBool(z.Trial)
if err != nil {
err = msgp.WrapError(err, "Trial")
return
}
// write "APIKey"
err = en.Append(0xa6, 0x41, 0x50, 0x49, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.APIKey)
if err != nil {
err = msgp.WrapError(err, "APIKey")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *LicenseInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 7
// string "ID"
o = append(o, 0x87, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "Organization"
o = append(o, 0xac, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Organization)
// string "Plan"
o = append(o, 0xa4, 0x50, 0x6c, 0x61, 0x6e)
o = msgp.AppendString(o, z.Plan)
// string "IssuedAt"
o = append(o, 0xa8, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.IssuedAt)
// string "ExpiresAt"
o = append(o, 0xa9, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74)
o = msgp.AppendTime(o, z.ExpiresAt)
// string "Trial"
o = append(o, 0xa5, 0x54, 0x72, 0x69, 0x61, 0x6c)
o = msgp.AppendBool(o, z.Trial)
// string "APIKey"
o = append(o, 0xa6, 0x41, 0x50, 0x49, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.APIKey)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *LicenseInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "Organization":
z.Organization, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Organization")
return
}
case "Plan":
z.Plan, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Plan")
return
}
case "IssuedAt":
z.IssuedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "IssuedAt")
return
}
case "ExpiresAt":
z.ExpiresAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ExpiresAt")
return
}
case "Trial":
z.Trial, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Trial")
return
}
case "APIKey":
z.APIKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "APIKey")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *LicenseInfo) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 13 + msgp.StringPrefixSize + len(z.Organization) + 5 + msgp.StringPrefixSize + len(z.Plan) + 9 + msgp.TimeSize + 10 + msgp.TimeSize + 6 + msgp.BoolSize + 7 + msgp.StringPrefixSize + len(z.APIKey)
return
}
golang-github-minio-madmin-go-3.0.104/license_gen_test.go 0000664 0000000 0000000 00000004455 14774251704 0023251 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalLicenseInfo(t *testing.T) {
v := LicenseInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgLicenseInfo(b *testing.B) {
v := LicenseInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgLicenseInfo(b *testing.B) {
v := LicenseInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalLicenseInfo(b *testing.B) {
v := LicenseInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeLicenseInfo(t *testing.T) {
v := LicenseInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeLicenseInfo Msgsize() is inaccurate")
}
vn := LicenseInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeLicenseInfo(b *testing.B) {
v := LicenseInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeLicenseInfo(b *testing.B) {
v := LicenseInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/licenseheaders.py 0000775 0000000 0000000 00000122760 14774251704 0022743 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
"""A tool to change or add license headers in all supported files in or below a directory."""
# Copyright (c) 2016-2018 Johann Petrak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import fnmatch
import logging
import os
import sys
import stat
import contextlib
from shutil import copyfile
from string import Template
import regex as re
__version__ = '0.8.8'
__author__ = 'Johann Petrak'
__license__ = 'MIT'
LOGGER = logging.getLogger("licenseheaders_{}".format(__version__))
default_dir = "."
default_encoding = "utf-8"
def update_c_style_comments(extensions):
return {
"extensions": extensions,
"keepFirst": None,
"blockCommentStartPattern": re.compile(r'^\s*/\*'),
"blockCommentEndPattern": re.compile(r'\*/\s*$'),
"lineCommentStartPattern": re.compile(r'^\s*//'),
"lineCommentEndPattern": None,
"headerStartLine": "/*\n",
"headerEndLine": " */\n",
"headerLinePrefix": " * ",
"headerLineSuffix": None,
}
def update_go_style_comments(extensions):
return {
"extensions": extensions,
"keepFirst": None,
"blockCommentStartPattern": re.compile(r'^\s*/\*'),
"blockCommentEndPattern": re.compile(r'\*/\s*$'),
"lineCommentStartPattern": re.compile(r'^\s*//'),
"lineCommentEndPattern": None,
"headerStartLine": "//\n",
"headerEndLine": "//\n",
"headerLinePrefix": "// ",
"headerLineSuffix": None,
}
# for each processing type, the detailed settings of how to process files of that type
TYPE_SETTINGS = {
# All the languages with C style comments:
"c": update_c_style_comments([".c", ".cc", ".h"]),
"cpp": update_c_style_comments([".cpp", ".hpp", ".cxx", ".hxx", ".ixx"]),
"csharp": update_c_style_comments([".cs", ".csx"]),
"d": update_c_style_comments([".d"]),
"go": update_go_style_comments([".go"]),
"groovy": update_c_style_comments([".groovy"]),
"java": update_c_style_comments([".java", ".jape"]),
"javascript": update_c_style_comments([".js", ".js", ".cjs", ".mjs"]),
"kotlin": update_c_style_comments([".kt", ".kts", ".ktm"]),
"objective-c": update_c_style_comments([".m", ".mm", ".M"]),
"php": update_c_style_comments([".php," ".phtml," ".php3," ".php4," ".php5," ".php7," ".phps," ".php-s," ".pht," ".phar"]),
"rust": update_c_style_comments([".rs"]),
"scala": update_c_style_comments([".scala"]),
"swift": update_c_style_comments([".swift"]),
"typescript": update_c_style_comments([".ts", ".tsx"]),
"script": {
"extensions": [".sh", ".csh", ".pl"],
"keepFirst": re.compile(r'^#!|^# -\*-'),
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": "##\n",
"headerEndLine": "##\n",
"headerLinePrefix": "## ",
"headerLineSuffix": None
},
"perl": {
"extensions": [".pl"],
"keepFirst": re.compile(r'^#!|^# -\*-'),
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": "##\n",
"headerEndLine": "##\n",
"headerLinePrefix": "## ",
"headerLineSuffix": None
},
"python": {
"extensions": [".py"],
"keepFirst": re.compile(r'^#!|^# +pylint|^# +-\*-|^# +coding|^# +encoding|^# +type|^# +flake8'),
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": None,
"headerEndLine": "\n",
"headerLinePrefix": "# ",
"headerLineSuffix": None
},
"robot": {
"extensions": [".robot"],
"keepFirst": re.compile(r'^#!|^# +pylint|^# +-\*-|^# +coding|^# +encoding'),
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": None,
"headerEndLine": None,
"headerLinePrefix": "# ",
"headerLineSuffix": None
},
"xml": {
"extensions": [".xml"],
"keepFirst": re.compile(r'^\s*<\?xml.*\?>'),
"blockCommentStartPattern": re.compile(r'^\s*\s*$'),
"lineCommentStartPattern": None,
"lineCommentEndPattern": None,
"headerStartLine": "\n",
"headerLinePrefix": "-- ",
"headerLineSuffix": None
},
"sql": {
"extensions": [".sql"],
"keepFirst": None,
"blockCommentStartPattern": None, # re.compile('^\s*/\*'),
"blockCommentEndPattern": None, # re.compile(r'\*/\s*$'),
"lineCommentStartPattern": re.compile(r'^\s*--'),
"lineCommentEndPattern": None,
"headerStartLine": "--\n",
"headerEndLine": "--\n",
"headerLinePrefix": "-- ",
"headerLineSuffix": None
},
"cmake": {
"extensions": [],
"filenames": ["CMakeLists.txt"],
"keepFirst": None,
"blockCommentStartPattern": re.compile(r'^\s*#\[\['),
"blockCommentEndPattern": re.compile(r'\]\]\s*$'),
"lineCommentStartPattern": re.compile(r'\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": "#[[\n",
"headerEndLine": "]]\n",
"headerLinePrefix": "",
"headerLineSuffix": None
},
"markdown": {
"extensions": [".md"],
"keepFirst": None,
"blockCommentStartPattern": re.compile(r'^\s*\s*$'),
"lineCommentStartPattern": None,
"lineCommentEndPattern": None,
"headerStartLine": "\n",
"headerLinePrefix": "",
"headerLineSuffix": None
},
"ruby": {
"extensions": [".rb"],
"keepFirst": re.compile(r'^#!'),
"blockCommentStartPattern": re.compile('^=begin'),
"blockCommentEndPattern": re.compile(r'^=end'),
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": "##\n",
"headerEndLine": "##\n",
"headerLinePrefix": "## ",
"headerLineSuffix": None
},
"vb": {
"extensions": [".vb"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r"^\s*\'"),
"lineCommentEndPattern": None,
"headerStartLine": None,
"headerEndLine": None,
"headerLinePrefix": "' ",
"headerLineSuffix": None
},
"erlang": {
"extensions": [".erl", ".src", ".config", ".schema"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": None,
"lineCommentEndPattern": None,
"headerStartLine": "%% -*- erlang -*-\n%% %CopyrightBegin%\n%%\n",
"headerEndLine": "%%\n%% %CopyrightEnd%\n\n",
"headerLinePrefix": "%% ",
"headerLineSuffix": None,
},
"html": {
"extensions": [".html"],
"keepFirst": re.compile(r'^\s*<\!DOCTYPE.*>'),
"blockCommentStartPattern": re.compile(r'^\s*\s*$'),
"lineCommentStartPattern": None,
"lineCommentEndPattern": None,
"headerStartLine": "\n",
"headerLinePrefix": "-- ",
"headerLineSuffix": None
},
"css": {
"extensions": [".css", ".scss", ".sass"],
"keepFirst": None,
"blockCommentStartPattern": re.compile(r'^\s*/\*'),
"blockCommentEndPattern": re.compile(r'\*/\s*$'),
"lineCommentStartPattern": None,
"lineCommentEndPattern": None,
"headerStartLine": "/*\n",
"headerEndLine": "*/\n",
"headerLinePrefix": None,
"headerLineSuffix": None
},
"docker": {
"extensions": [".dockerfile"],
"filenames": ["Dockerfile"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": "##\n",
"headerEndLine": "##\n",
"headerLinePrefix": "## ",
"headerLineSuffix": None
},
"yaml": {
"extensions": [".yaml", ".yml"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": "##\n",
"headerEndLine": "##\n",
"headerLinePrefix": "## ",
"headerLineSuffix": None
},
"zig": {
"extensions": [".zig"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*//'),
"lineCommentEndPattern": None,
"headerStartLine": "//!\n",
"headerEndLine": "//!\n",
"headerLinePrefix": "//! ",
"headerLineSuffix": None
},
"proto": {
"extensions": [".proto"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*//'),
"lineCommentEndPattern": None,
"headerStartLine": None,
"headerEndLine": None,
"headerLinePrefix": "// ",
"headerLineSuffix": None
},
"terraform": {
"extensions": [".tf"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*#'),
"lineCommentEndPattern": None,
"headerStartLine": "##\n",
"headerEndLine": "##\n",
"headerLinePrefix": "## ",
"headerLineSuffix": None
},
"bat": {
"extensions": [".bat"],
"keepFirst": None,
"blockCommentStartPattern": None,
"blockCommentEndPattern": None,
"lineCommentStartPattern": re.compile(r'^\s*::'),
"lineCommentEndPattern": None,
"headerStartLine": "::\n",
"headerEndLine": "::\n",
"headerLinePrefix": ":: ",
"headerLineSuffix": None
},
"ocaml": {
"extensions": [".ml", ".mli", ".mlg", ".v"],
"keepFirst": None,
"blockCommentStartPattern": re.compile(r'^\s*\(\*'),
"blockCommentEndPattern": re.compile(r'\*\)\s*$'),
"lineCommentStartPattern": None,
"lineCommentEndPattern": None,
"headerStartLine": "(*\n",
"headerEndLine": " *)\n",
"headerLinePrefix": " * ",
"headerLineSuffix": None
}
}
yearsPattern = re.compile(
r"(?<=Copyright\s*(?:\(\s*[Cc©]\s*\)\s*))?([0-9][0-9][0-9][0-9](?:-[0-9][0-9]?[0-9]?[0-9]?)?)",
re.IGNORECASE)
licensePattern = re.compile(r"license", re.IGNORECASE)
emptyPattern = re.compile(r'^\s*$')
# maps each extension to its processing type. Filled from TYPE_SETTINGS during initialization
ext2type = {}
name2type = {}
patterns = []
# class for dict args. Use --argname key1=val1,val2 key2=val3 key3=val4, val5
class DictArgs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
dict_args = {}
if not isinstance(values, (list,)):
values = (values,)
for value in values:
n, v = value.split("=")
if n not in TYPE_SETTINGS:
LOGGER.error("No valid language '%s' to add additional file extensions for" % n)
if v and "," in str(v):
dict_args[n] = v.split(",")
else:
dict_args[n] = list()
dict_args[n].append(str(v).strip())
setattr(namespace, self.dest, dict_args)
def parse_command_line(argv):
"""
Parse command line argument. See -h option.
:param argv: the actual program arguments
:return: parsed arguments
"""
import textwrap
known_extensions = [ftype+":"+",".join(conf["extensions"]) for ftype, conf in TYPE_SETTINGS.items() if "extensions" in conf]
# known_extensions = [ext for ftype in typeSettings.values() for ext in ftype["extensions"]]
example = textwrap.dedent("""
Known extensions: {0}
If -t/--tmpl is specified, that header is added to (or existing header replaced for) all source files of known type
If -t/--tmpl is not specified but -y/--years is specified, all years in existing header files
are replaced with the years specified
Examples:
{1} -t lgpl-v3 -y 2012-2014 -o ThisNiceCompany -n ProjectName -u http://the.projectname.com
{1} -y 2012-2015
{1} -y 2012-2015 -d /dir/where/to/start/
{1} -y 2012-2015 -d /dir/where/to/start/ --additional-extensions python=.j2
{1} -y 2012-2015 -d /dir/where/to/start/ --additional-extensions python=.j2,.tpl script=.txt
{1} -t .copyright.tmpl -cy
{1} -t .copyright.tmpl -cy -f some_file.cpp
""").format(known_extensions, os.path.basename(argv[0]))
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description="Python license header updater",
epilog=example,
formatter_class=formatter_class)
parser.add_argument("-V", "--version", action="version",
version="%(prog)s {}".format(__version__))
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity (can be specified "
"1 to 3 times, default shows errors only)")
parser.add_argument("-d", "--dir", dest="dir", default=default_dir,
help="The directory to recursively process (default: {}).".format(default_dir))
parser.add_argument("-f", "--files", dest="files", nargs='*', type=str,
help="The list of files to process. If not empty - will disable '--dir' option")
parser.add_argument("-b", action="store_true",
help="Back up all files which get changed to a copy with .bak added to the name")
parser.add_argument("-t", "--tmpl", dest="tmpl", default=None,
help="Template name or file to use.")
parser.add_argument("-s", "--settings", dest="settings", default=None,
help="Settings file to use.")
parser.add_argument("-y", "--years", dest="years", default=None,
help="Year or year range to use.")
parser.add_argument("-cy", "--current-year", dest="current_year", action="store_true",
help="Use today's year.")
parser.add_argument("-o", "--owner", dest="owner", default=None,
help="Name of copyright owner to use.")
parser.add_argument("-n", "--projname", dest="projectname", default=None,
help="Name of project to use.")
parser.add_argument("-u", "--projurl", dest="projecturl", default=None,
help="Url of project to use.")
parser.add_argument("--enc", nargs=1, dest="encoding", default=default_encoding,
help="Encoding of program files (default: {})".format(default_encoding))
parser.add_argument("--dry", action="store_true", help="Only show what would get done, do not change any files")
parser.add_argument("--safesubst", action="store_true",
help="Do not raise error if template variables cannot be substituted.")
parser.add_argument("-D", "--debug", action="store_true", help="Enable debug messages (same as -v -v -v)")
parser.add_argument("-E","--ext", type=str, nargs="*",
help="If specified, restrict processing to the specified extension(s) only")
parser.add_argument("--additional-extensions", dest="additional_extensions", default=None, nargs="+",
help="Provide a comma-separated list of additional file extensions as value for a "
"specified language as key, each with a leading dot and no whitespace (default: None).",
action=DictArgs)
parser.add_argument("-x", "--exclude", type=str, nargs="*",
help="File path patterns to exclude")
parser.add_argument("--force-overwrite", action="store_true", dest="force_overwrite",
help="Try to include headers even in read-only files, given sufficient permissions. "
"File permissions are restored after successful header injection.")
arguments = parser.parse_args(argv[1:])
# Sets log level to WARN going more verbose for each new -V.
loglevel = max(4 - arguments.verbose_count, 1) * 10
global LOGGER
LOGGER.setLevel(loglevel)
if arguments.debug:
LOGGER.setLevel(logging.DEBUG)
# fmt = logging.Formatter('%(asctime)s|%(levelname)s|%(name)s|%(message)s')
fmt = logging.Formatter('%(name)s %(levelname)s: %(message)s')
hndlr = logging.StreamHandler(sys.stderr)
hndlr.setFormatter(fmt)
LOGGER.addHandler(hndlr)
return arguments
def read_type_settings(path):
def handle_regex(setting, name):
if setting[name]:
setting[name] = re.compile(setting[name])
else:
setting[name] = None
def handle_line(setting, name):
if setting[name]:
setting[name] = setting[name]
else:
setting[name] = None
settings = {}
import json
with open(path) as f:
data = json.load(f)
for key, value in data.items():
for setting_name in ["keepFirst", "blockCommentStartPattern", "blockCommentEndPattern", "lineCommentStartPattern", "lineCommentEndPattern"]:
handle_regex(value, setting_name)
for setting_name in ["headerStartLine", "headerEndLine", "headerLinePrefix", "headerLineSuffix"]:
handle_line(value, setting_name)
settings[key] = value
return settings
def get_paths(fnpatterns, start_dir=default_dir):
"""
Retrieve files that match any of the glob patterns from the start_dir and below.
:param fnpatterns: the file name patterns
:param start_dir: directory where to start searching
:return: generator that returns one path after the other
"""
seen = set()
for root, dirs, files in os.walk(start_dir):
names = []
for pattern in fnpatterns:
names += fnmatch.filter(files, pattern)
for name in names:
path = os.path.join(root, name)
if path in seen:
continue
seen.add(path)
yield path
def get_files(fnpatterns, files):
seen = set()
names = []
for f in files:
file_name = os.path.basename(f)
for pattern in fnpatterns:
if fnmatch.filter([file_name], pattern):
names += [f]
for path in names:
if path in seen:
continue
seen.add(path)
yield path
def read_template(template_file, vardict, args):
"""
Read a template file replace variables from the dict and return the lines.
Throws exception if a variable cannot be replaced.
:param template_file: template file with variables
:param vardict: dictionary to replace variables with values
:param args: the program arguments
:return: lines of the template, with variables replaced
"""
with open(template_file, 'r') as f:
lines = f.readlines()
if args.safesubst:
lines = [Template(line).safe_substitute(vardict) for line in lines]
else:
lines = [Template(line).substitute(vardict) for line in lines]
return lines
def for_type(templatelines, ftype, settings):
"""
Format the template lines for the given ftype.
:param templatelines: the lines of the template text
:param ftype: file type
:return: header lines
"""
lines = []
settings = settings[ftype]
header_start_line = settings["headerStartLine"]
header_end_line = settings["headerEndLine"]
header_line_prefix = settings["headerLinePrefix"]
header_line_suffix = settings["headerLineSuffix"]
if header_start_line is not None:
lines.append(header_start_line)
for line in templatelines:
tmp = line
if header_line_prefix is not None and line == '\n':
tmp = header_line_prefix.rstrip() + tmp
elif header_line_prefix is not None:
tmp = header_line_prefix + tmp
if header_line_suffix is not None:
tmp = tmp + header_line_suffix
lines.append(tmp)
if header_end_line is not None:
lines.append(header_end_line)
return lines
##
def read_file(file, args, type_settings):
"""
Read a file and return a dictionary with the following elements:
:param file: the file to read
:param args: the options specified by the user
:return: a dictionary with the following entries or None if the file is not supported:
- skip: number of lines at the beginning to skip (always keep them when replacing or adding something)
can also be seen as the index of the first line not to skip
- headStart: index of first line of detected header, or None if non header detected
- headEnd: index of last line of detected header, or None
- yearsLine: index of line which contains the copyright years, or None
- haveLicense: found a line that matches a pattern that indicates this could be a license header
- settings: the type settings
"""
skip = 0
head_start = None
head_end = None
years_line = None
have_license = False
filename, extension = os.path.splitext(file)
LOGGER.debug("File name is %s", os.path.basename(filename))
LOGGER.debug("File extension is %s", extension)
# if we have no entry in the mapping from extensions to processing type, return None
ftype = ext2type.get(extension)
LOGGER.debug("Type for this file is %s", ftype)
if not ftype:
ftype = name2type.get(os.path.basename(file))
if not ftype:
return None
settings = type_settings.get(ftype)
if not os.access(file, os.R_OK):
LOGGER.error("File %s is not readable.", file)
with open(file, 'r', encoding=args.encoding) as f:
lines = f.readlines()
# now iterate through the lines and try to determine the various indices
# first try to find the start of the header: skip over shebang or empty lines
keep_first = settings.get("keepFirst")
isBlockHeader = False
block_comment_start_pattern = settings.get("blockCommentStartPattern")
block_comment_end_pattern = settings.get("blockCommentEndPattern")
line_comment_start_pattern = settings.get("lineCommentStartPattern")
i = 0
LOGGER.info("Processing file {} as {}".format(file, ftype))
for line in lines:
if (i == 0 or i == skip) and keep_first and keep_first.findall(line):
skip = i + 1
elif emptyPattern.findall(line):
pass
elif block_comment_start_pattern and block_comment_start_pattern.findall(line):
head_start = i
isBlockHeader = True
break
elif line_comment_start_pattern and line_comment_start_pattern.findall(line):
head_start = i
break
elif not block_comment_start_pattern and \
line_comment_start_pattern and \
line_comment_start_pattern.findall(line):
head_start = i
break
else:
# we have reached something else, so no header in this file
# logging.debug("Did not find the start giving up at line %s, line is >%s<",i,line)
return {"type": ftype,
"lines": lines,
"skip": skip,
"headStart": None,
"headEnd": None,
"yearsLine": None,
"settings": settings,
"haveLicense": have_license
}
i = i + 1
LOGGER.debug("Found preliminary start at {}, i={}, lines={}".format(head_start, i, len(lines)))
# now we have either reached the end, or we are at a line where a block start or line comment occurred
# if we have reached the end, return default dictionary without info
if i == len(lines):
LOGGER.debug("We have reached the end, did not find anything really")
return {"type": ftype,
"lines": lines,
"skip": skip,
"headStart": head_start,
"headEnd": head_end,
"yearsLine": years_line,
"settings": settings,
"haveLicense": have_license
}
# otherwise process the comment block until it ends
if isBlockHeader:
LOGGER.debug("Found comment start, process until end")
for j in range(i, len(lines)):
LOGGER.debug("Checking line {}".format(j))
if licensePattern.findall(lines[j]):
have_license = True
elif block_comment_end_pattern.findall(lines[j]):
return {"type": ftype,
"lines": lines,
"skip": skip,
"headStart": head_start,
"headEnd": j,
"yearsLine": years_line,
"settings": settings,
"haveLicense": have_license
}
elif yearsPattern.findall(lines[j]):
have_license = True
years_line = j
# if we went through all the lines without finding an end, maybe we have some syntax error or some other
# unusual situation, so let's return no header
LOGGER.debug("Did not find the end of a block comment, returning no header")
return {"type": ftype,
"lines": lines,
"skip": skip,
"headStart": None,
"headEnd": None,
"yearsLine": None,
"settings": settings,
"haveLicense": have_license
}
else:
LOGGER.debug("ELSE1")
for j in range(i, len(lines)):
if line_comment_start_pattern.findall(lines[j]) and licensePattern.findall(lines[j]):
have_license = True
elif not line_comment_start_pattern.findall(lines[j]):
LOGGER.debug("ELSE2")
return {"type": ftype,
"lines": lines,
"skip": skip,
"headStart": i,
"headEnd": j - 1,
"yearsLine": years_line,
"settings": settings,
"haveLicense": have_license
}
elif yearsPattern.findall(lines[j]):
have_license = True
years_line = j
# if we went through all the lines without finding the end of the block, it could be that the whole
# file only consisted of the header, so let's return the last line index
LOGGER.debug("RETURN")
return {"type": ftype,
"lines": lines,
"skip": skip,
"headStart": i,
"headEnd": len(lines) - 1,
"yearsLine": years_line,
"settings": settings,
"haveLicense": have_license
}
def make_backup(file, arguments):
"""
Backup file by copying it to a file with the extension .bak appended to the name.
:param file: file to back up
:param arguments: program args, only backs up, if required by an option
:return:
"""
if arguments.b:
LOGGER.info("Backing up file {} to {}".format(file, file + ".bak"))
if not arguments.dry:
copyfile(file, file + ".bak")
class OpenAsWriteable(object):
"""
This contextmanager wraps standard open(file, 'w', encoding=...) using
arguments.encoding encoding. If file cannot be written (read-only file),
and if args.force_overwrite is set, try to alter the owner write flag before
yielding the file handle. On exit, file permissions are restored to original
permissions on __exit__ . If the file does not exist, or if it is read-only
and cannot be made writable (due to lacking user rights or force_overwrite
argument not being set), this contextmanager yields None on __enter__.
"""
def __init__(self, filename, arguments):
"""
Initialize an OpenAsWriteable context manager
:param filename: path to the file to open
:param arguments: program arguments
"""
self._filename = filename
self._arguments = arguments
self._file_handle = None
self._file_permissions = None
def __enter__(self):
"""
Yields a writable file handle when possible, else None.
"""
filename = self._filename
arguments = self._arguments
file_handle = None
file_permissions = None
if os.path.isfile(filename):
file_permissions = stat.S_IMODE(os.lstat(filename).st_mode)
if not os.access(filename, os.W_OK):
if arguments.force_overwrite:
try:
os.chmod(filename, file_permissions | stat.S_IWUSR)
except PermissionError:
LOGGER.warning("File {} cannot be made writable, it will be skipped.".format(filename))
else:
LOGGER.warning("File {} is not writable, it will be skipped.".format(filename))
if os.access(filename, os.W_OK):
file_handle = open(filename, 'w', encoding=arguments.encoding)
else:
LOGGER.warning("File {} does not exist, it will be skipped.".format(filename))
self._file_handle = file_handle
self._file_permissions = file_permissions
return file_handle
def __exit__ (self, exc_type, exc_value, traceback):
"""
Restore back file permissions and close file handle (if any).
"""
if (self._file_handle is not None):
self._file_handle.close()
actual_permissions = stat.S_IMODE(os.lstat(self._filename).st_mode)
if (actual_permissions != self._file_permissions):
try:
os.chmod(self._filename, self._file_permissions)
except PermissionError:
LOGGER.error("File {} permissions could not be restored.".format(self._filename))
self._file_handle = None
self._file_permissions = None
return True
@contextlib.contextmanager
def open_as_writable(file, arguments):
"""
Wrapper around OpenAsWriteable context manager.
"""
with OpenAsWriteable(file, arguments=arguments) as fw:
yield fw
def main():
"""Main function."""
# LOGGER.addHandler(logging.StreamHandler(stream=sys.stderr))
# init: create the ext2type mappings
arguments = parse_command_line(sys.argv)
additional_extensions = arguments.additional_extensions
type_settings = TYPE_SETTINGS
if arguments.settings:
type_settings = read_type_settings(arguments.settings)
for t in type_settings:
settings = type_settings[t]
exts = settings["extensions"]
if "filenames" in settings:
names = settings['filenames']
else:
names = []
# if additional file extensions are provided by the user, they are "merged" here:
if additional_extensions and t in additional_extensions:
for aext in additional_extensions[t]:
LOGGER.debug("Enable custom file extension '%s' for language '%s'" % (aext, t))
exts.append(aext)
for ext in exts:
ext2type[ext] = t
patterns.append("*" + ext)
for name in names:
name2type[name] = t
patterns.append(name)
LOGGER.debug("Allowed file patterns %s" % patterns)
limit2exts = None
if arguments.ext is not None and len(arguments.ext) > 0:
limit2exts = arguments.ext
try:
error = False
template_lines = None
if arguments.dir is not default_dir and arguments.files:
LOGGER.error("Cannot use both '--dir' and '--files' options.")
error = True
if arguments.years and arguments.current_year:
LOGGER.error("Cannot use both '--years' and '--currentyear' options.")
error = True
years = arguments.years
if arguments.current_year:
import datetime
now = datetime.datetime.now()
years = str(now.year)
settings = {}
if years:
settings["years"] = years
if arguments.owner:
settings["owner"] = arguments.owner
if arguments.projectname:
settings["projectname"] = arguments.projectname
if arguments.projecturl:
settings["projecturl"] = arguments.projecturl
# if we have a template name specified, try to get or load the template
if arguments.tmpl:
opt_tmpl = arguments.tmpl
# first get all the names of our own templates
# for this get first the path of this file
templates_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
LOGGER.debug("File path: {}".format(os.path.abspath(__file__)))
# get all the templates in the templates directory
templates = [f for f in get_paths("*.tmpl", templates_dir)]
templates = [(os.path.splitext(os.path.basename(t))[0], t) for t in templates]
# filter by trying to match the name against what was specified
tmpls = [t for t in templates if opt_tmpl in t[0]]
# check if one of the matching template names is identical to the parameter, then take that one
tmpls_eq = [t for t in tmpls if opt_tmpl == t[0]]
if len(tmpls_eq) > 0:
tmpls = tmpls_eq
if len(tmpls) == 1:
tmpl_name = tmpls[0][0]
tmpl_file = tmpls[0][1]
LOGGER.info("Using template file {} for {}".format(tmpl_file, tmpl_name))
template_lines = read_template(tmpl_file, settings, arguments)
else:
if len(tmpls) == 0:
# check if we can interpret the option as file
if os.path.isfile(opt_tmpl):
LOGGER.info("Using file {}".format(os.path.abspath(opt_tmpl)))
template_lines = read_template(os.path.abspath(opt_tmpl), settings, arguments)
else:
LOGGER.error("Not a built-in template and not a file, cannot proceed: {}".format(opt_tmpl))
LOGGER.error("Built in templates: {}".format(", ".join([t[0] for t in templates])))
error = True
else:
LOGGER.error("There are multiple matching template names: {}".format([t[0] for t in tmpls]))
error = True
else:
# no tmpl parameter
if not years:
LOGGER.error("No template specified and no years either, nothing to do (use -h option for usage info)")
error = True
if error:
return 1
else:
# logging.debug("Got template lines: %s",templateLines)
# now do the actual processing: if we did not get some error, we have a template loaded or
# no template at all
# if we have no template, then we will have the years.
# now process all the files and either replace the years or replace/add the header
if arguments.files:
LOGGER.debug("Processing files %s", arguments.files)
LOGGER.debug("Patterns: %s", patterns)
paths = get_files(patterns, arguments.files)
else:
LOGGER.debug("Processing directory %s", arguments.dir)
LOGGER.debug("Patterns: %s", patterns)
paths = get_paths(patterns, arguments.dir)
for file in paths:
LOGGER.debug("Considering file: {}".format(file))
file = os.path.normpath(file)
if limit2exts is not None and not any([file.endswith(ext) for ext in limit2exts]):
LOGGER.info("Skipping file with non-matching extension: {}".format(file))
continue
if arguments.exclude and any([fnmatch.fnmatch(file, pat) for pat in arguments.exclude]):
LOGGER.info("Ignoring file {}".format(file))
continue
finfo = read_file(file, arguments, type_settings)
if not finfo:
LOGGER.debug("File not supported %s", file)
continue
# logging.debug("FINFO for the file: %s", finfo)
lines = finfo["lines"]
LOGGER.debug(
"Info for the file: headStart=%s, headEnd=%s, haveLicense=%s, skip=%s, len=%s, yearsline=%s",
finfo["headStart"], finfo["headEnd"], finfo["haveLicense"], finfo["skip"], len(lines),
finfo["yearsLine"])
# if we have a template: replace or add
if template_lines:
make_backup(file, arguments)
if arguments.dry:
LOGGER.info("Would be updating changed file: {}".format(file))
else:
with open_as_writable(file, arguments) as fw:
if (fw is not None):
# if we found a header, replace it
# otherwise, add it after the lines to skip
head_start = finfo["headStart"]
head_end = finfo["headEnd"]
have_license = finfo["haveLicense"]
ftype = finfo["type"]
skip = finfo["skip"]
if head_start is not None and head_end is not None and have_license:
LOGGER.debug("Replacing header in file {}".format(file))
# first write the lines before the header
fw.writelines(lines[0:head_start])
# now write the new header from the template lines
fw.writelines(for_type(template_lines, ftype, type_settings))
# now write the rest of the lines
fw.writelines(lines[head_end + 1:])
else:
LOGGER.debug("Adding header to file {}, skip={}".format(file, skip))
fw.writelines(lines[0:skip])
fw.writelines(for_type(template_lines, ftype, type_settings))
if head_start is not None and not have_license:
# There is some header, but not license - add an empty line
fw.write("\n")
fw.writelines(lines[skip:])
# TODO: optionally remove backup if all worked well?
else:
# no template lines, just update the line with the year, if we found a year
years_line = finfo["yearsLine"]
if years_line is not None:
make_backup(file, arguments)
if arguments.dry:
LOGGER.info("Would be updating year line in file {}".format(file))
else:
with open_as_writable(file, arguments) as fw:
if (fw is not None):
LOGGER.debug("Updating years in file {} in line {}".format(file, years_line))
fw.writelines(lines[0:years_line])
fw.write(yearsPattern.sub(years, lines[years_line]))
fw.writelines(lines[years_line + 1:])
# TODO: optionally remove backup if all worked well
return 0
finally:
logging.shutdown()
if __name__ == "__main__":
sys.exit(main())
golang-github-minio-madmin-go-3.0.104/logger/ 0000775 0000000 0000000 00000000000 14774251704 0020657 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/logger/audit/ 0000775 0000000 0000000 00000000000 14774251704 0021765 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/logger/audit/entry.go 0000664 0000000 0000000 00000006126 14774251704 0023462 0 ustar 00root root 0000000 0000000 // Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package audit
import "time"
// ObjectVersion object version key/versionId
type ObjectVersion struct {
ObjectName string `json:"objectName"`
VersionID string `json:"versionId,omitempty"`
}
// Entry - audit entry logs.
type Entry struct {
Version string `json:"version"`
DeploymentID string `json:"deploymentid,omitempty"`
SiteName string `json:"siteName,omitempty"`
Time time.Time `json:"time"`
Event string `json:"event"`
// Class of audit message - S3, admin ops, bucket management
Type string `json:"type,omitempty"`
// Deprecated: replaced by 'Event', kept here for some
// time for backward compatibility with k8s Operator.
Trigger string `json:"trigger"`
API struct {
Name string `json:"name,omitempty"`
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
Objects []ObjectVersion `json:"objects,omitempty"`
Status string `json:"status,omitempty"`
StatusCode int `json:"statusCode,omitempty"`
InputBytes int64 `json:"rx"`
OutputBytes int64 `json:"tx"`
HeaderBytes int64 `json:"txHeaders,omitempty"`
TimeToFirstByte string `json:"timeToFirstByte,omitempty"`
TimeToFirstByteInNS string `json:"timeToFirstByteInNS,omitempty"`
TimeToResponse string `json:"timeToResponse,omitempty"`
TimeToResponseInNS string `json:"timeToResponseInNS,omitempty"`
} `json:"api"`
RemoteHost string `json:"remotehost,omitempty"`
RequestID string `json:"requestID,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
ReqPath string `json:"requestPath,omitempty"`
ReqHost string `json:"requestHost,omitempty"`
ReqClaims map[string]interface{} `json:"requestClaims,omitempty"`
ReqQuery map[string]string `json:"requestQuery,omitempty"`
ReqHeader map[string]string `json:"requestHeader,omitempty"`
RespHeader map[string]string `json:"responseHeader,omitempty"`
Tags map[string]interface{} `json:"tags,omitempty"`
AccessKey string `json:"accessKey,omitempty"`
ParentUser string `json:"parentUser,omitempty"`
Error string `json:"error,omitempty"`
}
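// A minimal illustrative sketch (not part of the package API) of how a caller
// could populate an Entry; every value below is a placeholder:
//
//	e := Entry{
//		Version:      "1",
//		DeploymentID: "deployment-id",
//		Time:         time.Now().UTC(),
//		Event:        "s3:GetObject",
//	}
//	e.API.Name = "GetObject"
//	e.API.Bucket = "mybucket"
//	e.API.Object = "myobject"
//	e.API.StatusCode = 200
//
// Because API is declared as an anonymous struct field, its members are set
// individually rather than through a nested composite literal.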
golang-github-minio-madmin-go-3.0.104/logger/log/ 0000775 0000000 0000000 00000000000 14774251704 0021440 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/logger/log/entry.go 0000664 0000000 0000000 00000005706 14774251704 0023140 0 ustar 00root root 0000000 0000000 // Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package log
import (
"strings"
"time"
"github.com/minio/madmin-go/v3"
)
// ObjectVersion object version key/versionId
type ObjectVersion struct {
ObjectName string `json:"objectName"`
VersionID string `json:"versionId,omitempty"`
}
// Args - defines the arguments for the API.
type Args struct {
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
VersionID string `json:"versionId,omitempty"`
Objects []ObjectVersion `json:"objects,omitempty"`
Metadata map[string]string `json:"metadata,omitempty"`
}
// Trace - defines the trace.
type Trace struct {
Message string `json:"message,omitempty"`
Source []string `json:"source,omitempty"`
Variables map[string]interface{} `json:"variables,omitempty"`
}
// API - defines the api type and its args.
type API struct {
Name string `json:"name,omitempty"`
Args *Args `json:"args,omitempty"`
}
// Entry - defines fields and values of each log entry.
type Entry struct {
Site string `json:"site,omitempty"`
DeploymentID string `json:"deploymentid,omitempty"`
Level madmin.LogKind `json:"level"`
LogKind madmin.LogKind `json:"errKind,omitempty"` // Deprecated Jan 2024
Time time.Time `json:"time"`
API *API `json:"api,omitempty"`
RemoteHost string `json:"remotehost,omitempty"`
Host string `json:"host,omitempty"`
RequestID string `json:"requestID,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
Message string `json:"message,omitempty"`
Trace *Trace `json:"error,omitempty"`
}
// Info holds console log messages
type Info struct {
Entry
ConsoleMsg string
NodeName string `json:"node"`
Err error `json:"-"`
}
// Mask returns the mask based on the error level.
func (l Info) Mask() uint64 {
return l.Level.LogMask().Mask()
}
// SendLog returns true if log pertains to node specified in args.
func (l Info) SendLog(node string, logKind madmin.LogMask) bool {
if logKind.Contains(l.Level.LogMask()) {
return node == "" || strings.EqualFold(node, l.NodeName) && !l.Time.IsZero()
}
return false
}
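// An illustrative sketch (assumed caller-side code, not part of this package)
// of how a fan-out loop might consult SendLog before forwarding a console
// message to a subscriber interested in a particular node:
//
//	// subscribedMask is a madmin.LogMask built from the kinds the
//	// subscriber asked for; "node1" is a placeholder node name.
//	if info.SendLog("node1", subscribedMask) {
//		// forward info.ConsoleMsg to the subscriber
//	}
//
// SendLog returns true only when the mask covers the entry's level and the
// entry carries a non-zero timestamp for the requested node; an empty node
// string matches unconditionally.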
golang-github-minio-madmin-go-3.0.104/metrics.go 0000664 0000000 0000000 00000061264 14774251704 0021406 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"runtime/metrics"
"sort"
"strconv"
"strings"
"time"
"github.com/prometheus/procfs"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/load"
)
//msgp:clearomitted
//msgp:tag json
//go:generate msgp -unexported
// MetricType is a bitfield representation of different metric types.
type MetricType uint32
// MetricsNone indicates no metrics.
const MetricsNone MetricType = 0
const (
MetricsScanner MetricType = 1 << (iota)
MetricsDisk
MetricsOS
MetricsBatchJobs
MetricsSiteResync
MetricNet
MetricsMem
MetricsCPU
MetricsRPC
MetricsRuntime
// MetricsAll must be last.
// Enables all metrics.
MetricsAll = 1<<(iota) - 1
)
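// As a quick illustration (not part of the API surface), metric types combine
// as plain bit flags, so several groups can be requested at once and tested
// with Contains, defined further below:
//
//	t := MetricsDisk | MetricsCPU | MetricsMem
//	t.Contains(MetricsCPU)               // true
//	t.Contains(MetricsDisk | MetricsMem) // true
//	t.Contains(MetricsRPC)               // false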
// MetricsOptions are options provided to Metrics call.
type MetricsOptions struct {
Type MetricType // Return only these metric types. Several types can be combined using |. Leave at 0 to return all.
N int // Maximum number of samples to return. 0 will return endless stream.
Interval time.Duration // Interval between samples. Will be rounded up to 1s.
Hosts []string // Leave empty for all
ByHost bool // Return metrics by host.
Disks []string
ByDisk bool
ByJobID string
ByDepID string
}
// Metrics makes an admin call to retrieve metrics.
// The provided function is called for each received entry.
func (adm *AdminClient) Metrics(ctx context.Context, o MetricsOptions, out func(RealtimeMetrics)) (err error) {
path := fmt.Sprintf(adminAPIPrefix + "/metrics")
q := make(url.Values)
q.Set("types", strconv.FormatUint(uint64(o.Type), 10))
q.Set("n", strconv.Itoa(o.N))
q.Set("interval", o.Interval.String())
q.Set("hosts", strings.Join(o.Hosts, ","))
if o.ByHost {
q.Set("by-host", "true")
}
q.Set("disks", strings.Join(o.Disks, ","))
if o.ByDisk {
q.Set("by-disk", "true")
}
if o.ByJobID != "" {
q.Set("by-jobID", o.ByJobID)
}
if o.ByDepID != "" {
q.Set("by-depID", o.ByDepID)
}
resp, err := adm.executeMethod(ctx,
http.MethodGet, requestData{
relPath: path,
queryValues: q,
},
)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
closeResponse(resp)
return httpRespToErrorResponse(resp)
}
defer closeResponse(resp)
dec := json.NewDecoder(resp.Body)
for {
var m RealtimeMetrics
err := dec.Decode(&m)
if err != nil {
if errors.Is(err, io.EOF) {
err = io.ErrUnexpectedEOF
}
return err
}
out(m)
if m.Final {
break
}
}
return nil
}
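// A minimal sketch of streaming realtime metrics with the method above,
// assuming the caller already holds a configured *AdminClient; everything
// else mirrors the options and callback types defined in this file:
//
//	func watchDiskMetrics(ctx context.Context, adm *AdminClient) error {
//		opts := MetricsOptions{
//			Type:     MetricsDisk | MetricsScanner,
//			Interval: time.Second,
//			N:        10, // stop after 10 samples; 0 streams indefinitely
//		}
//		return adm.Metrics(ctx, opts, func(m RealtimeMetrics) {
//			fmt.Println("hosts:", m.Hosts, "errors:", m.Errors)
//		})
//	}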
// Contains returns whether m contains all of x.
func (m MetricType) Contains(x MetricType) bool {
return m&x == x
}
// RealtimeMetrics provides realtime metrics.
// This is intended to be expanded over time to cover more types.
type RealtimeMetrics struct {
// Errors lists any errors that occurred.
Errors []string `json:"errors,omitempty"`
// Hosts indicates the scanned hosts
Hosts []string `json:"hosts"`
Aggregated Metrics `json:"aggregated"`
ByHost map[string]Metrics `json:"by_host,omitempty"`
ByDisk map[string]DiskMetric `json:"by_disk,omitempty"`
// Final indicates whether this is the final packet and the receiver can exit.
Final bool `json:"final"`
}
// Metrics contains all metric types.
type Metrics struct {
Scanner *ScannerMetrics `json:"scanner,omitempty"`
Disk *DiskMetric `json:"disk,omitempty"`
OS *OSMetrics `json:"os,omitempty"`
BatchJobs *BatchJobMetrics `json:"batchJobs,omitempty"`
SiteResync *SiteResyncMetrics `json:"siteResync,omitempty"`
Net *NetMetrics `json:"net,omitempty"`
Mem *MemMetrics `json:"mem,omitempty"`
CPU *CPUMetrics `json:"cpu,omitempty"`
RPC *RPCMetrics `json:"rpc,omitempty"`
Go *RuntimeMetrics `json:"go,omitempty"`
}
// Merge other into r.
func (r *Metrics) Merge(other *Metrics) {
if other == nil {
return
}
if r.Scanner == nil && other.Scanner != nil {
r.Scanner = &ScannerMetrics{}
}
r.Scanner.Merge(other.Scanner)
if r.Disk == nil && other.Disk != nil {
r.Disk = &DiskMetric{}
}
r.Disk.Merge(other.Disk)
if r.OS == nil && other.OS != nil {
r.OS = &OSMetrics{}
}
r.OS.Merge(other.OS)
if r.BatchJobs == nil && other.BatchJobs != nil {
r.BatchJobs = &BatchJobMetrics{}
}
r.BatchJobs.Merge(other.BatchJobs)
if r.SiteResync == nil && other.SiteResync != nil {
r.SiteResync = &SiteResyncMetrics{}
}
r.SiteResync.Merge(other.SiteResync)
if r.Net == nil && other.Net != nil {
r.Net = &NetMetrics{}
}
r.Net.Merge(other.Net)
if r.RPC == nil && other.RPC != nil {
r.RPC = &RPCMetrics{}
}
r.RPC.Merge(other.RPC)
if r.Go == nil && other.Go != nil {
r.Go = &RuntimeMetrics{}
}
r.Go.Merge(other.Go)
}
// Merge will merge other into r.
func (r *RealtimeMetrics) Merge(other *RealtimeMetrics) {
if other == nil {
return
}
if len(other.Errors) > 0 {
r.Errors = append(r.Errors, other.Errors...)
}
if r.ByHost == nil && len(other.ByHost) > 0 {
r.ByHost = make(map[string]Metrics, len(other.ByHost))
}
for host, metrics := range other.ByHost {
r.ByHost[host] = metrics
}
r.Hosts = append(r.Hosts, other.Hosts...)
r.Aggregated.Merge(&other.Aggregated)
sort.Strings(r.Hosts)
// Gather per disk metrics
if r.ByDisk == nil && len(other.ByDisk) > 0 {
r.ByDisk = make(map[string]DiskMetric, len(other.ByDisk))
}
for disk, metrics := range other.ByDisk {
r.ByDisk[disk] = metrics
}
}
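// Building on the sketch after Metrics above, the callback can fold every
// received packet into a single aggregate with Merge; adm and ctx are assumed
// to exist in the caller's scope:
//
//	var total RealtimeMetrics
//	err := adm.Metrics(ctx, MetricsOptions{Type: MetricsAll}, func(m RealtimeMetrics) {
//		total.Merge(&m)
//	})
//	_ = err // handle as appropriate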
// ScannerMetrics contains scanner information.
type ScannerMetrics struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
CurrentCycle uint64 `json:"current_cycle"` // Deprecated Mar 2024
CurrentStarted time.Time `json:"current_started"` // Deprecated Mar 2024
CyclesCompletedAt []time.Time `json:"cycle_complete_times"` // Deprecated Mar 2024
// Number of buckets currently scanning
OngoingBuckets int `json:"ongoing_buckets"`
// Stats per bucket, a map between bucket name and scan stats in all erasure sets
PerBucketStats map[string][]BucketScanInfo `json:"per_bucket_stats,omitempty"`
// Number of accumulated operations by type since server restart.
LifeTimeOps map[string]uint64 `json:"life_time_ops,omitempty"`
// Number of accumulated ILM operations by type since server restart.
LifeTimeILM map[string]uint64 `json:"ilm_ops,omitempty"`
// Last minute operation statistics.
LastMinute struct {
// Scanner actions.
Actions map[string]TimedAction `json:"actions,omitempty"`
// ILM actions.
ILM map[string]TimedAction `json:"ilm,omitempty"`
} `json:"last_minute"`
// Currently active path(s) being scanned.
ActivePaths []string `json:"active,omitempty"`
}
// Merge other into 's'.
func (s *ScannerMetrics) Merge(other *ScannerMetrics) {
if other == nil {
return
}
if s.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
s.CollectedAt = other.CollectedAt
}
if s.OngoingBuckets < other.OngoingBuckets {
s.OngoingBuckets = other.OngoingBuckets
}
if s.PerBucketStats == nil {
s.PerBucketStats = make(map[string][]BucketScanInfo)
}
for bucket, otherSt := range other.PerBucketStats {
if len(otherSt) == 0 {
continue
}
_, ok := s.PerBucketStats[bucket]
if !ok {
s.PerBucketStats[bucket] = otherSt
}
}
if s.CurrentCycle < other.CurrentCycle {
s.CurrentCycle = other.CurrentCycle
s.CyclesCompletedAt = other.CyclesCompletedAt
s.CurrentStarted = other.CurrentStarted
}
if len(other.CyclesCompletedAt) > len(s.CyclesCompletedAt) {
s.CyclesCompletedAt = other.CyclesCompletedAt
}
// Regular ops
if len(other.LifeTimeOps) > 0 && s.LifeTimeOps == nil {
s.LifeTimeOps = make(map[string]uint64, len(other.LifeTimeOps))
}
for k, v := range other.LifeTimeOps {
total := s.LifeTimeOps[k] + v
s.LifeTimeOps[k] = total
}
if s.LastMinute.Actions == nil && len(other.LastMinute.Actions) > 0 {
s.LastMinute.Actions = make(map[string]TimedAction, len(other.LastMinute.Actions))
}
for k, v := range other.LastMinute.Actions {
total := s.LastMinute.Actions[k]
total.Merge(v)
s.LastMinute.Actions[k] = total
}
// ILM
if len(other.LifeTimeILM) > 0 && s.LifeTimeILM == nil {
s.LifeTimeILM = make(map[string]uint64, len(other.LifeTimeILM))
}
for k, v := range other.LifeTimeILM {
total := s.LifeTimeILM[k] + v
s.LifeTimeILM[k] = total
}
if s.LastMinute.ILM == nil && len(other.LastMinute.ILM) > 0 {
s.LastMinute.ILM = make(map[string]TimedAction, len(other.LastMinute.ILM))
}
for k, v := range other.LastMinute.ILM {
total := s.LastMinute.ILM[k]
total.Merge(v)
s.LastMinute.ILM[k] = total
}
s.ActivePaths = append(s.ActivePaths, other.ActivePaths...)
sort.Strings(s.ActivePaths)
}
// DiskIOStats contains IO stats of a single drive
type DiskIOStats struct {
ReadIOs uint64 `json:"read_ios"`
ReadMerges uint64 `json:"read_merges"`
ReadSectors uint64 `json:"read_sectors"`
ReadTicks uint64 `json:"read_ticks"`
WriteIOs uint64 `json:"write_ios"`
WriteMerges uint64 `json:"write_merges"`
WriteSectors uint64 `json:"wrte_sectors"`
WriteTicks uint64 `json:"write_ticks"`
CurrentIOs uint64 `json:"current_ios"`
TotalTicks uint64 `json:"total_ticks"`
ReqTicks uint64 `json:"req_ticks"`
DiscardIOs uint64 `json:"discard_ios"`
DiscardMerges uint64 `json:"discard_merges"`
DiscardSectors uint64 `json:"discard_secotrs"`
DiscardTicks uint64 `json:"discard_ticks"`
FlushIOs uint64 `json:"flush_ios"`
FlushTicks uint64 `json:"flush_ticks"`
}
// DiskMetric contains metrics for one or more disks.
type DiskMetric struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
// Number of disks
NDisks int `json:"n_disks"`
// Offline disks
Offline int `json:"offline,omitempty"`
// Healing disks
Healing int `json:"healing,omitempty"`
// Number of accumulated operations by type since server restart.
LifeTimeOps map[string]uint64 `json:"life_time_ops,omitempty"`
// Last minute statistics.
LastMinute struct {
Operations map[string]TimedAction `json:"operations,omitempty"`
} `json:"last_minute"`
IOStats DiskIOStats `json:"iostats,omitempty"`
}
// Merge other into 'd'.
func (d *DiskMetric) Merge(other *DiskMetric) {
if other == nil {
return
}
if d.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
d.CollectedAt = other.CollectedAt
}
d.NDisks += other.NDisks
d.Offline += other.Offline
d.Healing += other.Healing
if len(other.LifeTimeOps) > 0 && d.LifeTimeOps == nil {
d.LifeTimeOps = make(map[string]uint64, len(other.LifeTimeOps))
}
for k, v := range other.LifeTimeOps {
total := d.LifeTimeOps[k] + v
d.LifeTimeOps[k] = total
}
if d.LastMinute.Operations == nil && len(other.LastMinute.Operations) > 0 {
d.LastMinute.Operations = make(map[string]TimedAction, len(other.LastMinute.Operations))
}
for k, v := range other.LastMinute.Operations {
total := d.LastMinute.Operations[k]
total.Merge(v)
d.LastMinute.Operations[k] = total
}
}
// OSMetrics contains metrics for OS operations.
type OSMetrics struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
// Number of accumulated operations by type since server restart.
LifeTimeOps map[string]uint64 `json:"life_time_ops,omitempty"`
// Last minute statistics.
LastMinute struct {
Operations map[string]TimedAction `json:"operations,omitempty"`
} `json:"last_minute"`
}
// Merge other into 'o'.
func (o *OSMetrics) Merge(other *OSMetrics) {
if other == nil {
return
}
if o.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
o.CollectedAt = other.CollectedAt
}
if len(other.LifeTimeOps) > 0 && o.LifeTimeOps == nil {
o.LifeTimeOps = make(map[string]uint64, len(other.LifeTimeOps))
}
for k, v := range other.LifeTimeOps {
total := o.LifeTimeOps[k] + v
o.LifeTimeOps[k] = total
}
if o.LastMinute.Operations == nil && len(other.LastMinute.Operations) > 0 {
o.LastMinute.Operations = make(map[string]TimedAction, len(other.LastMinute.Operations))
}
for k, v := range other.LastMinute.Operations {
total := o.LastMinute.Operations[k]
total.Merge(v)
o.LastMinute.Operations[k] = total
}
}
// BatchJobMetrics contains metrics for batch operations
type BatchJobMetrics struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
// Jobs by ID.
Jobs map[string]JobMetric
}
type JobMetric struct {
JobID string `json:"jobID"`
JobType string `json:"jobType"`
StartTime time.Time `json:"startTime"`
LastUpdate time.Time `json:"lastUpdate"`
RetryAttempts int `json:"retryAttempts"`
Complete bool `json:"complete"`
Failed bool `json:"failed"`
// Specific job type data:
Replicate *ReplicateInfo `json:"replicate,omitempty"`
KeyRotate *KeyRotationInfo `json:"rotation,omitempty"`
Expired *ExpirationInfo `json:"expired,omitempty"`
Catalog *CatalogInfo `json:"catalog,omitempty"`
}
type ReplicateInfo struct {
// Last bucket/object batch replicated
Bucket string `json:"lastBucket"`
Object string `json:"lastObject"`
// Verbose information
Objects int64 `json:"objects"`
ObjectsFailed int64 `json:"objectsFailed"`
BytesTransferred int64 `json:"bytesTransferred"`
BytesFailed int64 `json:"bytesFailed"`
}
type ExpirationInfo struct {
// Last bucket/object expired
Bucket string `json:"lastBucket"`
Object string `json:"lastObject"`
// Verbose information
Objects int64 `json:"objects"`
ObjectsFailed int64 `json:"objectsFailed"`
}
type KeyRotationInfo struct {
// Last bucket/object key rotated
Bucket string `json:"lastBucket"`
Object string `json:"lastObject"`
// Verbose information
Objects int64 `json:"objects"`
ObjectsFailed int64 `json:"objectsFailed"`
}
type CatalogInfo struct {
LastBucketScanned string `json:"lastBucketScanned"`
LastObjectScanned string `json:"lastObjectScanned"`
LastBucketMatched string `json:"lastBucketMatched"`
LastObjectMatched string `json:"lastObjectMatched"`
ObjectsScannedCount uint64 `json:"objectsScannedCount"`
ObjectsMatchedCount uint64 `json:"objectsMatchedCount"`
// Represents the number of objects' metadata that were written to output
// objects.
RecordsWrittenCount uint64 `json:"recordsWrittenCount"`
// Represents the number of output objects created.
OutputObjectsCount uint64 `json:"outputObjectsCount"`
// Manifest file path (part of the output of a catalog job)
ManifestPathBucket string `json:"manifestPathBucket"`
ManifestPathObject string `json:"manifestPathObject"`
// Error message
ErrorMsg string `json:"errorMsg"`
}
// Merge other into 'o'.
func (o *BatchJobMetrics) Merge(other *BatchJobMetrics) {
if other == nil || len(other.Jobs) == 0 {
return
}
if o.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
o.CollectedAt = other.CollectedAt
}
if o.Jobs == nil {
o.Jobs = make(map[string]JobMetric, len(other.Jobs))
}
// Job
for k, v := range other.Jobs {
o.Jobs[k] = v
}
}
// SiteResyncMetrics contains metrics for site resync operation
type SiteResyncMetrics struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
// Status of resync operation
ResyncStatus string `json:"resyncStatus,omitempty"`
StartTime time.Time `json:"startTime"`
LastUpdate time.Time `json:"lastUpdate"`
NumBuckets int64 `json:"numBuckets"`
ResyncID string `json:"resyncID"`
DeplID string `json:"deplID"`
// Completed size in bytes
ReplicatedSize int64 `json:"completedReplicationSize"`
// Total number of objects replicated
ReplicatedCount int64 `json:"replicationCount"`
// Failed size in bytes
FailedSize int64 `json:"failedReplicationSize"`
// Total number of failed operations
FailedCount int64 `json:"failedReplicationCount"`
// Buckets that could not be synced
FailedBuckets []string `json:"failedBuckets"`
// Last bucket/object replicated.
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
}
func (o SiteResyncMetrics) Complete() bool {
return strings.ToLower(o.ResyncStatus) == "completed"
}
// Merge other into 'o'.
func (o *SiteResyncMetrics) Merge(other *SiteResyncMetrics) {
if other == nil {
return
}
if o.CollectedAt.Before(other.CollectedAt) {
// Use latest
*o = *other
}
}
type NetMetrics struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
// Name of the network interface
InterfaceName string `json:"interfaceName"`
NetStats procfs.NetDevLine `json:"netstats"`
}
//msgp:replace procfs.NetDevLine with:procfsNetDevLine
// Merge other into 'n'.
func (n *NetMetrics) Merge(other *NetMetrics) {
if other == nil {
return
}
if n.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
n.CollectedAt = other.CollectedAt
}
n.NetStats.RxBytes += other.NetStats.RxBytes
n.NetStats.RxPackets += other.NetStats.RxPackets
n.NetStats.RxErrors += other.NetStats.RxErrors
n.NetStats.RxDropped += other.NetStats.RxDropped
n.NetStats.RxFIFO += other.NetStats.RxFIFO
n.NetStats.RxFrame += other.NetStats.RxFrame
n.NetStats.RxCompressed += other.NetStats.RxCompressed
n.NetStats.RxMulticast += other.NetStats.RxMulticast
n.NetStats.TxBytes += other.NetStats.TxBytes
n.NetStats.TxPackets += other.NetStats.TxPackets
n.NetStats.TxErrors += other.NetStats.TxErrors
n.NetStats.TxDropped += other.NetStats.TxDropped
n.NetStats.TxFIFO += other.NetStats.TxFIFO
n.NetStats.TxCollisions += other.NetStats.TxCollisions
n.NetStats.TxCarrier += other.NetStats.TxCarrier
n.NetStats.TxCompressed += other.NetStats.TxCompressed
}
//msgp:replace NodeCommon with:nodeCommon
// nodeCommon - use as replacement for NodeCommon
// We do not want to generate msgp code for NodeCommon itself, since it is used for embedding.
type nodeCommon struct {
Addr string `json:"addr"`
Error string `json:"error,omitempty"`
}
// MemInfo contains system's RAM and swap information.
type MemInfo struct {
NodeCommon
Total uint64 `json:"total,omitempty"`
Used uint64 `json:"used,omitempty"`
Free uint64 `json:"free,omitempty"`
Available uint64 `json:"available,omitempty"`
Shared uint64 `json:"shared,omitempty"`
Cache uint64 `json:"cache,omitempty"`
Buffers uint64 `json:"buffer,omitempty"`
SwapSpaceTotal uint64 `json:"swap_space_total,omitempty"`
SwapSpaceFree uint64 `json:"swap_space_free,omitempty"`
// Limit will store cgroup limit if configured and
// less than Total, otherwise same as Total
Limit uint64 `json:"limit,omitempty"`
}
type MemMetrics struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
Info MemInfo `json:"memInfo"`
}
// Merge other into 'm'.
func (m *MemMetrics) Merge(other *MemMetrics) {
if m.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
m.CollectedAt = other.CollectedAt
}
m.Info.Total += other.Info.Total
m.Info.Available += other.Info.Available
m.Info.SwapSpaceTotal += other.Info.SwapSpaceTotal
m.Info.SwapSpaceFree += other.Info.SwapSpaceFree
m.Info.Limit += other.Info.Limit
}
//msgp:replace cpu.TimesStat with:cpuTimesStat
//msgp:replace load.AvgStat with:loadAvgStat
type CPUMetrics struct {
// Time these metrics were collected
CollectedAt time.Time `json:"collected"`
TimesStat *cpu.TimesStat `json:"timesStat"`
LoadStat *load.AvgStat `json:"loadStat"`
CPUCount int `json:"cpuCount"`
}
// Merge other into 'm'.
func (m *CPUMetrics) Merge(other *CPUMetrics) {
if m.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
m.CollectedAt = other.CollectedAt
}
m.TimesStat.User += other.TimesStat.User
m.TimesStat.System += other.TimesStat.System
m.TimesStat.Idle += other.TimesStat.Idle
m.TimesStat.Nice += other.TimesStat.Nice
m.TimesStat.Iowait += other.TimesStat.Iowait
m.TimesStat.Irq += other.TimesStat.Irq
m.TimesStat.Softirq += other.TimesStat.Softirq
m.TimesStat.Steal += other.TimesStat.Steal
m.TimesStat.Guest += other.TimesStat.Guest
m.TimesStat.GuestNice += other.TimesStat.GuestNice
m.LoadStat.Load1 += other.LoadStat.Load1
m.LoadStat.Load5 += other.LoadStat.Load5
m.LoadStat.Load15 += other.LoadStat.Load15
}
// RPCMetrics contains metrics for RPC operations.
type RPCMetrics struct {
CollectedAt time.Time `json:"collectedAt"`
Connected int `json:"connected"`
ReconnectCount int `json:"reconnectCount"`
Disconnected int `json:"disconnected"`
OutgoingStreams int `json:"outgoingStreams"`
IncomingStreams int `json:"incomingStreams"`
OutgoingBytes int64 `json:"outgoingBytes"`
IncomingBytes int64 `json:"incomingBytes"`
OutgoingMessages int64 `json:"outgoingMessages"`
IncomingMessages int64 `json:"incomingMessages"`
OutQueue int `json:"outQueue"`
LastPongTime time.Time `json:"lastPongTime"`
LastPingMS float64 `json:"lastPingMS"`
MaxPingDurMS float64 `json:"maxPingDurMS"` // Maximum across all merged entries.
LastConnectTime time.Time `json:"lastConnectTime"`
ByDestination map[string]RPCMetrics `json:"byDestination,omitempty"`
ByCaller map[string]RPCMetrics `json:"byCaller,omitempty"`
}
// Merge other into 'm'.
func (m *RPCMetrics) Merge(other *RPCMetrics) {
if m == nil || other == nil {
return
}
if m.CollectedAt.Before(other.CollectedAt) {
// Use latest timestamp
m.CollectedAt = other.CollectedAt
}
if m.LastConnectTime.Before(other.LastConnectTime) {
m.LastConnectTime = other.LastConnectTime
}
m.Connected += other.Connected
m.Disconnected += other.Disconnected
m.ReconnectCount += other.ReconnectCount
m.OutgoingStreams += other.OutgoingStreams
m.IncomingStreams += other.IncomingStreams
m.OutgoingBytes += other.OutgoingBytes
m.IncomingBytes += other.IncomingBytes
m.OutgoingMessages += other.OutgoingMessages
m.IncomingMessages += other.IncomingMessages
m.OutQueue += other.OutQueue
if m.LastPongTime.Before(other.LastPongTime) {
m.LastPongTime = other.LastPongTime
m.LastPingMS = other.LastPingMS
}
if m.MaxPingDurMS < other.MaxPingDurMS {
m.MaxPingDurMS = other.MaxPingDurMS
}
for k, v := range other.ByDestination {
if m.ByDestination == nil {
m.ByDestination = make(map[string]RPCMetrics, len(other.ByDestination))
}
existing := m.ByDestination[k]
existing.Merge(&v)
m.ByDestination[k] = existing
}
for k, v := range other.ByCaller {
if m.ByCaller == nil {
m.ByCaller = make(map[string]RPCMetrics, len(other.ByCaller))
}
existing := m.ByCaller[k]
existing.Merge(&v)
m.ByCaller[k] = existing
}
}
//msgp:replace metrics.Float64Histogram with:localF64H
// localF64H is a local copy of metrics.Float64Histogram and can be cast to/from it.
type localF64H struct {
Counts []uint64 `json:"counts,omitempty"`
Buckets []float64 `json:"buckets,omitempty"`
}
// RuntimeMetrics contains metrics for the go runtime.
// See more at https://pkg.go.dev/runtime/metrics
type RuntimeMetrics struct {
// UintMetrics contains KindUint64 values
UintMetrics map[string]uint64 `json:"uintMetrics,omitempty"`
// FloatMetrics contains KindFloat64 values
FloatMetrics map[string]float64 `json:"floatMetrics,omitempty"`
// HistMetrics contains KindFloat64Histogram values
HistMetrics map[string]metrics.Float64Histogram `json:"histMetrics,omitempty"`
// N tracks the number of merged entries.
N int `json:"n"`
}
// Merge other into 'm'.
func (m *RuntimeMetrics) Merge(other *RuntimeMetrics) {
if m == nil || other == nil {
return
}
if m.UintMetrics == nil {
m.UintMetrics = make(map[string]uint64, len(other.UintMetrics))
}
if m.FloatMetrics == nil {
m.FloatMetrics = make(map[string]float64, len(other.FloatMetrics))
}
if m.HistMetrics == nil {
m.HistMetrics = make(map[string]metrics.Float64Histogram, len(other.HistMetrics))
}
for k, v := range other.UintMetrics {
m.UintMetrics[k] += v
}
for k, v := range other.FloatMetrics {
m.FloatMetrics[k] += v
}
for k, v := range other.HistMetrics {
existing := m.HistMetrics[k]
if len(existing.Buckets) == 0 {
m.HistMetrics[k] = v
continue
}
// TODO: Technically, I guess we may have differing buckets,
// but they should be the same for the runtime.
if len(existing.Buckets) == len(v.Buckets) {
for i, count := range v.Counts {
existing.Counts[i] += count
}
}
}
m.N += other.N
}
golang-github-minio-madmin-go-3.0.104/metrics_client.go 0000664 0000000 0000000 00000013215 14774251704 0022735 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"fmt"
"net/http"
"net/url"
"time"
jwtgo "github.com/golang-jwt/jwt/v4"
"github.com/minio/minio-go/v7/pkg/credentials"
)
const (
defaultPrometheusJWTExpiry = 100 * 365 * 24 * time.Hour
libraryMinioURLPrefix = "/minio"
prometheusIssuer = "prometheus"
)
// MetricsClient implements MinIO metrics operations
type MetricsClient struct {
// Credentials for authentication
creds *credentials.Credentials
// Indicate whether we are using https or not
secure bool
// Parsed endpoint url provided by the user.
endpointURL *url.URL
// Needs allocation.
httpClient *http.Client
}
// metricsRequestData - is a container for all the values needed to make a
// request.
type metricsRequestData struct {
relativePath string // URL path relative to admin API base endpoint
}
// NewMetricsClientWithOptions - instantiate minio metrics client honoring Prometheus format
func NewMetricsClientWithOptions(endpoint string, opts *Options) (*MetricsClient, error) {
if opts == nil {
return nil, ErrInvalidArgument("empty options not allowed")
}
endpointURL, err := getEndpointURL(endpoint, opts.Secure)
if err != nil {
return nil, err
}
clnt, err := privateNewMetricsClient(endpointURL, opts)
if err != nil {
return nil, err
}
return clnt, nil
}
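// A minimal sketch of constructing a metrics client with explicit options;
// the endpoint and credential values are placeholders only:
//
//	mc, err := NewMetricsClientWithOptions("minio.example.com:9000", &Options{
//		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
//		Secure: true,
//	})
//	if err != nil {
//		// handle error
//	}
//	_ = mc // use mc to issue metrics requests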
// NewMetricsClient - instantiate minio metrics client honoring Prometheus format
//
// Deprecated: please use NewMetricsClientWithOptions
func NewMetricsClient(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*MetricsClient, error) {
return NewMetricsClientWithOptions(endpoint, &Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: secure,
})
}
// getPrometheusToken creates a JWT from MinIO access and secret keys
func getPrometheusToken(accessKey, secretKey string) (string, error) {
jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.RegisteredClaims{
ExpiresAt: jwtgo.NewNumericDate(time.Now().UTC().Add(defaultPrometheusJWTExpiry)),
Subject: accessKey,
Issuer: prometheusIssuer,
})
token, err := jwt.SignedString([]byte(secretKey))
if err != nil {
return "", err
}
return token, nil
}
func privateNewMetricsClient(endpointURL *url.URL, opts *Options) (*MetricsClient, error) {
clnt := new(MetricsClient)
clnt.creds = opts.Creds
clnt.secure = opts.Secure
clnt.endpointURL = endpointURL
tr := opts.Transport
if tr == nil {
tr = DefaultTransport(opts.Secure)
}
clnt.httpClient = &http.Client{
Transport: tr,
}
return clnt, nil
}
// executeGetRequest - instantiates a Get method and performs the request
func (client *MetricsClient) executeGetRequest(ctx context.Context, reqData metricsRequestData) (res *http.Response, err error) {
req, err := client.newGetRequest(ctx, reqData)
if err != nil {
return nil, err
}
v, err := client.creds.GetWithContext(client.CredContext())
if err != nil {
return nil, err
}
accessKeyID := v.AccessKeyID
secretAccessKey := v.SecretAccessKey
jwtToken, err := getPrometheusToken(accessKeyID, secretAccessKey)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", "Bearer "+jwtToken)
req.Header.Set("X-Amz-Security-Token", v.SessionToken)
return client.httpClient.Do(req)
}
// newGetRequest - instantiate a new HTTP GET request
func (client *MetricsClient) newGetRequest(ctx context.Context, reqData metricsRequestData) (req *http.Request, err error) {
targetURL, err := client.makeTargetURL(reqData)
if err != nil {
return nil, err
}
return http.NewRequestWithContext(ctx, http.MethodGet, targetURL.String(), nil)
}
// makeTargetURL make a new target url.
func (client *MetricsClient) makeTargetURL(r metricsRequestData) (*url.URL, error) {
if client.endpointURL == nil {
return nil, fmt.Errorf("enpointURL cannot be nil")
}
host := client.endpointURL.Host
scheme := client.endpointURL.Scheme
prefix := libraryMinioURLPrefix
urlStr := scheme + "://" + host + prefix + r.relativePath
return url.Parse(urlStr)
}
// SetCustomTransport - set new custom transport.
//
// Deprecated: please use Options{Transport: tr} to provide custom transport.
func (client *MetricsClient) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport
// ``http.DefaultTransport``.
//
// This transport is usually needed for debugging OR to add your
// own custom TLS certificates on the client transport, for custom
// CA's and certs which are not part of standard certificate
// authority follow this example :-
//
// tr := &http.Transport{
// TLSClientConfig: &tls.Config{RootCAs: pool},
// DisableCompression: true,
// }
// api.SetTransport(tr)
//
if client.httpClient != nil {
client.httpClient.Transport = customHTTPTransport
}
}
// CredContext returns the context for fetching credentials
func (client *MetricsClient) CredContext() *credentials.CredContext {
return &credentials.CredContext{
Client: client.httpClient,
}
}
golang-github-minio-madmin-go-3.0.104/metrics_client_test.go 0000664 0000000 0000000 00000005205 14774251704 0023774 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"fmt"
"net/url"
"testing"
jwtgo "github.com/golang-jwt/jwt/v4"
)
func TestMakeTargetUrlBuildsURLWithClientAndRelativePath(t *testing.T) {
clnt := MetricsClient{
endpointURL: &url.URL{
Host: "localhost:9000",
Scheme: "http",
},
}
requestData := metricsRequestData{
relativePath: "/some/path",
}
targetURL, err := clnt.makeTargetURL(requestData)
if err != nil {
t.Errorf("error not expected, got: %v", err)
}
expectedURL := "http://localhost:9000/minio/some/path"
if expectedURL != targetURL.String() {
t.Errorf("target url: %s not equal to expected url: %s", targetURL, expectedURL)
}
}
func TestMakeTargetUrlReturnsErrorIfEndpointURLNotSet(t *testing.T) {
clnt := MetricsClient{}
requestData := metricsRequestData{
relativePath: "/some/path",
}
_, err := clnt.makeTargetURL(requestData)
if err == nil {
t.Errorf("error expected got nil")
}
}
func TestMakeTargetUrlReturnsErrorOnURLParse(t *testing.T) {
clnt := MetricsClient{
endpointURL: &url.URL{},
}
requestData := metricsRequestData{
relativePath: "/some/path",
}
_, err := clnt.makeTargetURL(requestData)
if err == nil {
t.Errorf("error expected got nil")
}
}
func TestGetPrometheusTokenReturnsValidJwtTokenFromAccessAndSecretKey(t *testing.T) {
accessKey := "myaccessKey"
secretKey := "mysecretKey"
jwtToken, err := getPrometheusToken(accessKey, secretKey)
if err != nil {
t.Errorf("error not expected, got: %v", err)
}
token, err := jwtgo.Parse(jwtToken, func(token *jwtgo.Token) (interface{}, error) {
// Set same signing method used in our function
if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return []byte(secretKey), nil
})
if err != nil {
t.Errorf("error not expected, got: %v", err)
}
if !token.Valid {
t.Errorf("invalid token: %s", jwtToken)
}
}
golang-github-minio-madmin-go-3.0.104/metrics_gen.go 0000664 0000000 0000000 00000735241 14774251704 0022242 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"runtime/metrics"
"time"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/load"
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BatchJobMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "Jobs":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Jobs")
return
}
if z.Jobs == nil {
z.Jobs = make(map[string]JobMetric, zb0002)
} else if len(z.Jobs) > 0 {
for key := range z.Jobs {
delete(z.Jobs, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 JobMetric
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Jobs")
return
}
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Jobs", za0001)
return
}
z.Jobs[za0001] = za0002
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BatchJobMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "collected"
err = en.Append(0x82, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
// write "Jobs"
err = en.Append(0xa4, 0x4a, 0x6f, 0x62, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.Jobs)))
if err != nil {
err = msgp.WrapError(err, "Jobs")
return
}
for za0001, za0002 := range z.Jobs {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "Jobs")
return
}
err = za0002.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Jobs", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BatchJobMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "collected"
o = append(o, 0x82, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
// string "Jobs"
o = append(o, 0xa4, 0x4a, 0x6f, 0x62, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.Jobs)))
for za0001, za0002 := range z.Jobs {
o = msgp.AppendString(o, za0001)
o, err = za0002.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Jobs", za0001)
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BatchJobMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "Jobs":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Jobs")
return
}
if z.Jobs == nil {
z.Jobs = make(map[string]JobMetric, zb0002)
} else if len(z.Jobs) > 0 {
for key := range z.Jobs {
delete(z.Jobs, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 JobMetric
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Jobs")
return
}
bts, err = za0002.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Jobs", za0001)
return
}
z.Jobs[za0001] = za0002
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobMetrics) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 5 + msgp.MapHeaderSize
if z.Jobs != nil {
for za0001, za0002 := range z.Jobs {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *CPUMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "timesStat":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "TimesStat")
return
}
z.TimesStat = nil
} else {
if z.TimesStat == nil {
z.TimesStat = new(cpu.TimesStat)
}
err = (*cpuTimesStat)(z.TimesStat).DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "TimesStat")
return
}
}
case "loadStat":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "LoadStat")
return
}
z.LoadStat = nil
} else {
if z.LoadStat == nil {
z.LoadStat = new(load.AvgStat)
}
err = (*loadAvgStat)(z.LoadStat).DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "LoadStat")
return
}
}
case "cpuCount":
z.CPUCount, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "CPUCount")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *CPUMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "collected"
err = en.Append(0x84, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
// write "timesStat"
err = en.Append(0xa9, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74)
if err != nil {
return
}
if z.TimesStat == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = (*cpuTimesStat)(z.TimesStat).EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "TimesStat")
return
}
}
// write "loadStat"
err = en.Append(0xa8, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74)
if err != nil {
return
}
if z.LoadStat == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = (*loadAvgStat)(z.LoadStat).EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "LoadStat")
return
}
}
// write "cpuCount"
err = en.Append(0xa8, 0x63, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.CPUCount)
if err != nil {
err = msgp.WrapError(err, "CPUCount")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *CPUMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
// string "collected"
o = append(o, 0x84, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
// string "timesStat"
o = append(o, 0xa9, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74)
if z.TimesStat == nil {
o = msgp.AppendNil(o)
} else {
o, err = (*cpuTimesStat)(z.TimesStat).MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "TimesStat")
return
}
}
// string "loadStat"
o = append(o, 0xa8, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74)
if z.LoadStat == nil {
o = msgp.AppendNil(o)
} else {
o, err = (*loadAvgStat)(z.LoadStat).MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "LoadStat")
return
}
}
// string "cpuCount"
o = append(o, 0xa8, 0x63, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt(o, z.CPUCount)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *CPUMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "timesStat":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.TimesStat = nil
} else {
if z.TimesStat == nil {
z.TimesStat = new(cpu.TimesStat)
}
bts, err = (*cpuTimesStat)(z.TimesStat).UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "TimesStat")
return
}
}
case "loadStat":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.LoadStat = nil
} else {
if z.LoadStat == nil {
z.LoadStat = new(load.AvgStat)
}
bts, err = (*loadAvgStat)(z.LoadStat).UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "LoadStat")
return
}
}
case "cpuCount":
z.CPUCount, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CPUCount")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *CPUMetrics) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 10
if z.TimesStat == nil {
s += msgp.NilSize
} else {
s += (*cpuTimesStat)(z.TimesStat).Msgsize()
}
s += 9
if z.LoadStat == nil {
s += msgp.NilSize
} else {
s += (*loadAvgStat)(z.LoadStat).Msgsize()
}
s += 9 + msgp.IntSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *CatalogInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucketScanned":
z.LastBucketScanned, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastBucketScanned")
return
}
case "lastObjectScanned":
z.LastObjectScanned, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastObjectScanned")
return
}
case "lastBucketMatched":
z.LastBucketMatched, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastBucketMatched")
return
}
case "lastObjectMatched":
z.LastObjectMatched, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastObjectMatched")
return
}
case "objectsScannedCount":
z.ObjectsScannedCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsScannedCount")
return
}
case "objectsMatchedCount":
z.ObjectsMatchedCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsMatchedCount")
return
}
case "recordsWrittenCount":
z.RecordsWrittenCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RecordsWrittenCount")
return
}
case "outputObjectsCount":
z.OutputObjectsCount, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "OutputObjectsCount")
return
}
case "manifestPathBucket":
z.ManifestPathBucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ManifestPathBucket")
return
}
case "manifestPathObject":
z.ManifestPathObject, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ManifestPathObject")
return
}
case "errorMsg":
z.ErrorMsg, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ErrorMsg")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *CatalogInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 11
// write "lastBucketScanned"
err = en.Append(0x8b, 0xb1, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteString(z.LastBucketScanned)
if err != nil {
err = msgp.WrapError(err, "LastBucketScanned")
return
}
// write "lastObjectScanned"
err = en.Append(0xb1, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteString(z.LastObjectScanned)
if err != nil {
err = msgp.WrapError(err, "LastObjectScanned")
return
}
// write "lastBucketMatched"
err = en.Append(0xb1, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteString(z.LastBucketMatched)
if err != nil {
err = msgp.WrapError(err, "LastBucketMatched")
return
}
// write "lastObjectMatched"
err = en.Append(0xb1, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteString(z.LastObjectMatched)
if err != nil {
err = msgp.WrapError(err, "LastObjectMatched")
return
}
// write "objectsScannedCount"
err = en.Append(0xb3, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsScannedCount)
if err != nil {
err = msgp.WrapError(err, "ObjectsScannedCount")
return
}
// write "objectsMatchedCount"
err = en.Append(0xb3, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsMatchedCount)
if err != nil {
err = msgp.WrapError(err, "ObjectsMatchedCount")
return
}
// write "recordsWrittenCount"
err = en.Append(0xb3, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.RecordsWrittenCount)
if err != nil {
err = msgp.WrapError(err, "RecordsWrittenCount")
return
}
// write "outputObjectsCount"
err = en.Append(0xb2, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.OutputObjectsCount)
if err != nil {
err = msgp.WrapError(err, "OutputObjectsCount")
return
}
// write "manifestPathBucket"
err = en.Append(0xb2, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.ManifestPathBucket)
if err != nil {
err = msgp.WrapError(err, "ManifestPathBucket")
return
}
// write "manifestPathObject"
err = en.Append(0xb2, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.ManifestPathObject)
if err != nil {
err = msgp.WrapError(err, "ManifestPathObject")
return
}
// write "errorMsg"
err = en.Append(0xa8, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67)
if err != nil {
return
}
err = en.WriteString(z.ErrorMsg)
if err != nil {
err = msgp.WrapError(err, "ErrorMsg")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *CatalogInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 11
// string "lastBucketScanned"
o = append(o, 0x8b, 0xb1, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64)
o = msgp.AppendString(o, z.LastBucketScanned)
// string "lastObjectScanned"
o = append(o, 0xb1, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64)
o = msgp.AppendString(o, z.LastObjectScanned)
// string "lastBucketMatched"
o = append(o, 0xb1, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64)
o = msgp.AppendString(o, z.LastBucketMatched)
// string "lastObjectMatched"
o = append(o, 0xb1, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64)
o = msgp.AppendString(o, z.LastObjectMatched)
// string "objectsScannedCount"
o = append(o, 0xb3, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ObjectsScannedCount)
// string "objectsMatchedCount"
o = append(o, 0xb3, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.ObjectsMatchedCount)
// string "recordsWrittenCount"
o = append(o, 0xb3, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.RecordsWrittenCount)
// string "outputObjectsCount"
o = append(o, 0xb2, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.OutputObjectsCount)
// string "manifestPathBucket"
o = append(o, 0xb2, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.ManifestPathBucket)
// string "manifestPathObject"
o = append(o, 0xb2, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.ManifestPathObject)
// string "errorMsg"
o = append(o, 0xa8, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67)
o = msgp.AppendString(o, z.ErrorMsg)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *CatalogInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucketScanned":
z.LastBucketScanned, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastBucketScanned")
return
}
case "lastObjectScanned":
z.LastObjectScanned, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastObjectScanned")
return
}
case "lastBucketMatched":
z.LastBucketMatched, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastBucketMatched")
return
}
case "lastObjectMatched":
z.LastObjectMatched, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastObjectMatched")
return
}
case "objectsScannedCount":
z.ObjectsScannedCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsScannedCount")
return
}
case "objectsMatchedCount":
z.ObjectsMatchedCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsMatchedCount")
return
}
case "recordsWrittenCount":
z.RecordsWrittenCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RecordsWrittenCount")
return
}
case "outputObjectsCount":
z.OutputObjectsCount, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OutputObjectsCount")
return
}
case "manifestPathBucket":
z.ManifestPathBucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ManifestPathBucket")
return
}
case "manifestPathObject":
z.ManifestPathObject, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ManifestPathObject")
return
}
case "errorMsg":
z.ErrorMsg, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ErrorMsg")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *CatalogInfo) Msgsize() (s int) {
s = 1 + 18 + msgp.StringPrefixSize + len(z.LastBucketScanned) + 18 + msgp.StringPrefixSize + len(z.LastObjectScanned) + 18 + msgp.StringPrefixSize + len(z.LastBucketMatched) + 18 + msgp.StringPrefixSize + len(z.LastObjectMatched) + 20 + msgp.Uint64Size + 20 + msgp.Uint64Size + 20 + msgp.Uint64Size + 19 + msgp.Uint64Size + 19 + msgp.StringPrefixSize + len(z.ManifestPathBucket) + 19 + msgp.StringPrefixSize + len(z.ManifestPathObject) + 9 + msgp.StringPrefixSize + len(z.ErrorMsg)
return
}
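// Illustrative sketch (not part of the generated API): round-tripping a
// CatalogInfo through the generated MessagePack marshalers above. The field
// values and the function name are hypothetical.
func exampleCatalogInfoRoundTrip() (CatalogInfo, error) {
	in := CatalogInfo{
		LastBucketScanned:   "example-bucket",
		ObjectsScannedCount: 42,
	}
	// MarshalMsg appends to the supplied slice; passing nil allocates a new one.
	buf, err := in.MarshalMsg(nil)
	if err != nil {
		return CatalogInfo{}, err
	}
	var out CatalogInfo
	// UnmarshalMsg returns any bytes left over after the decoded message.
	if _, err := out.UnmarshalMsg(buf); err != nil {
		return CatalogInfo{}, err
	}
	return out, nil
}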
// DecodeMsg implements msgp.Decodable
func (z *DiskIOStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "read_ios":
z.ReadIOs, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReadIOs")
return
}
case "read_merges":
z.ReadMerges, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReadMerges")
return
}
case "read_sectors":
z.ReadSectors, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReadSectors")
return
}
case "read_ticks":
z.ReadTicks, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReadTicks")
return
}
case "write_ios":
z.WriteIOs, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "WriteIOs")
return
}
case "write_merges":
z.WriteMerges, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "WriteMerges")
return
}
case "wrte_sectors":
z.WriteSectors, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "WriteSectors")
return
}
case "write_ticks":
z.WriteTicks, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "WriteTicks")
return
}
case "current_ios":
z.CurrentIOs, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "CurrentIOs")
return
}
case "total_ticks":
z.TotalTicks, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "TotalTicks")
return
}
case "req_ticks":
z.ReqTicks, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ReqTicks")
return
}
case "discard_ios":
z.DiscardIOs, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "DiscardIOs")
return
}
case "discard_merges":
z.DiscardMerges, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "DiscardMerges")
return
}
case "discard_secotrs":
z.DiscardSectors, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "DiscardSectors")
return
}
case "discard_ticks":
z.DiscardTicks, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "DiscardTicks")
return
}
case "flush_ios":
z.FlushIOs, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "FlushIOs")
return
}
case "flush_ticks":
z.FlushTicks, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "FlushTicks")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *DiskIOStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 17
// write "read_ios"
err = en.Append(0xde, 0x0, 0x11, 0xa8, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x6f, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.ReadIOs)
if err != nil {
err = msgp.WrapError(err, "ReadIOs")
return
}
// write "read_merges"
err = en.Append(0xab, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.ReadMerges)
if err != nil {
err = msgp.WrapError(err, "ReadMerges")
return
}
// write "read_sectors"
err = en.Append(0xac, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.ReadSectors)
if err != nil {
err = msgp.WrapError(err, "ReadSectors")
return
}
// write "read_ticks"
err = en.Append(0xaa, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.ReadTicks)
if err != nil {
err = msgp.WrapError(err, "ReadTicks")
return
}
// write "write_ios"
err = en.Append(0xa9, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x69, 0x6f, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.WriteIOs)
if err != nil {
err = msgp.WrapError(err, "WriteIOs")
return
}
// write "write_merges"
err = en.Append(0xac, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.WriteMerges)
if err != nil {
err = msgp.WrapError(err, "WriteMerges")
return
}
// write "wrte_sectors"
err = en.Append(0xac, 0x77, 0x72, 0x74, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.WriteSectors)
if err != nil {
err = msgp.WrapError(err, "WriteSectors")
return
}
// write "write_ticks"
err = en.Append(0xab, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.WriteTicks)
if err != nil {
err = msgp.WrapError(err, "WriteTicks")
return
}
// write "current_ios"
err = en.Append(0xab, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6f, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.CurrentIOs)
if err != nil {
err = msgp.WrapError(err, "CurrentIOs")
return
}
// write "total_ticks"
err = en.Append(0xab, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.TotalTicks)
if err != nil {
err = msgp.WrapError(err, "TotalTicks")
return
}
// write "req_ticks"
err = en.Append(0xa9, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.ReqTicks)
if err != nil {
err = msgp.WrapError(err, "ReqTicks")
return
}
// write "discard_ios"
err = en.Append(0xab, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6f, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.DiscardIOs)
if err != nil {
err = msgp.WrapError(err, "DiscardIOs")
return
}
// write "discard_merges"
err = en.Append(0xae, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.DiscardMerges)
if err != nil {
err = msgp.WrapError(err, "DiscardMerges")
return
}
// write "discard_secotrs"
err = en.Append(0xaf, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x74, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.DiscardSectors)
if err != nil {
err = msgp.WrapError(err, "DiscardSectors")
return
}
// write "discard_ticks"
err = en.Append(0xad, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.DiscardTicks)
if err != nil {
err = msgp.WrapError(err, "DiscardTicks")
return
}
// write "flush_ios"
err = en.Append(0xa9, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6f, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.FlushIOs)
if err != nil {
err = msgp.WrapError(err, "FlushIOs")
return
}
// write "flush_ticks"
err = en.Append(0xab, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.FlushTicks)
if err != nil {
err = msgp.WrapError(err, "FlushTicks")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *DiskIOStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 17
// string "read_ios"
o = append(o, 0xde, 0x0, 0x11, 0xa8, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x6f, 0x73)
o = msgp.AppendUint64(o, z.ReadIOs)
// string "read_merges"
o = append(o, 0xab, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x73)
o = msgp.AppendUint64(o, z.ReadMerges)
// string "read_sectors"
o = append(o, 0xac, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73)
o = msgp.AppendUint64(o, z.ReadSectors)
// string "read_ticks"
o = append(o, 0xaa, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
o = msgp.AppendUint64(o, z.ReadTicks)
// string "write_ios"
o = append(o, 0xa9, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x69, 0x6f, 0x73)
o = msgp.AppendUint64(o, z.WriteIOs)
// string "write_merges"
o = append(o, 0xac, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x73)
o = msgp.AppendUint64(o, z.WriteMerges)
// string "wrte_sectors"
o = append(o, 0xac, 0x77, 0x72, 0x74, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73)
o = msgp.AppendUint64(o, z.WriteSectors)
// string "write_ticks"
o = append(o, 0xab, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
o = msgp.AppendUint64(o, z.WriteTicks)
// string "current_ios"
o = append(o, 0xab, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6f, 0x73)
o = msgp.AppendUint64(o, z.CurrentIOs)
// string "total_ticks"
o = append(o, 0xab, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
o = msgp.AppendUint64(o, z.TotalTicks)
// string "req_ticks"
o = append(o, 0xa9, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
o = msgp.AppendUint64(o, z.ReqTicks)
// string "discard_ios"
o = append(o, 0xab, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6f, 0x73)
o = msgp.AppendUint64(o, z.DiscardIOs)
// string "discard_merges"
o = append(o, 0xae, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x73)
o = msgp.AppendUint64(o, z.DiscardMerges)
// string "discard_secotrs"
o = append(o, 0xaf, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x74, 0x72, 0x73)
o = msgp.AppendUint64(o, z.DiscardSectors)
// string "discard_ticks"
o = append(o, 0xad, 0x64, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
o = msgp.AppendUint64(o, z.DiscardTicks)
// string "flush_ios"
o = append(o, 0xa9, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6f, 0x73)
o = msgp.AppendUint64(o, z.FlushIOs)
// string "flush_ticks"
o = append(o, 0xab, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x73)
o = msgp.AppendUint64(o, z.FlushTicks)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *DiskIOStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "read_ios":
z.ReadIOs, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReadIOs")
return
}
case "read_merges":
z.ReadMerges, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReadMerges")
return
}
case "read_sectors":
z.ReadSectors, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReadSectors")
return
}
case "read_ticks":
z.ReadTicks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReadTicks")
return
}
case "write_ios":
z.WriteIOs, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "WriteIOs")
return
}
case "write_merges":
z.WriteMerges, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "WriteMerges")
return
}
case "wrte_sectors":
z.WriteSectors, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "WriteSectors")
return
}
case "write_ticks":
z.WriteTicks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "WriteTicks")
return
}
case "current_ios":
z.CurrentIOs, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "CurrentIOs")
return
}
case "total_ticks":
z.TotalTicks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "TotalTicks")
return
}
case "req_ticks":
z.ReqTicks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReqTicks")
return
}
case "discard_ios":
z.DiscardIOs, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiscardIOs")
return
}
case "discard_merges":
z.DiscardMerges, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiscardMerges")
return
}
case "discard_secotrs":
z.DiscardSectors, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiscardSectors")
return
}
case "discard_ticks":
z.DiscardTicks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiscardTicks")
return
}
case "flush_ios":
z.FlushIOs, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FlushIOs")
return
}
case "flush_ticks":
z.FlushTicks, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FlushTicks")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DiskIOStats) Msgsize() (s int) {
s = 3 + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 11 + msgp.Uint64Size + 10 + msgp.Uint64Size + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 15 + msgp.Uint64Size + 16 + msgp.Uint64Size + 14 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size
return
}
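// Illustrative sketch (not part of the generated API): streaming a DiskIOStats
// value through an existing *msgp.Writer, for example one wrapping a network
// connection. The caller is assumed to have constructed the writer; the
// function name is hypothetical.
func exampleEncodeDiskIOStats(w *msgp.Writer, stats *DiskIOStats) error {
	// EncodeMsg writes the 17-key MessagePack map produced by the generator above.
	if err := stats.EncodeMsg(w); err != nil {
		return err
	}
	// Flush pushes any buffered bytes to the underlying io.Writer.
	return w.Flush()
}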
// DecodeMsg implements msgp.Decodable
func (z *DiskMetric) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "n_disks":
z.NDisks, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "NDisks")
return
}
case "offline":
z.Offline, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Offline")
return
}
zb0001Mask |= 0x1
case "healing":
z.Healing, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Healing")
return
}
zb0001Mask |= 0x2
case "life_time_ops":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
if z.LifeTimeOps == nil {
z.LifeTimeOps = make(map[string]uint64, zb0002)
} else if len(z.LifeTimeOps) > 0 {
for key := range z.LifeTimeOps {
delete(z.LifeTimeOps, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 uint64
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
za0002, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0001)
return
}
z.LifeTimeOps[za0001] = za0002
}
zb0001Mask |= 0x4
case "last_minute":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
for zb0003 > 0 {
zb0003--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "operations":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
if z.LastMinute.Operations == nil {
z.LastMinute.Operations = make(map[string]TimedAction, zb0004)
} else if len(z.LastMinute.Operations) > 0 {
for key := range z.LastMinute.Operations {
delete(z.LastMinute.Operations, key)
}
}
for zb0004 > 0 {
zb0004--
var za0003 string
var za0004 TimedAction
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
z.LastMinute.Operations[za0003] = za0004
}
zb0003Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
// Clear omitted fields.
if (zb0003Mask & 0x1) == 0 {
z.LastMinute.Operations = nil
}
case "iostats":
err = z.IOStats.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "IOStats")
return
}
zb0001Mask |= 0x8
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.Offline = 0
}
if (zb0001Mask & 0x2) == 0 {
z.Healing = 0
}
if (zb0001Mask & 0x4) == 0 {
z.LifeTimeOps = nil
}
if (zb0001Mask & 0x8) == 0 {
z.IOStats = DiskIOStats{}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *DiskMetric) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(7)
var zb0001Mask uint8 /* 7 bits */
_ = zb0001Mask
if z.Offline == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Healing == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.LifeTimeOps == nil {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "collected"
err = en.Append(0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
// write "n_disks"
err = en.Append(0xa7, 0x6e, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.NDisks)
if err != nil {
err = msgp.WrapError(err, "NDisks")
return
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "offline"
err = en.Append(0xa7, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteInt(z.Offline)
if err != nil {
err = msgp.WrapError(err, "Offline")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "healing"
err = en.Append(0xa7, 0x68, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67)
if err != nil {
return
}
err = en.WriteInt(z.Healing)
if err != nil {
err = msgp.WrapError(err, "Healing")
return
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "life_time_ops"
err = en.Append(0xad, 0x6c, 0x69, 0x66, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LifeTimeOps)))
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
for za0001, za0002 := range z.LifeTimeOps {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
err = en.WriteUint64(za0002)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0001)
return
}
}
}
// write "last_minute"
err = en.Append(0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65)
if err != nil {
return
}
// check for omitted fields
zb0002Len := uint32(1)
var zb0002Mask uint8 /* 1 bits */
_ = zb0002Mask
if z.LastMinute.Operations == nil {
zb0002Len--
zb0002Mask |= 0x1
}
// variable map header, size zb0002Len
err = en.Append(0x80 | uint8(zb0002Len))
if err != nil {
return
}
if (zb0002Mask & 0x1) == 0 { // if not omitted
// write "operations"
err = en.Append(0xaa, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LastMinute.Operations)))
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
for za0003, za0004 := range z.LastMinute.Operations {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
err = za0004.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
}
}
// write "iostats"
err = en.Append(0xa7, 0x69, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = z.IOStats.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "IOStats")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *DiskMetric) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(7)
var zb0001Mask uint8 /* 7 bits */
_ = zb0001Mask
if z.Offline == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Healing == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.LifeTimeOps == nil {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "collected"
o = append(o, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
// string "n_disks"
o = append(o, 0xa7, 0x6e, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendInt(o, z.NDisks)
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "offline"
o = append(o, 0xa7, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65)
o = msgp.AppendInt(o, z.Offline)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "healing"
o = append(o, 0xa7, 0x68, 0x65, 0x61, 0x6c, 0x69, 0x6e, 0x67)
o = msgp.AppendInt(o, z.Healing)
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "life_time_ops"
o = append(o, 0xad, 0x6c, 0x69, 0x66, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.LifeTimeOps)))
for za0001, za0002 := range z.LifeTimeOps {
o = msgp.AppendString(o, za0001)
o = msgp.AppendUint64(o, za0002)
}
}
// string "last_minute"
o = append(o, 0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65)
// check for omitted fields
zb0002Len := uint32(1)
var zb0002Mask uint8 /* 1 bits */
_ = zb0002Mask
if z.LastMinute.Operations == nil {
zb0002Len--
zb0002Mask |= 0x1
}
// variable map header, size zb0002Len
o = append(o, 0x80|uint8(zb0002Len))
if (zb0002Mask & 0x1) == 0 { // if not omitted
// string "operations"
o = append(o, 0xaa, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.LastMinute.Operations)))
for za0003, za0004 := range z.LastMinute.Operations {
o = msgp.AppendString(o, za0003)
o, err = za0004.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
}
}
// string "iostats"
o = append(o, 0xa7, 0x69, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x73)
o, err = z.IOStats.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "IOStats")
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *DiskMetric) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "n_disks":
z.NDisks, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NDisks")
return
}
case "offline":
z.Offline, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Offline")
return
}
zb0001Mask |= 0x1
case "healing":
z.Healing, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Healing")
return
}
zb0001Mask |= 0x2
case "life_time_ops":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
if z.LifeTimeOps == nil {
z.LifeTimeOps = make(map[string]uint64, zb0002)
} else if len(z.LifeTimeOps) > 0 {
for key := range z.LifeTimeOps {
delete(z.LifeTimeOps, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 uint64
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
za0002, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0001)
return
}
z.LifeTimeOps[za0001] = za0002
}
zb0001Mask |= 0x4
case "last_minute":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
for zb0003 > 0 {
zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "operations":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
if z.LastMinute.Operations == nil {
z.LastMinute.Operations = make(map[string]TimedAction, zb0004)
} else if len(z.LastMinute.Operations) > 0 {
for key := range z.LastMinute.Operations {
delete(z.LastMinute.Operations, key)
}
}
for zb0004 > 0 {
var za0003 string
var za0004 TimedAction
zb0004--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
bts, err = za0004.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
z.LastMinute.Operations[za0003] = za0004
}
zb0003Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
// Clear omitted fields.
if (zb0003Mask & 0x1) == 0 {
z.LastMinute.Operations = nil
}
case "iostats":
bts, err = z.IOStats.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "IOStats")
return
}
zb0001Mask |= 0x8
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.Offline = 0
}
if (zb0001Mask & 0x2) == 0 {
z.Healing = 0
}
if (zb0001Mask & 0x4) == 0 {
z.LifeTimeOps = nil
}
if (zb0001Mask & 0x8) == 0 {
z.IOStats = DiskIOStats{}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DiskMetric) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 8 + msgp.IntSize + 8 + msgp.IntSize + 8 + msgp.IntSize + 14 + msgp.MapHeaderSize
if z.LifeTimeOps != nil {
for za0001, za0002 := range z.LifeTimeOps {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.Uint64Size
}
}
s += 12 + 1 + 11 + msgp.MapHeaderSize
if z.LastMinute.Operations != nil {
for za0003, za0004 := range z.LastMinute.Operations {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize()
}
}
s += 8 + z.IOStats.Msgsize()
return
}
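// Illustrative sketch (not part of the generated API): decoding a DiskMetric
// from an existing *msgp.Reader. Unknown map keys are skipped by the generated
// DecodeMsg, so an older reader tolerates newer payloads. The function name is
// hypothetical.
func exampleDecodeDiskMetric(r *msgp.Reader) (DiskMetric, error) {
	var m DiskMetric
	if err := m.DecodeMsg(r); err != nil {
		return DiskMetric{}, err
	}
	return m, nil
}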
// DecodeMsg implements msgp.Decodable
func (z *ExpirationInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lastObject":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "objects":
z.Objects, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "objectsFailed":
z.ObjectsFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ExpirationInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "lastBucket"
err = en.Append(0x84, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "lastObject"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "objects"
err = en.Append(0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Objects)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
// write "objectsFailed"
err = en.Append(0xad, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ExpirationInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
// string "lastBucket"
o = append(o, 0x84, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "lastObject"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "objects"
o = append(o, 0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
o = msgp.AppendInt64(o, z.Objects)
// string "objectsFailed"
o = append(o, 0xad, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendInt64(o, z.ObjectsFailed)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ExpirationInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lastObject":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "objects":
z.Objects, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "objectsFailed":
z.ObjectsFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ExpirationInfo) Msgsize() (s int) {
s = 1 + 11 + msgp.StringPrefixSize + len(z.Bucket) + 11 + msgp.StringPrefixSize + len(z.Object) + 8 + msgp.Int64Size + 14 + msgp.Int64Size
return
}
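// Illustrative sketch (not part of the generated API): using Msgsize to size
// the destination buffer before marshaling an ExpirationInfo. Msgsize is an
// upper-bound estimate, so the returned slice may be shorter than the capacity
// reserved. The function name is hypothetical.
func exampleMarshalExpirationInfo(info *ExpirationInfo) ([]byte, error) {
	buf := make([]byte, 0, info.Msgsize())
	return info.MarshalMsg(buf)
}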
// DecodeMsg implements msgp.Decodable
func (z *JobMetric) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "jobID":
z.JobID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
case "jobType":
z.JobType, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
case "startTime":
z.StartTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "retryAttempts":
z.RetryAttempts, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "complete":
z.Complete, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
case "failed":
z.Failed, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
case "replicate":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
z.Replicate = nil
} else {
if z.Replicate == nil {
z.Replicate = new(ReplicateInfo)
}
err = z.Replicate.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
zb0001Mask |= 0x1
case "rotation":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
z.KeyRotate = nil
} else {
if z.KeyRotate == nil {
z.KeyRotate = new(KeyRotationInfo)
}
err = z.KeyRotate.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
zb0001Mask |= 0x2
case "expired":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Expired")
return
}
z.Expired = nil
} else {
if z.Expired == nil {
z.Expired = new(ExpirationInfo)
}
err = z.Expired.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Expired")
return
}
}
zb0001Mask |= 0x4
case "catalog":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Catalog")
return
}
z.Catalog = nil
} else {
if z.Catalog == nil {
z.Catalog = new(CatalogInfo)
}
err = z.Catalog.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Catalog")
return
}
}
zb0001Mask |= 0x8
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.Replicate = nil
}
if (zb0001Mask & 0x2) == 0 {
z.KeyRotate = nil
}
if (zb0001Mask & 0x4) == 0 {
z.Expired = nil
}
if (zb0001Mask & 0x8) == 0 {
z.Catalog = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *JobMetric) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(11)
var zb0001Mask uint16 /* 11 bits */
_ = zb0001Mask
if z.Replicate == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.KeyRotate == nil {
zb0001Len--
zb0001Mask |= 0x100
}
if z.Expired == nil {
zb0001Len--
zb0001Mask |= 0x200
}
if z.Catalog == nil {
zb0001Len--
zb0001Mask |= 0x400
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "jobID"
err = en.Append(0xa5, 0x6a, 0x6f, 0x62, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.JobID)
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
// write "jobType"
err = en.Append(0xa7, 0x6a, 0x6f, 0x62, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteString(z.JobType)
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
// write "startTime"
err = en.Append(0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.StartTime)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
// write "lastUpdate"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "retryAttempts"
err = en.Append(0xad, 0x72, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.RetryAttempts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "complete"
err = en.Append(0xa8, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Complete)
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
// write "failed"
err = en.Append(0xa6, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Failed)
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "replicate"
err = en.Append(0xa9, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
if err != nil {
return
}
if z.Replicate == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Replicate.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// write "rotation"
err = en.Append(0xa8, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
if z.KeyRotate == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.KeyRotate.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "expired"
err = en.Append(0xa7, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64)
if err != nil {
return
}
if z.Expired == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Expired.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Expired")
return
}
}
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// write "catalog"
err = en.Append(0xa7, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67)
if err != nil {
return
}
if z.Catalog == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Catalog.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Catalog")
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *JobMetric) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(11)
var zb0001Mask uint16 /* 11 bits */
_ = zb0001Mask
if z.Replicate == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.KeyRotate == nil {
zb0001Len--
zb0001Mask |= 0x100
}
if z.Expired == nil {
zb0001Len--
zb0001Mask |= 0x200
}
if z.Catalog == nil {
zb0001Len--
zb0001Mask |= 0x400
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "jobID"
o = append(o, 0xa5, 0x6a, 0x6f, 0x62, 0x49, 0x44)
o = msgp.AppendString(o, z.JobID)
// string "jobType"
o = append(o, 0xa7, 0x6a, 0x6f, 0x62, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, z.JobType)
// string "startTime"
o = append(o, 0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendTime(o, z.StartTime)
// string "lastUpdate"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "retryAttempts"
o = append(o, 0xad, 0x72, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
o = msgp.AppendInt(o, z.RetryAttempts)
// string "complete"
o = append(o, 0xa8, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65)
o = msgp.AppendBool(o, z.Complete)
// string "failed"
o = append(o, 0xa6, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendBool(o, z.Failed)
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "replicate"
o = append(o, 0xa9, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65)
if z.Replicate == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Replicate.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// string "rotation"
o = append(o, 0xa8, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if z.KeyRotate == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.KeyRotate.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "expired"
o = append(o, 0xa7, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64)
if z.Expired == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Expired.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Expired")
return
}
}
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// string "catalog"
o = append(o, 0xa7, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67)
if z.Catalog == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Catalog.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Catalog")
return
}
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *JobMetric) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "jobID":
z.JobID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "JobID")
return
}
case "jobType":
z.JobType, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "JobType")
return
}
case "startTime":
z.StartTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "retryAttempts":
z.RetryAttempts, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "complete":
z.Complete, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Complete")
return
}
case "failed":
z.Failed, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Failed")
return
}
case "replicate":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Replicate = nil
} else {
if z.Replicate == nil {
z.Replicate = new(ReplicateInfo)
}
bts, err = z.Replicate.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Replicate")
return
}
}
zb0001Mask |= 0x1
case "rotation":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.KeyRotate = nil
} else {
if z.KeyRotate == nil {
z.KeyRotate = new(KeyRotationInfo)
}
bts, err = z.KeyRotate.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "KeyRotate")
return
}
}
zb0001Mask |= 0x2
case "expired":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Expired = nil
} else {
if z.Expired == nil {
z.Expired = new(ExpirationInfo)
}
bts, err = z.Expired.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Expired")
return
}
}
zb0001Mask |= 0x4
case "catalog":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Catalog = nil
} else {
if z.Catalog == nil {
z.Catalog = new(CatalogInfo)
}
bts, err = z.Catalog.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Catalog")
return
}
}
zb0001Mask |= 0x8
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.Replicate = nil
}
if (zb0001Mask & 0x2) == 0 {
z.KeyRotate = nil
}
if (zb0001Mask & 0x4) == 0 {
z.Expired = nil
}
if (zb0001Mask & 0x8) == 0 {
z.Catalog = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *JobMetric) Msgsize() (s int) {
s = 1 + 6 + msgp.StringPrefixSize + len(z.JobID) + 8 + msgp.StringPrefixSize + len(z.JobType) + 10 + msgp.TimeSize + 11 + msgp.TimeSize + 14 + msgp.IntSize + 9 + msgp.BoolSize + 7 + msgp.BoolSize + 10
if z.Replicate == nil {
s += msgp.NilSize
} else {
s += z.Replicate.Msgsize()
}
s += 9
if z.KeyRotate == nil {
s += msgp.NilSize
} else {
s += z.KeyRotate.Msgsize()
}
s += 8
if z.Expired == nil {
s += msgp.NilSize
} else {
s += z.Expired.Msgsize()
}
s += 8
if z.Catalog == nil {
s += msgp.NilSize
} else {
s += z.Catalog.Msgsize()
}
return
}
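// Illustrative sketch (not part of the generated API): unmarshaling a JobMetric
// and keeping the remainder, which is useful when several messages are
// concatenated in one buffer. The input is assumed to hold at least one encoded
// JobMetric; the function name is hypothetical.
func exampleUnmarshalJobMetric(buf []byte) (JobMetric, []byte, error) {
	var m JobMetric
	rest, err := m.UnmarshalMsg(buf)
	if err != nil {
		return JobMetric{}, nil, err
	}
	// The optional pointer sections (replicate, rotation, expired, catalog)
	// come back nil when they were omitted or encoded as nil.
	return m, rest, nil
}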
// DecodeMsg implements msgp.Decodable
func (z *KeyRotationInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lastObject":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "objects":
z.Objects, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "objectsFailed":
z.ObjectsFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *KeyRotationInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "lastBucket"
err = en.Append(0x84, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "lastObject"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "objects"
err = en.Append(0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Objects)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
// write "objectsFailed"
err = en.Append(0xad, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *KeyRotationInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
// string "lastBucket"
o = append(o, 0x84, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "lastObject"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "objects"
o = append(o, 0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
o = msgp.AppendInt64(o, z.Objects)
// string "objectsFailed"
o = append(o, 0xad, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendInt64(o, z.ObjectsFailed)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *KeyRotationInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lastObject":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "objects":
z.Objects, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "objectsFailed":
z.ObjectsFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *KeyRotationInfo) Msgsize() (s int) {
s = 1 + 11 + msgp.StringPrefixSize + len(z.Bucket) + 11 + msgp.StringPrefixSize + len(z.Object) + 8 + msgp.Int64Size + 14 + msgp.Int64Size
return
}
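// Illustrative sketch (not part of the generated API): appending a
// KeyRotationInfo to a buffer that already holds other encoded data. MarshalMsg
// grows the slice it is given rather than always allocating a fresh one. The
// field values and the function name are hypothetical.
func exampleAppendKeyRotationInfo(buf []byte) ([]byte, error) {
	info := KeyRotationInfo{
		Bucket:  "example-bucket",
		Objects: 10,
	}
	return info.MarshalMsg(buf)
}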
// DecodeMsg implements msgp.Decodable
func (z *MemInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "NodeCommon":
err = (*nodeCommon)(&z.NodeCommon).DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NodeCommon")
return
}
case "total":
z.Total, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
zb0001Mask |= 0x1
case "used":
z.Used, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
zb0001Mask |= 0x2
case "free":
z.Free, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Free")
return
}
zb0001Mask |= 0x4
case "available":
z.Available, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Available")
return
}
zb0001Mask |= 0x8
case "shared":
z.Shared, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Shared")
return
}
zb0001Mask |= 0x10
case "cache":
z.Cache, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
zb0001Mask |= 0x20
case "buffer":
z.Buffers, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Buffers")
return
}
zb0001Mask |= 0x40
case "swap_space_total":
z.SwapSpaceTotal, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "SwapSpaceTotal")
return
}
zb0001Mask |= 0x80
case "swap_space_free":
z.SwapSpaceFree, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "SwapSpaceFree")
return
}
zb0001Mask |= 0x100
case "limit":
z.Limit, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
zb0001Mask |= 0x200
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3ff {
if (zb0001Mask & 0x1) == 0 {
z.Total = 0
}
if (zb0001Mask & 0x2) == 0 {
z.Used = 0
}
if (zb0001Mask & 0x4) == 0 {
z.Free = 0
}
if (zb0001Mask & 0x8) == 0 {
z.Available = 0
}
if (zb0001Mask & 0x10) == 0 {
z.Shared = 0
}
if (zb0001Mask & 0x20) == 0 {
z.Cache = 0
}
if (zb0001Mask & 0x40) == 0 {
z.Buffers = 0
}
if (zb0001Mask & 0x80) == 0 {
z.SwapSpaceTotal = 0
}
if (zb0001Mask & 0x100) == 0 {
z.SwapSpaceFree = 0
}
if (zb0001Mask & 0x200) == 0 {
z.Limit = 0
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *MemInfo) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(11)
var zb0001Mask uint16 /* 11 bits */
_ = zb0001Mask
if z.Total == 0 {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Used == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Free == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Available == 0 {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Shared == 0 {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Cache == 0 {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Buffers == 0 {
zb0001Len--
zb0001Mask |= 0x80
}
if z.SwapSpaceTotal == 0 {
zb0001Len--
zb0001Mask |= 0x100
}
if z.SwapSpaceFree == 0 {
zb0001Len--
zb0001Mask |= 0x200
}
if z.Limit == 0 {
zb0001Len--
zb0001Mask |= 0x400
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "NodeCommon"
err = en.Append(0xaa, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e)
if err != nil {
return
}
err = (*nodeCommon)(&z.NodeCommon).EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NodeCommon")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "total"
err = en.Append(0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteUint64(z.Total)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "used"
err = en.Append(0xa4, 0x75, 0x73, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.Used)
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "free"
err = en.Append(0xa4, 0x66, 0x72, 0x65, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Free)
if err != nil {
err = msgp.WrapError(err, "Free")
return
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "available"
err = en.Append(0xa9, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Available)
if err != nil {
err = msgp.WrapError(err, "Available")
return
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "shared"
err = en.Append(0xa6, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.Shared)
if err != nil {
err = msgp.WrapError(err, "Shared")
return
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "cache"
err = en.Append(0xa5, 0x63, 0x61, 0x63, 0x68, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Cache)
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "buffer"
err = en.Append(0xa6, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteUint64(z.Buffers)
if err != nil {
err = msgp.WrapError(err, "Buffers")
return
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// write "swap_space_total"
err = en.Append(0xb0, 0x73, 0x77, 0x61, 0x70, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteUint64(z.SwapSpaceTotal)
if err != nil {
err = msgp.WrapError(err, "SwapSpaceTotal")
return
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "swap_space_free"
err = en.Append(0xaf, 0x73, 0x77, 0x61, 0x70, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x66, 0x72, 0x65, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.SwapSpaceFree)
if err != nil {
err = msgp.WrapError(err, "SwapSpaceFree")
return
}
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// write "limit"
err = en.Append(0xa5, 0x6c, 0x69, 0x6d, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Limit)
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MemInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(11)
var zb0001Mask uint16 /* 11 bits */
_ = zb0001Mask
if z.Total == 0 {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Used == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.Free == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Available == 0 {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Shared == 0 {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Cache == 0 {
zb0001Len--
zb0001Mask |= 0x40
}
if z.Buffers == 0 {
zb0001Len--
zb0001Mask |= 0x80
}
if z.SwapSpaceTotal == 0 {
zb0001Len--
zb0001Mask |= 0x100
}
if z.SwapSpaceFree == 0 {
zb0001Len--
zb0001Mask |= 0x200
}
if z.Limit == 0 {
zb0001Len--
zb0001Mask |= 0x400
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "NodeCommon"
o = append(o, 0xaa, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e)
o, err = (*nodeCommon)(&z.NodeCommon).MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NodeCommon")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "total"
o = append(o, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendUint64(o, z.Total)
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "used"
o = append(o, 0xa4, 0x75, 0x73, 0x65, 0x64)
o = msgp.AppendUint64(o, z.Used)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "free"
o = append(o, 0xa4, 0x66, 0x72, 0x65, 0x65)
o = msgp.AppendUint64(o, z.Free)
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "available"
o = append(o, 0xa9, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65)
o = msgp.AppendUint64(o, z.Available)
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "shared"
o = append(o, 0xa6, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64)
o = msgp.AppendUint64(o, z.Shared)
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "cache"
o = append(o, 0xa5, 0x63, 0x61, 0x63, 0x68, 0x65)
o = msgp.AppendUint64(o, z.Cache)
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "buffer"
o = append(o, 0xa6, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72)
o = msgp.AppendUint64(o, z.Buffers)
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// string "swap_space_total"
o = append(o, 0xb0, 0x73, 0x77, 0x61, 0x70, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendUint64(o, z.SwapSpaceTotal)
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "swap_space_free"
o = append(o, 0xaf, 0x73, 0x77, 0x61, 0x70, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x66, 0x72, 0x65, 0x65)
o = msgp.AppendUint64(o, z.SwapSpaceFree)
}
if (zb0001Mask & 0x400) == 0 { // if not omitted
// string "limit"
o = append(o, 0xa5, 0x6c, 0x69, 0x6d, 0x69, 0x74)
o = msgp.AppendUint64(o, z.Limit)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MemInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "NodeCommon":
bts, err = (*nodeCommon)(&z.NodeCommon).UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NodeCommon")
return
}
case "total":
z.Total, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
zb0001Mask |= 0x1
case "used":
z.Used, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
zb0001Mask |= 0x2
case "free":
z.Free, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Free")
return
}
zb0001Mask |= 0x4
case "available":
z.Available, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Available")
return
}
zb0001Mask |= 0x8
case "shared":
z.Shared, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Shared")
return
}
zb0001Mask |= 0x10
case "cache":
z.Cache, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
zb0001Mask |= 0x20
case "buffer":
z.Buffers, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Buffers")
return
}
zb0001Mask |= 0x40
case "swap_space_total":
z.SwapSpaceTotal, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "SwapSpaceTotal")
return
}
zb0001Mask |= 0x80
case "swap_space_free":
z.SwapSpaceFree, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "SwapSpaceFree")
return
}
zb0001Mask |= 0x100
case "limit":
z.Limit, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
zb0001Mask |= 0x200
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3ff {
if (zb0001Mask & 0x1) == 0 {
z.Total = 0
}
if (zb0001Mask & 0x2) == 0 {
z.Used = 0
}
if (zb0001Mask & 0x4) == 0 {
z.Free = 0
}
if (zb0001Mask & 0x8) == 0 {
z.Available = 0
}
if (zb0001Mask & 0x10) == 0 {
z.Shared = 0
}
if (zb0001Mask & 0x20) == 0 {
z.Cache = 0
}
if (zb0001Mask & 0x40) == 0 {
z.Buffers = 0
}
if (zb0001Mask & 0x80) == 0 {
z.SwapSpaceTotal = 0
}
if (zb0001Mask & 0x100) == 0 {
z.SwapSpaceFree = 0
}
if (zb0001Mask & 0x200) == 0 {
z.Limit = 0
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MemInfo) Msgsize() (s int) {
s = 1 + 11 + (*nodeCommon)(&z.NodeCommon).Msgsize() + 6 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 10 + msgp.Uint64Size + 7 + msgp.Uint64Size + 6 + msgp.Uint64Size + 7 + msgp.Uint64Size + 17 + msgp.Uint64Size + 16 + msgp.Uint64Size + 6 + msgp.Uint64Size
return
}
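
// Editorial sketch (not part of the msgp-generated API): a buffer-based roundtrip
// using the MemInfo methods above. Zero-valued numeric fields are left out of the
// encoded map by MarshalMsg/EncodeMsg and reset to zero by the decoders when the
// corresponding key is missing. The sample values are illustrative assumptions only.
//
//	src := MemInfo{Total: 16 << 30, Available: 4 << 30} // remaining fields stay zero and are omitted
//	buf, err := src.MarshalMsg(nil)                     // msgp.Require sizes the buffer from src.Msgsize()
//	if err != nil {
//		return err
//	}
//	var dst MemInfo
//	if _, err := dst.UnmarshalMsg(buf); err != nil { // the returned slice holds any trailing bytes
//		return err
//	}
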
// DecodeMsg implements msgp.Decodable
func (z *MemMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "memInfo":
err = z.Info.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Info")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *MemMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "collected"
err = en.Append(0x82, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
// write "memInfo"
err = en.Append(0xa7, 0x6d, 0x65, 0x6d, 0x49, 0x6e, 0x66, 0x6f)
if err != nil {
return
}
err = z.Info.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Info")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MemMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "collected"
o = append(o, 0x82, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
// string "memInfo"
o = append(o, 0xa7, 0x6d, 0x65, 0x6d, 0x49, 0x6e, 0x66, 0x6f)
o, err = z.Info.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Info")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MemMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "memInfo":
bts, err = z.Info.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Info")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MemMetrics) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 8 + z.Info.Msgsize()
return
}
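
// Editorial sketch (not part of the msgp-generated API): MemMetrics is always written
// as a fixed two-entry map ("collected", "memInfo"), so its worst-case encoded size is
// the time encoding plus the nested MemInfo estimate. Pre-sizing a reusable buffer with
// Msgsize avoids reallocation; the values below are illustrative assumptions only.
//
//	mm := MemMetrics{CollectedAt: time.Now()} // assumes the caller imports "time"
//	mm.Info.Total = 8 << 30
//	buf := make([]byte, 0, mm.Msgsize())
//	buf, err := mm.MarshalMsg(buf)
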
// DecodeMsg implements msgp.Decodable
func (z *MetricType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 uint32
zb0001, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricType(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z MetricType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteUint32(uint32(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint32(o, uint32(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 uint32
zb0001, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricType) Msgsize() (s int) {
s = msgp.Uint32Size
return
}
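
// Editorial sketch (not part of the msgp-generated API): MetricType is encoded as a
// bare uint32 rather than a map, so it can also be appended to or read from a raw
// msgpack stream with the generic helpers. The value 0 below is a placeholder, not a
// meaningful flag combination.
//
//	b := msgp.AppendUint32(nil, uint32(MetricType(0)))
//	var mt MetricType
//	_, err := mt.UnmarshalMsg(b)
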
// DecodeMsg implements msgp.Decodable
func (z *Metrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "scanner":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Scanner")
return
}
z.Scanner = nil
} else {
if z.Scanner == nil {
z.Scanner = new(ScannerMetrics)
}
err = z.Scanner.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Scanner")
return
}
}
zb0001Mask |= 0x1
case "disk":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Disk")
return
}
z.Disk = nil
} else {
if z.Disk == nil {
z.Disk = new(DiskMetric)
}
err = z.Disk.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Disk")
return
}
}
zb0001Mask |= 0x2
case "os":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "OS")
return
}
z.OS = nil
} else {
if z.OS == nil {
z.OS = new(OSMetrics)
}
err = z.OS.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "OS")
return
}
}
zb0001Mask |= 0x4
case "batchJobs":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "BatchJobs")
return
}
z.BatchJobs = nil
} else {
if z.BatchJobs == nil {
z.BatchJobs = new(BatchJobMetrics)
}
err = z.BatchJobs.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "BatchJobs")
return
}
}
zb0001Mask |= 0x8
case "siteResync":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "SiteResync")
return
}
z.SiteResync = nil
} else {
if z.SiteResync == nil {
z.SiteResync = new(SiteResyncMetrics)
}
err = z.SiteResync.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "SiteResync")
return
}
}
zb0001Mask |= 0x10
case "net":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Net")
return
}
z.Net = nil
} else {
if z.Net == nil {
z.Net = new(NetMetrics)
}
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Net")
return
}
for zb0002 > 0 {
zb0002--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Net")
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.Net.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Net", "CollectedAt")
return
}
case "interfaceName":
z.Net.InterfaceName, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Net", "InterfaceName")
return
}
case "netstats":
err = (*procfsNetDevLine)(&z.Net.NetStats).DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Net", "NetStats")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Net")
return
}
}
}
}
zb0001Mask |= 0x20
case "mem":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Mem")
return
}
z.Mem = nil
} else {
if z.Mem == nil {
z.Mem = new(MemMetrics)
}
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Mem")
return
}
for zb0003 > 0 {
zb0003--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Mem")
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.Mem.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Mem", "CollectedAt")
return
}
case "memInfo":
err = z.Mem.Info.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Mem", "Info")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Mem")
return
}
}
}
}
zb0001Mask |= 0x40
case "cpu":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
z.CPU = nil
} else {
if z.CPU == nil {
z.CPU = new(CPUMetrics)
}
err = z.CPU.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
}
zb0001Mask |= 0x80
case "rpc":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "RPC")
return
}
z.RPC = nil
} else {
if z.RPC == nil {
z.RPC = new(RPCMetrics)
}
err = z.RPC.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "RPC")
return
}
}
zb0001Mask |= 0x100
case "go":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Go")
return
}
z.Go = nil
} else {
if z.Go == nil {
z.Go = new(RuntimeMetrics)
}
err = z.Go.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Go")
return
}
}
zb0001Mask |= 0x200
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3ff {
if (zb0001Mask & 0x1) == 0 {
z.Scanner = nil
}
if (zb0001Mask & 0x2) == 0 {
z.Disk = nil
}
if (zb0001Mask & 0x4) == 0 {
z.OS = nil
}
if (zb0001Mask & 0x8) == 0 {
z.BatchJobs = nil
}
if (zb0001Mask & 0x10) == 0 {
z.SiteResync = nil
}
if (zb0001Mask & 0x20) == 0 {
z.Net = nil
}
if (zb0001Mask & 0x40) == 0 {
z.Mem = nil
}
if (zb0001Mask & 0x80) == 0 {
z.CPU = nil
}
if (zb0001Mask & 0x100) == 0 {
z.RPC = nil
}
if (zb0001Mask & 0x200) == 0 {
z.Go = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Metrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(10)
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
if z.Scanner == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Disk == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.OS == nil {
zb0001Len--
zb0001Mask |= 0x4
}
if z.BatchJobs == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.SiteResync == nil {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Net == nil {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Mem == nil {
zb0001Len--
zb0001Mask |= 0x40
}
if z.CPU == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.RPC == nil {
zb0001Len--
zb0001Mask |= 0x100
}
if z.Go == nil {
zb0001Len--
zb0001Mask |= 0x200
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "scanner"
err = en.Append(0xa7, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x72)
if err != nil {
return
}
if z.Scanner == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Scanner.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Scanner")
return
}
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "disk"
err = en.Append(0xa4, 0x64, 0x69, 0x73, 0x6b)
if err != nil {
return
}
if z.Disk == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Disk.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Disk")
return
}
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "os"
err = en.Append(0xa2, 0x6f, 0x73)
if err != nil {
return
}
if z.OS == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.OS.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "OS")
return
}
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "batchJobs"
err = en.Append(0xa9, 0x62, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x73)
if err != nil {
return
}
if z.BatchJobs == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.BatchJobs.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "BatchJobs")
return
}
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "siteResync"
err = en.Append(0xaa, 0x73, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63)
if err != nil {
return
}
if z.SiteResync == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.SiteResync.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "SiteResync")
return
}
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "net"
err = en.Append(0xa3, 0x6e, 0x65, 0x74)
if err != nil {
return
}
if z.Net == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
// map header, size 3
// write "collected"
err = en.Append(0x83, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Net.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "Net", "CollectedAt")
return
}
// write "interfaceName"
err = en.Append(0xad, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Net.InterfaceName)
if err != nil {
err = msgp.WrapError(err, "Net", "InterfaceName")
return
}
// write "netstats"
err = en.Append(0xa8, 0x6e, 0x65, 0x74, 0x73, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = (*procfsNetDevLine)(&z.Net.NetStats).EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Net", "NetStats")
return
}
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "mem"
err = en.Append(0xa3, 0x6d, 0x65, 0x6d)
if err != nil {
return
}
if z.Mem == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
// map header, size 2
// write "collected"
err = en.Append(0x82, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Mem.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "Mem", "CollectedAt")
return
}
// write "memInfo"
err = en.Append(0xa7, 0x6d, 0x65, 0x6d, 0x49, 0x6e, 0x66, 0x6f)
if err != nil {
return
}
err = z.Mem.Info.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Mem", "Info")
return
}
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "cpu"
err = en.Append(0xa3, 0x63, 0x70, 0x75)
if err != nil {
return
}
if z.CPU == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.CPU.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// write "rpc"
err = en.Append(0xa3, 0x72, 0x70, 0x63)
if err != nil {
return
}
if z.RPC == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.RPC.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "RPC")
return
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "go"
err = en.Append(0xa2, 0x67, 0x6f)
if err != nil {
return
}
if z.Go == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Go.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Go")
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Metrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(10)
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
if z.Scanner == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Disk == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.OS == nil {
zb0001Len--
zb0001Mask |= 0x4
}
if z.BatchJobs == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.SiteResync == nil {
zb0001Len--
zb0001Mask |= 0x10
}
if z.Net == nil {
zb0001Len--
zb0001Mask |= 0x20
}
if z.Mem == nil {
zb0001Len--
zb0001Mask |= 0x40
}
if z.CPU == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.RPC == nil {
zb0001Len--
zb0001Mask |= 0x100
}
if z.Go == nil {
zb0001Len--
zb0001Mask |= 0x200
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "scanner"
o = append(o, 0xa7, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x72)
if z.Scanner == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Scanner.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Scanner")
return
}
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "disk"
o = append(o, 0xa4, 0x64, 0x69, 0x73, 0x6b)
if z.Disk == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Disk.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Disk")
return
}
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "os"
o = append(o, 0xa2, 0x6f, 0x73)
if z.OS == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.OS.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "OS")
return
}
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "batchJobs"
o = append(o, 0xa9, 0x62, 0x61, 0x74, 0x63, 0x68, 0x4a, 0x6f, 0x62, 0x73)
if z.BatchJobs == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.BatchJobs.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "BatchJobs")
return
}
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "siteResync"
o = append(o, 0xaa, 0x73, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63)
if z.SiteResync == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.SiteResync.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "SiteResync")
return
}
}
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "net"
o = append(o, 0xa3, 0x6e, 0x65, 0x74)
if z.Net == nil {
o = msgp.AppendNil(o)
} else {
// map header, size 3
// string "collected"
o = append(o, 0x83, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Net.CollectedAt)
// string "interfaceName"
o = append(o, 0xad, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Net.InterfaceName)
// string "netstats"
o = append(o, 0xa8, 0x6e, 0x65, 0x74, 0x73, 0x74, 0x61, 0x74, 0x73)
o, err = (*procfsNetDevLine)(&z.Net.NetStats).MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Net", "NetStats")
return
}
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "mem"
o = append(o, 0xa3, 0x6d, 0x65, 0x6d)
if z.Mem == nil {
o = msgp.AppendNil(o)
} else {
// map header, size 2
// string "collected"
o = append(o, 0x82, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Mem.CollectedAt)
// string "memInfo"
o = append(o, 0xa7, 0x6d, 0x65, 0x6d, 0x49, 0x6e, 0x66, 0x6f)
o, err = z.Mem.Info.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Mem", "Info")
return
}
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "cpu"
o = append(o, 0xa3, 0x63, 0x70, 0x75)
if z.CPU == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.CPU.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
}
}
if (zb0001Mask & 0x100) == 0 { // if not omitted
// string "rpc"
o = append(o, 0xa3, 0x72, 0x70, 0x63)
if z.RPC == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.RPC.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "RPC")
return
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "go"
o = append(o, 0xa2, 0x67, 0x6f)
if z.Go == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Go.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Go")
return
}
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Metrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "scanner":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Scanner = nil
} else {
if z.Scanner == nil {
z.Scanner = new(ScannerMetrics)
}
bts, err = z.Scanner.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Scanner")
return
}
}
zb0001Mask |= 0x1
case "disk":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Disk = nil
} else {
if z.Disk == nil {
z.Disk = new(DiskMetric)
}
bts, err = z.Disk.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Disk")
return
}
}
zb0001Mask |= 0x2
case "os":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.OS = nil
} else {
if z.OS == nil {
z.OS = new(OSMetrics)
}
bts, err = z.OS.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "OS")
return
}
}
zb0001Mask |= 0x4
case "batchJobs":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.BatchJobs = nil
} else {
if z.BatchJobs == nil {
z.BatchJobs = new(BatchJobMetrics)
}
bts, err = z.BatchJobs.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "BatchJobs")
return
}
}
zb0001Mask |= 0x8
case "siteResync":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.SiteResync = nil
} else {
if z.SiteResync == nil {
z.SiteResync = new(SiteResyncMetrics)
}
bts, err = z.SiteResync.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "SiteResync")
return
}
}
zb0001Mask |= 0x10
case "net":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Net = nil
} else {
if z.Net == nil {
z.Net = new(NetMetrics)
}
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Net")
return
}
for zb0002 > 0 {
zb0002--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Net")
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.Net.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Net", "CollectedAt")
return
}
case "interfaceName":
z.Net.InterfaceName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Net", "InterfaceName")
return
}
case "netstats":
bts, err = (*procfsNetDevLine)(&z.Net.NetStats).UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Net", "NetStats")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Net")
return
}
}
}
}
zb0001Mask |= 0x20
case "mem":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Mem = nil
} else {
if z.Mem == nil {
z.Mem = new(MemMetrics)
}
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Mem")
return
}
for zb0003 > 0 {
zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Mem")
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.Mem.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Mem", "CollectedAt")
return
}
case "memInfo":
bts, err = z.Mem.Info.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Mem", "Info")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Mem")
return
}
}
}
}
zb0001Mask |= 0x40
case "cpu":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.CPU = nil
} else {
if z.CPU == nil {
z.CPU = new(CPUMetrics)
}
bts, err = z.CPU.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "CPU")
return
}
}
zb0001Mask |= 0x80
case "rpc":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.RPC = nil
} else {
if z.RPC == nil {
z.RPC = new(RPCMetrics)
}
bts, err = z.RPC.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "RPC")
return
}
}
zb0001Mask |= 0x100
case "go":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Go = nil
} else {
if z.Go == nil {
z.Go = new(RuntimeMetrics)
}
bts, err = z.Go.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Go")
return
}
}
zb0001Mask |= 0x200
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3ff {
if (zb0001Mask & 0x1) == 0 {
z.Scanner = nil
}
if (zb0001Mask & 0x2) == 0 {
z.Disk = nil
}
if (zb0001Mask & 0x4) == 0 {
z.OS = nil
}
if (zb0001Mask & 0x8) == 0 {
z.BatchJobs = nil
}
if (zb0001Mask & 0x10) == 0 {
z.SiteResync = nil
}
if (zb0001Mask & 0x20) == 0 {
z.Net = nil
}
if (zb0001Mask & 0x40) == 0 {
z.Mem = nil
}
if (zb0001Mask & 0x80) == 0 {
z.CPU = nil
}
if (zb0001Mask & 0x100) == 0 {
z.RPC = nil
}
if (zb0001Mask & 0x200) == 0 {
z.Go = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Metrics) Msgsize() (s int) {
s = 1 + 8
if z.Scanner == nil {
s += msgp.NilSize
} else {
s += z.Scanner.Msgsize()
}
s += 5
if z.Disk == nil {
s += msgp.NilSize
} else {
s += z.Disk.Msgsize()
}
s += 3
if z.OS == nil {
s += msgp.NilSize
} else {
s += z.OS.Msgsize()
}
s += 10
if z.BatchJobs == nil {
s += msgp.NilSize
} else {
s += z.BatchJobs.Msgsize()
}
s += 11
if z.SiteResync == nil {
s += msgp.NilSize
} else {
s += z.SiteResync.Msgsize()
}
s += 4
if z.Net == nil {
s += msgp.NilSize
} else {
s += 1 + 10 + msgp.TimeSize + 14 + msgp.StringPrefixSize + len(z.Net.InterfaceName) + 9 + (*procfsNetDevLine)(&z.Net.NetStats).Msgsize()
}
s += 4
if z.Mem == nil {
s += msgp.NilSize
} else {
s += 1 + 10 + msgp.TimeSize + 8 + z.Mem.Info.Msgsize()
}
s += 4
if z.CPU == nil {
s += msgp.NilSize
} else {
s += z.CPU.Msgsize()
}
s += 4
if z.RPC == nil {
s += msgp.NilSize
} else {
s += z.RPC.Msgsize()
}
s += 3
if z.Go == nil {
s += msgp.NilSize
} else {
s += z.Go.Msgsize()
}
return
}
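
// Editorial sketch (not part of the msgp-generated API): because Metrics implements
// msgp.Encodable and msgp.Decodable, it can be streamed over an io.Reader/io.Writer
// without materializing the whole payload. Nil sub-metric pointers are dropped from
// the variable-size map header and come back nil after decoding. The conn variable is
// an assumed placeholder for any io.ReadWriter.
//
//	m := Metrics{Mem: &MemMetrics{}} // only the sections of interest are allocated
//	if err := msgp.Encode(conn, &m); err != nil {
//		return err
//	}
//	var out Metrics
//	if err := msgp.Decode(conn, &out); err != nil {
//		return err
//	}
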
// DecodeMsg implements msgp.Decodable
func (z *MetricsOptions) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Type":
{
var zb0002 uint32
zb0002, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = MetricType(zb0002)
}
case "N":
z.N, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "N")
return
}
case "Interval":
z.Interval, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "Interval")
return
}
case "Hosts":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Hosts")
return
}
if cap(z.Hosts) >= int(zb0003) {
z.Hosts = (z.Hosts)[:zb0003]
} else {
z.Hosts = make([]string, zb0003)
}
for za0001 := range z.Hosts {
z.Hosts[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Hosts", za0001)
return
}
}
case "ByHost":
z.ByHost, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
case "Disks":
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0004) {
z.Disks = (z.Disks)[:zb0004]
} else {
z.Disks = make([]string, zb0004)
}
for za0002 := range z.Disks {
z.Disks[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Disks", za0002)
return
}
}
case "ByDisk":
z.ByDisk, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
case "ByJobID":
z.ByJobID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ByJobID")
return
}
case "ByDepID":
z.ByDepID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ByDepID")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *MetricsOptions) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 9
// write "Type"
err = en.Append(0x89, 0xa4, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteUint32(uint32(z.Type))
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "N"
err = en.Append(0xa1, 0x4e)
if err != nil {
return
}
err = en.WriteInt(z.N)
if err != nil {
err = msgp.WrapError(err, "N")
return
}
// write "Interval"
err = en.Append(0xa8, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteDuration(z.Interval)
if err != nil {
err = msgp.WrapError(err, "Interval")
return
}
// write "Hosts"
err = en.Append(0xa5, 0x48, 0x6f, 0x73, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Hosts)))
if err != nil {
err = msgp.WrapError(err, "Hosts")
return
}
for za0001 := range z.Hosts {
err = en.WriteString(z.Hosts[za0001])
if err != nil {
err = msgp.WrapError(err, "Hosts", za0001)
return
}
}
// write "ByHost"
err = en.Append(0xa6, 0x42, 0x79, 0x48, 0x6f, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteBool(z.ByHost)
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
// write "Disks"
err = en.Append(0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Disks)))
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
for za0002 := range z.Disks {
err = en.WriteString(z.Disks[za0002])
if err != nil {
err = msgp.WrapError(err, "Disks", za0002)
return
}
}
// write "ByDisk"
err = en.Append(0xa6, 0x42, 0x79, 0x44, 0x69, 0x73, 0x6b)
if err != nil {
return
}
err = en.WriteBool(z.ByDisk)
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
// write "ByJobID"
err = en.Append(0xa7, 0x42, 0x79, 0x4a, 0x6f, 0x62, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ByJobID)
if err != nil {
err = msgp.WrapError(err, "ByJobID")
return
}
// write "ByDepID"
err = en.Append(0xa7, 0x42, 0x79, 0x44, 0x65, 0x70, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ByDepID)
if err != nil {
err = msgp.WrapError(err, "ByDepID")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MetricsOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 9
// string "Type"
o = append(o, 0x89, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendUint32(o, uint32(z.Type))
// string "N"
o = append(o, 0xa1, 0x4e)
o = msgp.AppendInt(o, z.N)
// string "Interval"
o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c)
o = msgp.AppendDuration(o, z.Interval)
// string "Hosts"
o = append(o, 0xa5, 0x48, 0x6f, 0x73, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Hosts)))
for za0001 := range z.Hosts {
o = msgp.AppendString(o, z.Hosts[za0001])
}
// string "ByHost"
o = append(o, 0xa6, 0x42, 0x79, 0x48, 0x6f, 0x73, 0x74)
o = msgp.AppendBool(o, z.ByHost)
// string "Disks"
o = append(o, 0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
for za0002 := range z.Disks {
o = msgp.AppendString(o, z.Disks[za0002])
}
// string "ByDisk"
o = append(o, 0xa6, 0x42, 0x79, 0x44, 0x69, 0x73, 0x6b)
o = msgp.AppendBool(o, z.ByDisk)
// string "ByJobID"
o = append(o, 0xa7, 0x42, 0x79, 0x4a, 0x6f, 0x62, 0x49, 0x44)
o = msgp.AppendString(o, z.ByJobID)
// string "ByDepID"
o = append(o, 0xa7, 0x42, 0x79, 0x44, 0x65, 0x70, 0x49, 0x44)
o = msgp.AppendString(o, z.ByDepID)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricsOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Type":
{
var zb0002 uint32
zb0002, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = MetricType(zb0002)
}
case "N":
z.N, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "N")
return
}
case "Interval":
z.Interval, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Interval")
return
}
case "Hosts":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Hosts")
return
}
if cap(z.Hosts) >= int(zb0003) {
z.Hosts = (z.Hosts)[:zb0003]
} else {
z.Hosts = make([]string, zb0003)
}
for za0001 := range z.Hosts {
z.Hosts[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Hosts", za0001)
return
}
}
case "ByHost":
z.ByHost, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
case "Disks":
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0004) {
z.Disks = (z.Disks)[:zb0004]
} else {
z.Disks = make([]string, zb0004)
}
for za0002 := range z.Disks {
z.Disks[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0002)
return
}
}
case "ByDisk":
z.ByDisk, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
case "ByJobID":
z.ByJobID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByJobID")
return
}
case "ByDepID":
z.ByDepID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByDepID")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MetricsOptions) Msgsize() (s int) {
s = 1 + 5 + msgp.Uint32Size + 2 + msgp.IntSize + 9 + msgp.DurationSize + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Hosts {
s += msgp.StringPrefixSize + len(z.Hosts[za0001])
}
s += 7 + msgp.BoolSize + 6 + msgp.ArrayHeaderSize
for za0002 := range z.Disks {
s += msgp.StringPrefixSize + len(z.Disks[za0002])
}
s += 7 + msgp.BoolSize + 8 + msgp.StringPrefixSize + len(z.ByJobID) + 8 + msgp.StringPrefixSize + len(z.ByDepID)
return
}
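
// Editorial sketch (not part of the msgp-generated API): MetricsOptions carries no
// omit-empty handling, so all nine fields are written under a fixed "map header,
// size 9" even when zero. A caller assembling a request might marshal into a buffer
// pre-sized via Msgsize; the option values below are illustrative assumptions only.
//
//	opts := MetricsOptions{
//		Interval: 10 * time.Second,       // assumes the caller imports "time"
//		ByHost:   true,
//		Hosts:    []string{"node1:9000"}, // made-up host, not a real endpoint
//	}
//	buf, err := opts.MarshalMsg(make([]byte, 0, opts.Msgsize()))
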
// DecodeMsg implements msgp.Decodable
func (z *NetMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "interfaceName":
z.InterfaceName, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "InterfaceName")
return
}
case "netstats":
err = (*procfsNetDevLine)(&z.NetStats).DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NetStats")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *NetMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "collected"
err = en.Append(0x83, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
// write "interfaceName"
err = en.Append(0xad, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.InterfaceName)
if err != nil {
err = msgp.WrapError(err, "InterfaceName")
return
}
// write "netstats"
err = en.Append(0xa8, 0x6e, 0x65, 0x74, 0x73, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = (*procfsNetDevLine)(&z.NetStats).EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NetStats")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *NetMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "collected"
o = append(o, 0x83, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
// string "interfaceName"
o = append(o, 0xad, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.InterfaceName)
// string "netstats"
o = append(o, 0xa8, 0x6e, 0x65, 0x74, 0x73, 0x74, 0x61, 0x74, 0x73)
o, err = (*procfsNetDevLine)(&z.NetStats).MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NetStats")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *NetMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "interfaceName":
z.InterfaceName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "InterfaceName")
return
}
case "netstats":
bts, err = (*procfsNetDevLine)(&z.NetStats).UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NetStats")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *NetMetrics) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 14 + msgp.StringPrefixSize + len(z.InterfaceName) + 9 + (*procfsNetDevLine)(&z.NetStats).Msgsize()
return
}
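
// Editorial sketch (not part of the msgp-generated API): NetMetrics is a fixed
// three-entry map and delegates the "netstats" value to the nested procfsNetDevLine
// codec, so a roundtrip only needs the top-level methods. The interface name below is
// an illustrative assumption.
//
//	nm := NetMetrics{InterfaceName: "eth0"}
//	buf, err := nm.MarshalMsg(nil)
//	if err != nil {
//		return err
//	}
//	var back NetMetrics
//	_, err = back.UnmarshalMsg(buf)
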
// DecodeMsg implements msgp.Decodable
func (z *OSMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "life_time_ops":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
if z.LifeTimeOps == nil {
z.LifeTimeOps = make(map[string]uint64, zb0002)
} else if len(z.LifeTimeOps) > 0 {
for key := range z.LifeTimeOps {
delete(z.LifeTimeOps, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 uint64
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
za0002, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0001)
return
}
z.LifeTimeOps[za0001] = za0002
}
zb0001Mask |= 0x1
case "last_minute":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
for zb0003 > 0 {
zb0003--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "operations":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
if z.LastMinute.Operations == nil {
z.LastMinute.Operations = make(map[string]TimedAction, zb0004)
} else if len(z.LastMinute.Operations) > 0 {
for key := range z.LastMinute.Operations {
delete(z.LastMinute.Operations, key)
}
}
for zb0004 > 0 {
zb0004--
var za0003 string
var za0004 TimedAction
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
z.LastMinute.Operations[za0003] = za0004
}
zb0003Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
// Clear omitted fields.
if (zb0003Mask & 0x1) == 0 {
z.LastMinute.Operations = nil
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.LifeTimeOps = nil
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *OSMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(3)
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
if z.LifeTimeOps == nil {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "collected"
err = en.Append(0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "life_time_ops"
err = en.Append(0xad, 0x6c, 0x69, 0x66, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LifeTimeOps)))
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
for za0001, za0002 := range z.LifeTimeOps {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
err = en.WriteUint64(za0002)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0001)
return
}
}
}
// write "last_minute"
err = en.Append(0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65)
if err != nil {
return
}
// check for omitted fields
zb0002Len := uint32(1)
var zb0002Mask uint8 /* 1 bits */
_ = zb0002Mask
if z.LastMinute.Operations == nil {
zb0002Len--
zb0002Mask |= 0x1
}
// variable map header, size zb0002Len
err = en.Append(0x80 | uint8(zb0002Len))
if err != nil {
return
}
if (zb0002Mask & 0x1) == 0 { // if not omitted
// write "operations"
err = en.Append(0xaa, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LastMinute.Operations)))
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
for za0003, za0004 := range z.LastMinute.Operations {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
err = za0004.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *OSMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(3)
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
if z.LifeTimeOps == nil {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "collected"
o = append(o, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "life_time_ops"
o = append(o, 0xad, 0x6c, 0x69, 0x66, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.LifeTimeOps)))
for za0001, za0002 := range z.LifeTimeOps {
o = msgp.AppendString(o, za0001)
o = msgp.AppendUint64(o, za0002)
}
}
// string "last_minute"
o = append(o, 0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65)
// check for omitted fields
zb0002Len := uint32(1)
var zb0002Mask uint8 /* 1 bits */
_ = zb0002Mask
if z.LastMinute.Operations == nil {
zb0002Len--
zb0002Mask |= 0x1
}
// variable map header, size zb0002Len
o = append(o, 0x80|uint8(zb0002Len))
if (zb0002Mask & 0x1) == 0 { // if not omitted
// string "operations"
o = append(o, 0xaa, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.LastMinute.Operations)))
for za0003, za0004 := range z.LastMinute.Operations {
o = msgp.AppendString(o, za0003)
o, err = za0004.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *OSMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "life_time_ops":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
if z.LifeTimeOps == nil {
z.LifeTimeOps = make(map[string]uint64, zb0002)
} else if len(z.LifeTimeOps) > 0 {
for key := range z.LifeTimeOps {
delete(z.LifeTimeOps, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 uint64
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
za0002, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0001)
return
}
z.LifeTimeOps[za0001] = za0002
}
zb0001Mask |= 0x1
case "last_minute":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
var zb0003Mask uint8 /* 1 bits */
_ = zb0003Mask
for zb0003 > 0 {
zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "operations":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
if z.LastMinute.Operations == nil {
z.LastMinute.Operations = make(map[string]TimedAction, zb0004)
} else if len(z.LastMinute.Operations) > 0 {
for key := range z.LastMinute.Operations {
delete(z.LastMinute.Operations, key)
}
}
for zb0004 > 0 {
var za0003 string
var za0004 TimedAction
zb0004--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations")
return
}
bts, err = za0004.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Operations", za0003)
return
}
z.LastMinute.Operations[za0003] = za0004
}
zb0003Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
// Clear omitted fields.
if (zb0003Mask & 0x1) == 0 {
z.LastMinute.Operations = nil
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.LifeTimeOps = nil
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *OSMetrics) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 14 + msgp.MapHeaderSize
if z.LifeTimeOps != nil {
for za0001, za0002 := range z.LifeTimeOps {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.Uint64Size
}
}
s += 12 + 1 + 11 + msgp.MapHeaderSize
if z.LastMinute.Operations != nil {
for za0003, za0004 := range z.LastMinute.Operations {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize()
}
}
return
}
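
// Editorial sketch (not part of the msgp-generated API): OSMetrics nests a second
// omit-empty map ("last_minute" -> "operations"). A nil map is dropped from the
// encoding and comes back nil, while an empty-but-non-nil map is written as an empty
// msgpack map and survives the roundtrip non-nil. The key below is a made-up example.
//
//	var osm OSMetrics
//	osm.LifeTimeOps = map[string]uint64{"open": 123} // hypothetical operation name
//	buf, _ := osm.MarshalMsg(nil)
//	var back OSMetrics
//	_, _ = back.UnmarshalMsg(buf)
//	// back.LastMinute.Operations == nil: it was nil when encoded, so it was omitted
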
// DecodeMsg implements msgp.Decodable
func (z *RPCMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collectedAt":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "connected":
z.Connected, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Connected")
return
}
case "reconnectCount":
z.ReconnectCount, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "ReconnectCount")
return
}
case "disconnected":
z.Disconnected, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Disconnected")
return
}
case "outgoingStreams":
z.OutgoingStreams, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "OutgoingStreams")
return
}
case "incomingStreams":
z.IncomingStreams, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "IncomingStreams")
return
}
case "outgoingBytes":
z.OutgoingBytes, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "OutgoingBytes")
return
}
case "incomingBytes":
z.IncomingBytes, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "IncomingBytes")
return
}
case "outgoingMessages":
z.OutgoingMessages, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "OutgoingMessages")
return
}
case "incomingMessages":
z.IncomingMessages, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "IncomingMessages")
return
}
case "outQueue":
z.OutQueue, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "OutQueue")
return
}
case "lastPongTime":
z.LastPongTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastPongTime")
return
}
case "lastPingMS":
z.LastPingMS, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "LastPingMS")
return
}
case "maxPingDurMS":
z.MaxPingDurMS, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "MaxPingDurMS")
return
}
case "lastConnectTime":
z.LastConnectTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastConnectTime")
return
}
case "byDestination":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ByDestination")
return
}
if z.ByDestination == nil {
z.ByDestination = make(map[string]RPCMetrics, zb0002)
} else if len(z.ByDestination) > 0 {
for key := range z.ByDestination {
delete(z.ByDestination, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 RPCMetrics
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ByDestination")
return
}
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "ByDestination", za0001)
return
}
z.ByDestination[za0001] = za0002
}
zb0001Mask |= 0x1
case "byCaller":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ByCaller")
return
}
if z.ByCaller == nil {
z.ByCaller = make(map[string]RPCMetrics, zb0003)
} else if len(z.ByCaller) > 0 {
for key := range z.ByCaller {
delete(z.ByCaller, key)
}
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 RPCMetrics
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ByCaller")
return
}
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "ByCaller", za0003)
return
}
z.ByCaller[za0003] = za0004
}
zb0001Mask |= 0x2
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.ByDestination = nil
}
if (zb0001Mask & 0x2) == 0 {
z.ByCaller = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *RPCMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(17)
var zb0001Mask uint32 /* 17 bits */
_ = zb0001Mask
if z.ByDestination == nil {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.ByCaller == nil {
zb0001Len--
zb0001Mask |= 0x10000
}
// variable map header, size zb0001Len
err = en.WriteMapHeader(zb0001Len)
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "collectedAt"
err = en.Append(0xab, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
// write "connected"
err = en.Append(0xa9, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt(z.Connected)
if err != nil {
err = msgp.WrapError(err, "Connected")
return
}
// write "reconnectCount"
err = en.Append(0xae, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.ReconnectCount)
if err != nil {
err = msgp.WrapError(err, "ReconnectCount")
return
}
// write "disconnected"
err = en.Append(0xac, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt(z.Disconnected)
if err != nil {
err = msgp.WrapError(err, "Disconnected")
return
}
// write "outgoingStreams"
err = en.Append(0xaf, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.OutgoingStreams)
if err != nil {
err = msgp.WrapError(err, "OutgoingStreams")
return
}
// write "incomingStreams"
err = en.Append(0xaf, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.IncomingStreams)
if err != nil {
err = msgp.WrapError(err, "IncomingStreams")
return
}
// write "outgoingBytes"
err = en.Append(0xad, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.OutgoingBytes)
if err != nil {
err = msgp.WrapError(err, "OutgoingBytes")
return
}
// write "incomingBytes"
err = en.Append(0xad, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.IncomingBytes)
if err != nil {
err = msgp.WrapError(err, "IncomingBytes")
return
}
// write "outgoingMessages"
err = en.Append(0xb0, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.OutgoingMessages)
if err != nil {
err = msgp.WrapError(err, "OutgoingMessages")
return
}
// write "incomingMessages"
err = en.Append(0xb0, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.IncomingMessages)
if err != nil {
err = msgp.WrapError(err, "IncomingMessages")
return
}
// write "outQueue"
err = en.Append(0xa8, 0x6f, 0x75, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65)
if err != nil {
return
}
err = en.WriteInt(z.OutQueue)
if err != nil {
err = msgp.WrapError(err, "OutQueue")
return
}
// write "lastPongTime"
err = en.Append(0xac, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x6f, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastPongTime)
if err != nil {
err = msgp.WrapError(err, "LastPongTime")
return
}
// write "lastPingMS"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x69, 0x6e, 0x67, 0x4d, 0x53)
if err != nil {
return
}
err = en.WriteFloat64(z.LastPingMS)
if err != nil {
err = msgp.WrapError(err, "LastPingMS")
return
}
// write "maxPingDurMS"
err = en.Append(0xac, 0x6d, 0x61, 0x78, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x75, 0x72, 0x4d, 0x53)
if err != nil {
return
}
err = en.WriteFloat64(z.MaxPingDurMS)
if err != nil {
err = msgp.WrapError(err, "MaxPingDurMS")
return
}
// write "lastConnectTime"
err = en.Append(0xaf, 0x6c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastConnectTime)
if err != nil {
err = msgp.WrapError(err, "LastConnectTime")
return
}
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// write "byDestination"
err = en.Append(0xad, 0x62, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.ByDestination)))
if err != nil {
err = msgp.WrapError(err, "ByDestination")
return
}
for za0001, za0002 := range z.ByDestination {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "ByDestination")
return
}
err = za0002.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "ByDestination", za0001)
return
}
}
}
if (zb0001Mask & 0x10000) == 0 { // if not omitted
// write "byCaller"
err = en.Append(0xa8, 0x62, 0x79, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.ByCaller)))
if err != nil {
err = msgp.WrapError(err, "ByCaller")
return
}
for za0003, za0004 := range z.ByCaller {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "ByCaller")
return
}
err = za0004.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "ByCaller", za0003)
return
}
}
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *RPCMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(17)
var zb0001Mask uint32 /* 17 bits */
_ = zb0001Mask
if z.ByDestination == nil {
zb0001Len--
zb0001Mask |= 0x8000
}
if z.ByCaller == nil {
zb0001Len--
zb0001Mask |= 0x10000
}
// variable map header, size zb0001Len
o = msgp.AppendMapHeader(o, zb0001Len)
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "collectedAt"
o = append(o, 0xab, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.CollectedAt)
// string "connected"
o = append(o, 0xa9, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendInt(o, z.Connected)
// string "reconnectCount"
o = append(o, 0xae, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt(o, z.ReconnectCount)
// string "disconnected"
o = append(o, 0xac, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendInt(o, z.Disconnected)
// string "outgoingStreams"
o = append(o, 0xaf, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73)
o = msgp.AppendInt(o, z.OutgoingStreams)
// string "incomingStreams"
o = append(o, 0xaf, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73)
o = msgp.AppendInt(o, z.IncomingStreams)
// string "outgoingBytes"
o = append(o, 0xad, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendInt64(o, z.OutgoingBytes)
// string "incomingBytes"
o = append(o, 0xad, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendInt64(o, z.IncomingBytes)
// string "outgoingMessages"
o = append(o, 0xb0, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73)
o = msgp.AppendInt64(o, z.OutgoingMessages)
// string "incomingMessages"
o = append(o, 0xb0, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73)
o = msgp.AppendInt64(o, z.IncomingMessages)
// string "outQueue"
o = append(o, 0xa8, 0x6f, 0x75, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65)
o = msgp.AppendInt(o, z.OutQueue)
// string "lastPongTime"
o = append(o, 0xac, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x6f, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendTime(o, z.LastPongTime)
// string "lastPingMS"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x69, 0x6e, 0x67, 0x4d, 0x53)
o = msgp.AppendFloat64(o, z.LastPingMS)
// string "maxPingDurMS"
o = append(o, 0xac, 0x6d, 0x61, 0x78, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x75, 0x72, 0x4d, 0x53)
o = msgp.AppendFloat64(o, z.MaxPingDurMS)
// string "lastConnectTime"
o = append(o, 0xaf, 0x6c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendTime(o, z.LastConnectTime)
if (zb0001Mask & 0x8000) == 0 { // if not omitted
// string "byDestination"
o = append(o, 0xad, 0x62, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e)
o = msgp.AppendMapHeader(o, uint32(len(z.ByDestination)))
for za0001, za0002 := range z.ByDestination {
o = msgp.AppendString(o, za0001)
o, err = za0002.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "ByDestination", za0001)
return
}
}
}
if (zb0001Mask & 0x10000) == 0 { // if not omitted
// string "byCaller"
o = append(o, 0xa8, 0x62, 0x79, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72)
o = msgp.AppendMapHeader(o, uint32(len(z.ByCaller)))
for za0003, za0004 := range z.ByCaller {
o = msgp.AppendString(o, za0003)
o, err = za0004.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "ByCaller", za0003)
return
}
}
}
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *RPCMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collectedAt":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "connected":
z.Connected, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Connected")
return
}
case "reconnectCount":
z.ReconnectCount, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReconnectCount")
return
}
case "disconnected":
z.Disconnected, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disconnected")
return
}
case "outgoingStreams":
z.OutgoingStreams, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OutgoingStreams")
return
}
case "incomingStreams":
z.IncomingStreams, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "IncomingStreams")
return
}
case "outgoingBytes":
z.OutgoingBytes, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OutgoingBytes")
return
}
case "incomingBytes":
z.IncomingBytes, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "IncomingBytes")
return
}
case "outgoingMessages":
z.OutgoingMessages, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OutgoingMessages")
return
}
case "incomingMessages":
z.IncomingMessages, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "IncomingMessages")
return
}
case "outQueue":
z.OutQueue, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OutQueue")
return
}
case "lastPongTime":
z.LastPongTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastPongTime")
return
}
case "lastPingMS":
z.LastPingMS, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastPingMS")
return
}
case "maxPingDurMS":
z.MaxPingDurMS, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "MaxPingDurMS")
return
}
case "lastConnectTime":
z.LastConnectTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastConnectTime")
return
}
case "byDestination":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByDestination")
return
}
if z.ByDestination == nil {
z.ByDestination = make(map[string]RPCMetrics, zb0002)
} else if len(z.ByDestination) > 0 {
for key := range z.ByDestination {
delete(z.ByDestination, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 RPCMetrics
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByDestination")
return
}
bts, err = za0002.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "ByDestination", za0001)
return
}
z.ByDestination[za0001] = za0002
}
zb0001Mask |= 0x1
case "byCaller":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByCaller")
return
}
if z.ByCaller == nil {
z.ByCaller = make(map[string]RPCMetrics, zb0003)
} else if len(z.ByCaller) > 0 {
for key := range z.ByCaller {
delete(z.ByCaller, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 RPCMetrics
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByCaller")
return
}
bts, err = za0004.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "ByCaller", za0003)
return
}
z.ByCaller[za0003] = za0004
}
zb0001Mask |= 0x2
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.ByDestination = nil
}
if (zb0001Mask & 0x2) == 0 {
z.ByCaller = nil
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *RPCMetrics) Msgsize() (s int) {
s = 3 + 12 + msgp.TimeSize + 10 + msgp.IntSize + 15 + msgp.IntSize + 13 + msgp.IntSize + 16 + msgp.IntSize + 16 + msgp.IntSize + 14 + msgp.Int64Size + 14 + msgp.Int64Size + 17 + msgp.Int64Size + 17 + msgp.Int64Size + 9 + msgp.IntSize + 13 + msgp.TimeSize + 11 + msgp.Float64Size + 13 + msgp.Float64Size + 16 + msgp.TimeSize + 14 + msgp.MapHeaderSize
if z.ByDestination != nil {
for za0001, za0002 := range z.ByDestination {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
}
}
s += 9 + msgp.MapHeaderSize
if z.ByCaller != nil {
for za0003, za0004 := range z.ByCaller {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize()
}
}
return
}
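
// NOTE: illustrative usage sketch, not part of the msgp-generated code. It shows
// how the MarshalMsg/UnmarshalMsg methods above are typically used to round-trip
// an RPCMetrics value through its MessagePack encoding. The import path
// github.com/minio/madmin-go/v3 and package name madmin are assumptions based on
// this module's name and major version.
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		in := madmin.RPCMetrics{
//			CollectedAt: time.Now(),
//			Connected:   4,
//		}
//		// MarshalMsg appends the MessagePack encoding to the supplied slice.
//		buf, err := in.MarshalMsg(nil)
//		if err != nil {
//			panic(err)
//		}
//		var out madmin.RPCMetrics
//		// UnmarshalMsg decodes from buf and returns any unread remainder.
//		if _, err := out.UnmarshalMsg(buf); err != nil {
//			panic(err)
//		}
//		fmt.Println(out.Connected) // 4
//	}
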
// DecodeMsg implements msgp.Decodable
func (z *RealtimeMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "errors":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
if cap(z.Errors) >= int(zb0002) {
z.Errors = (z.Errors)[:zb0002]
} else {
z.Errors = make([]string, zb0002)
}
for za0001 := range z.Errors {
z.Errors[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
zb0001Mask |= 0x1
case "hosts":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Hosts")
return
}
if cap(z.Hosts) >= int(zb0003) {
z.Hosts = (z.Hosts)[:zb0003]
} else {
z.Hosts = make([]string, zb0003)
}
for za0002 := range z.Hosts {
z.Hosts[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Hosts", za0002)
return
}
}
case "aggregated":
err = z.Aggregated.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Aggregated")
return
}
case "by_host":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
if z.ByHost == nil {
z.ByHost = make(map[string]Metrics, zb0004)
} else if len(z.ByHost) > 0 {
for key := range z.ByHost {
delete(z.ByHost, key)
}
}
for zb0004 > 0 {
zb0004--
var za0003 string
var za0004 Metrics
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "ByHost", za0003)
return
}
z.ByHost[za0003] = za0004
}
zb0001Mask |= 0x2
case "by_disk":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
if z.ByDisk == nil {
z.ByDisk = make(map[string]DiskMetric, zb0005)
} else if len(z.ByDisk) > 0 {
for key := range z.ByDisk {
delete(z.ByDisk, key)
}
}
for zb0005 > 0 {
zb0005--
var za0005 string
var za0006 DiskMetric
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
err = za0006.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "ByDisk", za0005)
return
}
z.ByDisk[za0005] = za0006
}
zb0001Mask |= 0x4
case "final":
z.Final, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Final")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.Errors = nil
}
if (zb0001Mask & 0x2) == 0 {
z.ByHost = nil
}
if (zb0001Mask & 0x4) == 0 {
z.ByDisk = nil
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *RealtimeMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(6)
var zb0001Mask uint8 /* 6 bits */
_ = zb0001Mask
if z.Errors == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.ByHost == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.ByDisk == nil {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "errors"
err = en.Append(0xa6, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Errors)))
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
for za0001 := range z.Errors {
err = en.WriteString(z.Errors[za0001])
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
}
// write "hosts"
err = en.Append(0xa5, 0x68, 0x6f, 0x73, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Hosts)))
if err != nil {
err = msgp.WrapError(err, "Hosts")
return
}
for za0002 := range z.Hosts {
err = en.WriteString(z.Hosts[za0002])
if err != nil {
err = msgp.WrapError(err, "Hosts", za0002)
return
}
}
// write "aggregated"
err = en.Append(0xaa, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = z.Aggregated.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Aggregated")
return
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "by_host"
err = en.Append(0xa7, 0x62, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.ByHost)))
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
for za0003, za0004 := range z.ByHost {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
err = za0004.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "ByHost", za0003)
return
}
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "by_disk"
err = en.Append(0xa7, 0x62, 0x79, 0x5f, 0x64, 0x69, 0x73, 0x6b)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.ByDisk)))
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
for za0005, za0006 := range z.ByDisk {
err = en.WriteString(za0005)
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
err = za0006.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "ByDisk", za0005)
return
}
}
}
// write "final"
err = en.Append(0xa5, 0x66, 0x69, 0x6e, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteBool(z.Final)
if err != nil {
err = msgp.WrapError(err, "Final")
return
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *RealtimeMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(6)
var zb0001Mask uint8 /* 6 bits */
_ = zb0001Mask
if z.Errors == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.ByHost == nil {
zb0001Len--
zb0001Mask |= 0x8
}
if z.ByDisk == nil {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "errors"
o = append(o, 0xa6, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Errors)))
for za0001 := range z.Errors {
o = msgp.AppendString(o, z.Errors[za0001])
}
}
// string "hosts"
o = append(o, 0xa5, 0x68, 0x6f, 0x73, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Hosts)))
for za0002 := range z.Hosts {
o = msgp.AppendString(o, z.Hosts[za0002])
}
// string "aggregated"
o = append(o, 0xaa, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64)
o, err = z.Aggregated.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Aggregated")
return
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "by_host"
o = append(o, 0xa7, 0x62, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74)
o = msgp.AppendMapHeader(o, uint32(len(z.ByHost)))
for za0003, za0004 := range z.ByHost {
o = msgp.AppendString(o, za0003)
o, err = za0004.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "ByHost", za0003)
return
}
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "by_disk"
o = append(o, 0xa7, 0x62, 0x79, 0x5f, 0x64, 0x69, 0x73, 0x6b)
o = msgp.AppendMapHeader(o, uint32(len(z.ByDisk)))
for za0005, za0006 := range z.ByDisk {
o = msgp.AppendString(o, za0005)
o, err = za0006.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "ByDisk", za0005)
return
}
}
}
// string "final"
o = append(o, 0xa5, 0x66, 0x69, 0x6e, 0x61, 0x6c)
o = msgp.AppendBool(o, z.Final)
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *RealtimeMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "errors":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
if cap(z.Errors) >= int(zb0002) {
z.Errors = (z.Errors)[:zb0002]
} else {
z.Errors = make([]string, zb0002)
}
for za0001 := range z.Errors {
z.Errors[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
zb0001Mask |= 0x1
case "hosts":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Hosts")
return
}
if cap(z.Hosts) >= int(zb0003) {
z.Hosts = (z.Hosts)[:zb0003]
} else {
z.Hosts = make([]string, zb0003)
}
for za0002 := range z.Hosts {
z.Hosts[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Hosts", za0002)
return
}
}
case "aggregated":
bts, err = z.Aggregated.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Aggregated")
return
}
case "by_host":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
if z.ByHost == nil {
z.ByHost = make(map[string]Metrics, zb0004)
} else if len(z.ByHost) > 0 {
for key := range z.ByHost {
delete(z.ByHost, key)
}
}
for zb0004 > 0 {
var za0003 string
var za0004 Metrics
zb0004--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByHost")
return
}
bts, err = za0004.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "ByHost", za0003)
return
}
z.ByHost[za0003] = za0004
}
zb0001Mask |= 0x2
case "by_disk":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
if z.ByDisk == nil {
z.ByDisk = make(map[string]DiskMetric, zb0005)
} else if len(z.ByDisk) > 0 {
for key := range z.ByDisk {
delete(z.ByDisk, key)
}
}
for zb0005 > 0 {
var za0005 string
var za0006 DiskMetric
zb0005--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ByDisk")
return
}
bts, err = za0006.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "ByDisk", za0005)
return
}
z.ByDisk[za0005] = za0006
}
zb0001Mask |= 0x4
case "final":
z.Final, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Final")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.Errors = nil
}
if (zb0001Mask & 0x2) == 0 {
z.ByHost = nil
}
if (zb0001Mask & 0x4) == 0 {
z.ByDisk = nil
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *RealtimeMetrics) Msgsize() (s int) {
s = 1 + 7 + msgp.ArrayHeaderSize
for za0001 := range z.Errors {
s += msgp.StringPrefixSize + len(z.Errors[za0001])
}
s += 6 + msgp.ArrayHeaderSize
for za0002 := range z.Hosts {
s += msgp.StringPrefixSize + len(z.Hosts[za0002])
}
s += 11 + z.Aggregated.Msgsize() + 8 + msgp.MapHeaderSize
if z.ByHost != nil {
for za0003, za0004 := range z.ByHost {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize()
}
}
s += 8 + msgp.MapHeaderSize
if z.ByDisk != nil {
for za0005, za0006 := range z.ByDisk {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + za0006.Msgsize()
}
}
s += 6 + msgp.BoolSize
return
}

// DecodeMsg implements msgp.Decodable
func (z *ReplicateInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lastObject":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "objects":
z.Objects, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "objectsFailed":
z.ObjectsFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "bytesTransferred":
z.BytesTransferred, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
case "bytesFailed":
z.BytesFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *ReplicateInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "lastBucket"
err = en.Append(0x86, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "lastObject"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "objects"
err = en.Append(0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Objects)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
// write "objectsFailed"
err = en.Append(0xad, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "bytesTransferred"
err = en.Append(0xb0, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.BytesTransferred)
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
// write "bytesFailed"
err = en.Append(0xab, 0x62, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *ReplicateInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "lastBucket"
o = append(o, 0x86, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "lastObject"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "objects"
o = append(o, 0xa7, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73)
o = msgp.AppendInt64(o, z.Objects)
// string "objectsFailed"
o = append(o, 0xad, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendInt64(o, z.ObjectsFailed)
// string "bytesTransferred"
o = append(o, 0xb0, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64)
o = msgp.AppendInt64(o, z.BytesTransferred)
// string "bytesFailed"
o = append(o, 0xab, 0x62, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendInt64(o, z.BytesFailed)
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *ReplicateInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "lastBucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "lastObject":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "objects":
z.Objects, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Objects")
return
}
case "objectsFailed":
z.ObjectsFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "bytesTransferred":
z.BytesTransferred, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesTransferred")
return
}
case "bytesFailed":
z.BytesFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ReplicateInfo) Msgsize() (s int) {
s = 1 + 11 + msgp.StringPrefixSize + len(z.Bucket) + 11 + msgp.StringPrefixSize + len(z.Object) + 8 + msgp.Int64Size + 14 + msgp.Int64Size + 17 + msgp.Int64Size + 12 + msgp.Int64Size
return
}
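
// NOTE: illustrative usage sketch, not part of the msgp-generated code. The
// EncodeMsg/DecodeMsg methods above operate on streaming msgp.Writer/msgp.Reader
// wrappers rather than raw byte slices; a minimal round-trip of ReplicateInfo
// over a bytes.Buffer could look like the following (import path
// github.com/minio/madmin-go/v3 is assumed).
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//
//		"github.com/minio/madmin-go/v3"
//		"github.com/tinylib/msgp/msgp"
//	)
//
//	func main() {
//		in := madmin.ReplicateInfo{Bucket: "src-bucket", Objects: 128}
//		var buf bytes.Buffer
//		w := msgp.NewWriter(&buf)
//		if err := in.EncodeMsg(w); err != nil {
//			panic(err)
//		}
//		// The writer is buffered; Flush before reading the encoded bytes back.
//		if err := w.Flush(); err != nil {
//			panic(err)
//		}
//		var out madmin.ReplicateInfo
//		if err := out.DecodeMsg(msgp.NewReader(&buf)); err != nil {
//			panic(err)
//		}
//		fmt.Println(out.Bucket, out.Objects) // src-bucket 128
//	}
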
// DecodeMsg implements msgp.Decodable
func (z *RuntimeMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "uintMetrics":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "UintMetrics")
return
}
if z.UintMetrics == nil {
z.UintMetrics = make(map[string]uint64, zb0002)
} else if len(z.UintMetrics) > 0 {
for key := range z.UintMetrics {
delete(z.UintMetrics, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 uint64
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "UintMetrics")
return
}
za0002, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "UintMetrics", za0001)
return
}
z.UintMetrics[za0001] = za0002
}
zb0001Mask |= 0x1
case "floatMetrics":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "FloatMetrics")
return
}
if z.FloatMetrics == nil {
z.FloatMetrics = make(map[string]float64, zb0003)
} else if len(z.FloatMetrics) > 0 {
for key := range z.FloatMetrics {
delete(z.FloatMetrics, key)
}
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 float64
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "FloatMetrics")
return
}
za0004, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "FloatMetrics", za0003)
return
}
z.FloatMetrics[za0003] = za0004
}
zb0001Mask |= 0x2
case "histMetrics":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "HistMetrics")
return
}
if z.HistMetrics == nil {
z.HistMetrics = make(map[string]metrics.Float64Histogram, zb0004)
} else if len(z.HistMetrics) > 0 {
for key := range z.HistMetrics {
delete(z.HistMetrics, key)
}
}
for zb0004 > 0 {
zb0004--
var za0005 string
var za0006 metrics.Float64Histogram
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HistMetrics")
return
}
err = (*localF64H)(&za0006).DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "HistMetrics", za0005)
return
}
z.HistMetrics[za0005] = za0006
}
zb0001Mask |= 0x4
case "n":
z.N, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "N")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.UintMetrics = nil
}
if (zb0001Mask & 0x2) == 0 {
z.FloatMetrics = nil
}
if (zb0001Mask & 0x4) == 0 {
z.HistMetrics = nil
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *RuntimeMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(4)
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
if z.UintMetrics == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.FloatMetrics == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.HistMetrics == nil {
zb0001Len--
zb0001Mask |= 0x4
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "uintMetrics"
err = en.Append(0xab, 0x75, 0x69, 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.UintMetrics)))
if err != nil {
err = msgp.WrapError(err, "UintMetrics")
return
}
for za0001, za0002 := range z.UintMetrics {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "UintMetrics")
return
}
err = en.WriteUint64(za0002)
if err != nil {
err = msgp.WrapError(err, "UintMetrics", za0001)
return
}
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "floatMetrics"
err = en.Append(0xac, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.FloatMetrics)))
if err != nil {
err = msgp.WrapError(err, "FloatMetrics")
return
}
for za0003, za0004 := range z.FloatMetrics {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "FloatMetrics")
return
}
err = en.WriteFloat64(za0004)
if err != nil {
err = msgp.WrapError(err, "FloatMetrics", za0003)
return
}
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "histMetrics"
err = en.Append(0xab, 0x68, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.HistMetrics)))
if err != nil {
err = msgp.WrapError(err, "HistMetrics")
return
}
for za0005, za0006 := range z.HistMetrics {
err = en.WriteString(za0005)
if err != nil {
err = msgp.WrapError(err, "HistMetrics")
return
}
err = (*localF64H)(&za0006).EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "HistMetrics", za0005)
return
}
}
}
// write "n"
err = en.Append(0xa1, 0x6e)
if err != nil {
return
}
err = en.WriteInt(z.N)
if err != nil {
err = msgp.WrapError(err, "N")
return
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *RuntimeMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(4)
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
if z.UintMetrics == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.FloatMetrics == nil {
zb0001Len--
zb0001Mask |= 0x2
}
if z.HistMetrics == nil {
zb0001Len--
zb0001Mask |= 0x4
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "uintMetrics"
o = append(o, 0xab, 0x75, 0x69, 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.UintMetrics)))
for za0001, za0002 := range z.UintMetrics {
o = msgp.AppendString(o, za0001)
o = msgp.AppendUint64(o, za0002)
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "floatMetrics"
o = append(o, 0xac, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.FloatMetrics)))
for za0003, za0004 := range z.FloatMetrics {
o = msgp.AppendString(o, za0003)
o = msgp.AppendFloat64(o, za0004)
}
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "histMetrics"
o = append(o, 0xab, 0x68, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.HistMetrics)))
for za0005, za0006 := range z.HistMetrics {
o = msgp.AppendString(o, za0005)
o, err = (*localF64H)(&za0006).MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "HistMetrics", za0005)
return
}
}
}
// string "n"
o = append(o, 0xa1, 0x6e)
o = msgp.AppendInt(o, z.N)
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *RuntimeMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "uintMetrics":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UintMetrics")
return
}
if z.UintMetrics == nil {
z.UintMetrics = make(map[string]uint64, zb0002)
} else if len(z.UintMetrics) > 0 {
for key := range z.UintMetrics {
delete(z.UintMetrics, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 uint64
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UintMetrics")
return
}
za0002, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "UintMetrics", za0001)
return
}
z.UintMetrics[za0001] = za0002
}
zb0001Mask |= 0x1
case "floatMetrics":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "FloatMetrics")
return
}
if z.FloatMetrics == nil {
z.FloatMetrics = make(map[string]float64, zb0003)
} else if len(z.FloatMetrics) > 0 {
for key := range z.FloatMetrics {
delete(z.FloatMetrics, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 float64
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "FloatMetrics")
return
}
za0004, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FloatMetrics", za0003)
return
}
z.FloatMetrics[za0003] = za0004
}
zb0001Mask |= 0x2
case "histMetrics":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HistMetrics")
return
}
if z.HistMetrics == nil {
z.HistMetrics = make(map[string]metrics.Float64Histogram, zb0004)
} else if len(z.HistMetrics) > 0 {
for key := range z.HistMetrics {
delete(z.HistMetrics, key)
}
}
for zb0004 > 0 {
var za0005 string
var za0006 metrics.Float64Histogram
zb0004--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HistMetrics")
return
}
bts, err = (*localF64H)(&za0006).UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "HistMetrics", za0005)
return
}
z.HistMetrics[za0005] = za0006
}
zb0001Mask |= 0x4
case "n":
z.N, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "N")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.UintMetrics = nil
}
if (zb0001Mask & 0x2) == 0 {
z.FloatMetrics = nil
}
if (zb0001Mask & 0x4) == 0 {
z.HistMetrics = nil
}
}
o = bts
return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *RuntimeMetrics) Msgsize() (s int) {
s = 1 + 12 + msgp.MapHeaderSize
if z.UintMetrics != nil {
for za0001, za0002 := range z.UintMetrics {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.Uint64Size
}
}
s += 13 + msgp.MapHeaderSize
if z.FloatMetrics != nil {
for za0003, za0004 := range z.FloatMetrics {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size
}
}
s += 12 + msgp.MapHeaderSize
if z.HistMetrics != nil {
for za0005, za0006 := range z.HistMetrics {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + (*localF64H)(&za0006).Msgsize()
}
}
s += 2 + msgp.IntSize
return
}
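
// NOTE: illustrative usage sketch, not part of the msgp-generated code. Msgsize
// above returns an upper bound, so it can be used to pre-size the destination
// slice handed to MarshalMsg and avoid a reallocation during encoding
// (package name madmin and import path github.com/minio/madmin-go/v3 assumed).
//
//	var rm madmin.RuntimeMetrics
//	buf := make([]byte, 0, rm.Msgsize())
//	buf, err := rm.MarshalMsg(buf)
//	if err != nil {
//		// handle the encode error
//	}
//	_ = buf // buf now holds the MessagePack encoding
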
// DecodeMsg implements msgp.Decodable
func (z *ScannerMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "current_cycle":
z.CurrentCycle, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "CurrentCycle")
return
}
case "current_started":
z.CurrentStarted, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CurrentStarted")
return
}
case "cycle_complete_times":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "CyclesCompletedAt")
return
}
if cap(z.CyclesCompletedAt) >= int(zb0002) {
z.CyclesCompletedAt = (z.CyclesCompletedAt)[:zb0002]
} else {
z.CyclesCompletedAt = make([]time.Time, zb0002)
}
for za0001 := range z.CyclesCompletedAt {
z.CyclesCompletedAt[za0001], err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CyclesCompletedAt", za0001)
return
}
}
case "ongoing_buckets":
z.OngoingBuckets, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "OngoingBuckets")
return
}
case "per_bucket_stats":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "PerBucketStats")
return
}
if z.PerBucketStats == nil {
z.PerBucketStats = make(map[string][]BucketScanInfo, zb0003)
} else if len(z.PerBucketStats) > 0 {
for key := range z.PerBucketStats {
delete(z.PerBucketStats, key)
}
}
for zb0003 > 0 {
zb0003--
var za0002 string
var za0003 []BucketScanInfo
za0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "PerBucketStats")
return
}
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "PerBucketStats", za0002)
return
}
if cap(za0003) >= int(zb0004) {
za0003 = (za0003)[:zb0004]
} else {
za0003 = make([]BucketScanInfo, zb0004)
}
for za0004 := range za0003 {
err = za0003[za0004].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats", za0002, za0004)
return
}
}
z.PerBucketStats[za0002] = za0003
}
zb0001Mask |= 0x1
case "life_time_ops":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
if z.LifeTimeOps == nil {
z.LifeTimeOps = make(map[string]uint64, zb0005)
} else if len(z.LifeTimeOps) > 0 {
for key := range z.LifeTimeOps {
delete(z.LifeTimeOps, key)
}
}
for zb0005 > 0 {
zb0005--
var za0005 string
var za0006 uint64
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
za0006, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0005)
return
}
z.LifeTimeOps[za0005] = za0006
}
zb0001Mask |= 0x2
case "ilm_ops":
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM")
return
}
if z.LifeTimeILM == nil {
z.LifeTimeILM = make(map[string]uint64, zb0006)
} else if len(z.LifeTimeILM) > 0 {
for key := range z.LifeTimeILM {
delete(z.LifeTimeILM, key)
}
}
for zb0006 > 0 {
zb0006--
var za0007 string
var za0008 uint64
za0007, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM")
return
}
za0008, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM", za0007)
return
}
z.LifeTimeILM[za0007] = za0008
}
zb0001Mask |= 0x4
case "last_minute":
var zb0007 uint32
zb0007, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
var zb0007Mask uint8 /* 2 bits */
_ = zb0007Mask
for zb0007 > 0 {
zb0007--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "actions":
var zb0008 uint32
zb0008, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions")
return
}
if z.LastMinute.Actions == nil {
z.LastMinute.Actions = make(map[string]TimedAction, zb0008)
} else if len(z.LastMinute.Actions) > 0 {
for key := range z.LastMinute.Actions {
delete(z.LastMinute.Actions, key)
}
}
for zb0008 > 0 {
zb0008--
var za0009 string
var za0010 TimedAction
za0009, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions")
return
}
err = za0010.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions", za0009)
return
}
z.LastMinute.Actions[za0009] = za0010
}
zb0007Mask |= 0x1
case "ilm":
var zb0009 uint32
zb0009, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM")
return
}
if z.LastMinute.ILM == nil {
z.LastMinute.ILM = make(map[string]TimedAction, zb0009)
} else if len(z.LastMinute.ILM) > 0 {
for key := range z.LastMinute.ILM {
delete(z.LastMinute.ILM, key)
}
}
for zb0009 > 0 {
zb0009--
var za0011 string
var za0012 TimedAction
za0011, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM")
return
}
err = za0012.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM", za0011)
return
}
z.LastMinute.ILM[za0011] = za0012
}
zb0007Mask |= 0x2
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
// Clear omitted fields.
if zb0007Mask != 0x3 {
if (zb0007Mask & 0x1) == 0 {
z.LastMinute.Actions = nil
}
if (zb0007Mask & 0x2) == 0 {
z.LastMinute.ILM = nil
}
}
case "active":
var zb0010 uint32
zb0010, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "ActivePaths")
return
}
if cap(z.ActivePaths) >= int(zb0010) {
z.ActivePaths = (z.ActivePaths)[:zb0010]
} else {
z.ActivePaths = make([]string, zb0010)
}
for za0013 := range z.ActivePaths {
z.ActivePaths[za0013], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ActivePaths", za0013)
return
}
}
zb0001Mask |= 0x8
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.PerBucketStats = nil
}
if (zb0001Mask & 0x2) == 0 {
z.LifeTimeOps = nil
}
if (zb0001Mask & 0x4) == 0 {
z.LifeTimeILM = nil
}
if (zb0001Mask & 0x8) == 0 {
z.ActivePaths = nil
}
}
return
}

// EncodeMsg implements msgp.Encodable
func (z *ScannerMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(10)
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
if z.PerBucketStats == nil {
zb0001Len--
zb0001Mask |= 0x20
}
if z.LifeTimeOps == nil {
zb0001Len--
zb0001Mask |= 0x40
}
if z.LifeTimeILM == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.ActivePaths == nil {
zb0001Len--
zb0001Mask |= 0x200
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "collected"
err = en.Append(0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
// write "current_cycle"
err = en.Append(0xad, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x79, 0x63, 0x6c, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.CurrentCycle)
if err != nil {
err = msgp.WrapError(err, "CurrentCycle")
return
}
// write "current_started"
err = en.Append(0xaf, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CurrentStarted)
if err != nil {
err = msgp.WrapError(err, "CurrentStarted")
return
}
// write "cycle_complete_times"
err = en.Append(0xb4, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.CyclesCompletedAt)))
if err != nil {
err = msgp.WrapError(err, "CyclesCompletedAt")
return
}
for za0001 := range z.CyclesCompletedAt {
err = en.WriteTime(z.CyclesCompletedAt[za0001])
if err != nil {
err = msgp.WrapError(err, "CyclesCompletedAt", za0001)
return
}
}
// write "ongoing_buckets"
err = en.Append(0xaf, 0x6f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.OngoingBuckets)
if err != nil {
err = msgp.WrapError(err, "OngoingBuckets")
return
}
if (zb0001Mask & 0x20) == 0 { // if not omitted
// write "per_bucket_stats"
err = en.Append(0xb0, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.PerBucketStats)))
if err != nil {
err = msgp.WrapError(err, "PerBucketStats")
return
}
for za0002, za0003 := range z.PerBucketStats {
err = en.WriteString(za0002)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats")
return
}
err = en.WriteArrayHeader(uint32(len(za0003)))
if err != nil {
err = msgp.WrapError(err, "PerBucketStats", za0002)
return
}
for za0004 := range za0003 {
err = za0003[za0004].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats", za0002, za0004)
return
}
}
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "life_time_ops"
err = en.Append(0xad, 0x6c, 0x69, 0x66, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LifeTimeOps)))
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
for za0005, za0006 := range z.LifeTimeOps {
err = en.WriteString(za0005)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
err = en.WriteUint64(za0006)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0005)
return
}
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// write "ilm_ops"
err = en.Append(0xa7, 0x69, 0x6c, 0x6d, 0x5f, 0x6f, 0x70, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LifeTimeILM)))
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM")
return
}
for za0007, za0008 := range z.LifeTimeILM {
err = en.WriteString(za0007)
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM")
return
}
err = en.WriteUint64(za0008)
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM", za0007)
return
}
}
}
// write "last_minute"
err = en.Append(0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65)
if err != nil {
return
}
// check for omitted fields
zb0002Len := uint32(2)
var zb0002Mask uint8 /* 2 bits */
_ = zb0002Mask
if z.LastMinute.Actions == nil {
zb0002Len--
zb0002Mask |= 0x1
}
if z.LastMinute.ILM == nil {
zb0002Len--
zb0002Mask |= 0x2
}
// variable map header, size zb0002Len
err = en.Append(0x80 | uint8(zb0002Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0002Len != 0 {
if (zb0002Mask & 0x1) == 0 { // if not omitted
// write "actions"
err = en.Append(0xa7, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LastMinute.Actions)))
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions")
return
}
for za0009, za0010 := range z.LastMinute.Actions {
err = en.WriteString(za0009)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions")
return
}
err = za0010.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions", za0009)
return
}
}
}
if (zb0002Mask & 0x2) == 0 { // if not omitted
// write "ilm"
err = en.Append(0xa3, 0x69, 0x6c, 0x6d)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.LastMinute.ILM)))
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM")
return
}
for za0011, za0012 := range z.LastMinute.ILM {
err = en.WriteString(za0011)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM")
return
}
err = za0012.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM", za0011)
return
}
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// write "active"
err = en.Append(0xa6, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.ActivePaths)))
if err != nil {
err = msgp.WrapError(err, "ActivePaths")
return
}
for za0013 := range z.ActivePaths {
err = en.WriteString(z.ActivePaths[za0013])
if err != nil {
err = msgp.WrapError(err, "ActivePaths", za0013)
return
}
}
}
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *ScannerMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(10)
var zb0001Mask uint16 /* 10 bits */
_ = zb0001Mask
if z.PerBucketStats == nil {
zb0001Len--
zb0001Mask |= 0x20
}
if z.LifeTimeOps == nil {
zb0001Len--
zb0001Mask |= 0x40
}
if z.LifeTimeILM == nil {
zb0001Len--
zb0001Mask |= 0x80
}
if z.ActivePaths == nil {
zb0001Len--
zb0001Mask |= 0x200
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "collected"
o = append(o, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
// string "current_cycle"
o = append(o, 0xad, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x79, 0x63, 0x6c, 0x65)
o = msgp.AppendUint64(o, z.CurrentCycle)
// string "current_started"
o = append(o, 0xaf, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CurrentStarted)
// string "cycle_complete_times"
o = append(o, 0xb4, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.CyclesCompletedAt)))
for za0001 := range z.CyclesCompletedAt {
o = msgp.AppendTime(o, z.CyclesCompletedAt[za0001])
}
// string "ongoing_buckets"
o = append(o, 0xaf, 0x6f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendInt(o, z.OngoingBuckets)
if (zb0001Mask & 0x20) == 0 { // if not omitted
// string "per_bucket_stats"
o = append(o, 0xb0, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.PerBucketStats)))
for za0002, za0003 := range z.PerBucketStats {
o = msgp.AppendString(o, za0002)
o = msgp.AppendArrayHeader(o, uint32(len(za0003)))
for za0004 := range za0003 {
o, err = za0003[za0004].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats", za0002, za0004)
return
}
}
}
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "life_time_ops"
o = append(o, 0xad, 0x6c, 0x69, 0x66, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.LifeTimeOps)))
for za0005, za0006 := range z.LifeTimeOps {
o = msgp.AppendString(o, za0005)
o = msgp.AppendUint64(o, za0006)
}
}
if (zb0001Mask & 0x80) == 0 { // if not omitted
// string "ilm_ops"
o = append(o, 0xa7, 0x69, 0x6c, 0x6d, 0x5f, 0x6f, 0x70, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.LifeTimeILM)))
for za0007, za0008 := range z.LifeTimeILM {
o = msgp.AppendString(o, za0007)
o = msgp.AppendUint64(o, za0008)
}
}
// string "last_minute"
o = append(o, 0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65)
// check for omitted fields
zb0002Len := uint32(2)
var zb0002Mask uint8 /* 2 bits */
_ = zb0002Mask
if z.LastMinute.Actions == nil {
zb0002Len--
zb0002Mask |= 0x1
}
if z.LastMinute.ILM == nil {
zb0002Len--
zb0002Mask |= 0x2
}
// variable map header, size zb0002Len
o = append(o, 0x80|uint8(zb0002Len))
// skip if no fields are to be emitted
if zb0002Len != 0 {
if (zb0002Mask & 0x1) == 0 { // if not omitted
// string "actions"
o = append(o, 0xa7, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.LastMinute.Actions)))
for za0009, za0010 := range z.LastMinute.Actions {
o = msgp.AppendString(o, za0009)
o, err = za0010.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions", za0009)
return
}
}
}
if (zb0002Mask & 0x2) == 0 { // if not omitted
// string "ilm"
o = append(o, 0xa3, 0x69, 0x6c, 0x6d)
o = msgp.AppendMapHeader(o, uint32(len(z.LastMinute.ILM)))
for za0011, za0012 := range z.LastMinute.ILM {
o = msgp.AppendString(o, za0011)
o, err = za0012.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM", za0011)
return
}
}
}
}
if (zb0001Mask & 0x200) == 0 { // if not omitted
// string "active"
o = append(o, 0xa6, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65)
o = msgp.AppendArrayHeader(o, uint32(len(z.ActivePaths)))
for za0013 := range z.ActivePaths {
o = msgp.AppendString(o, z.ActivePaths[za0013])
}
}
}
return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *ScannerMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 4 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "current_cycle":
z.CurrentCycle, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "CurrentCycle")
return
}
case "current_started":
z.CurrentStarted, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CurrentStarted")
return
}
case "cycle_complete_times":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CyclesCompletedAt")
return
}
if cap(z.CyclesCompletedAt) >= int(zb0002) {
z.CyclesCompletedAt = (z.CyclesCompletedAt)[:zb0002]
} else {
z.CyclesCompletedAt = make([]time.Time, zb0002)
}
for za0001 := range z.CyclesCompletedAt {
z.CyclesCompletedAt[za0001], bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CyclesCompletedAt", za0001)
return
}
}
case "ongoing_buckets":
z.OngoingBuckets, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "OngoingBuckets")
return
}
case "per_bucket_stats":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats")
return
}
if z.PerBucketStats == nil {
z.PerBucketStats = make(map[string][]BucketScanInfo, zb0003)
} else if len(z.PerBucketStats) > 0 {
for key := range z.PerBucketStats {
delete(z.PerBucketStats, key)
}
}
for zb0003 > 0 {
var za0002 string
var za0003 []BucketScanInfo
zb0003--
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats")
return
}
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats", za0002)
return
}
if cap(za0003) >= int(zb0004) {
za0003 = (za0003)[:zb0004]
} else {
za0003 = make([]BucketScanInfo, zb0004)
}
for za0004 := range za0003 {
bts, err = za0003[za0004].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "PerBucketStats", za0002, za0004)
return
}
}
z.PerBucketStats[za0002] = za0003
}
zb0001Mask |= 0x1
case "life_time_ops":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
if z.LifeTimeOps == nil {
z.LifeTimeOps = make(map[string]uint64, zb0005)
} else if len(z.LifeTimeOps) > 0 {
for key := range z.LifeTimeOps {
delete(z.LifeTimeOps, key)
}
}
for zb0005 > 0 {
var za0005 string
var za0006 uint64
zb0005--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps")
return
}
za0006, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeOps", za0005)
return
}
z.LifeTimeOps[za0005] = za0006
}
zb0001Mask |= 0x2
case "ilm_ops":
var zb0006 uint32
zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM")
return
}
if z.LifeTimeILM == nil {
z.LifeTimeILM = make(map[string]uint64, zb0006)
} else if len(z.LifeTimeILM) > 0 {
for key := range z.LifeTimeILM {
delete(z.LifeTimeILM, key)
}
}
for zb0006 > 0 {
var za0007 string
var za0008 uint64
zb0006--
za0007, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM")
return
}
za0008, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LifeTimeILM", za0007)
return
}
z.LifeTimeILM[za0007] = za0008
}
zb0001Mask |= 0x4
case "last_minute":
var zb0007 uint32
zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
var zb0007Mask uint8 /* 2 bits */
_ = zb0007Mask
for zb0007 > 0 {
zb0007--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "actions":
var zb0008 uint32
zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions")
return
}
if z.LastMinute.Actions == nil {
z.LastMinute.Actions = make(map[string]TimedAction, zb0008)
} else if len(z.LastMinute.Actions) > 0 {
for key := range z.LastMinute.Actions {
delete(z.LastMinute.Actions, key)
}
}
for zb0008 > 0 {
var za0009 string
var za0010 TimedAction
zb0008--
za0009, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions")
return
}
bts, err = za0010.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Actions", za0009)
return
}
z.LastMinute.Actions[za0009] = za0010
}
zb0007Mask |= 0x1
case "ilm":
var zb0009 uint32
zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM")
return
}
if z.LastMinute.ILM == nil {
z.LastMinute.ILM = make(map[string]TimedAction, zb0009)
} else if len(z.LastMinute.ILM) > 0 {
for key := range z.LastMinute.ILM {
delete(z.LastMinute.ILM, key)
}
}
for zb0009 > 0 {
var za0011 string
var za0012 TimedAction
zb0009--
za0011, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM")
return
}
bts, err = za0012.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "ILM", za0011)
return
}
z.LastMinute.ILM[za0011] = za0012
}
zb0007Mask |= 0x2
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
// Clear omitted fields.
if zb0007Mask != 0x3 {
if (zb0007Mask & 0x1) == 0 {
z.LastMinute.Actions = nil
}
if (zb0007Mask & 0x2) == 0 {
z.LastMinute.ILM = nil
}
}
case "active":
var zb0010 uint32
zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ActivePaths")
return
}
if cap(z.ActivePaths) >= int(zb0010) {
z.ActivePaths = (z.ActivePaths)[:zb0010]
} else {
z.ActivePaths = make([]string, zb0010)
}
for za0013 := range z.ActivePaths {
z.ActivePaths[za0013], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ActivePaths", za0013)
return
}
}
zb0001Mask |= 0x8
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0xf {
if (zb0001Mask & 0x1) == 0 {
z.PerBucketStats = nil
}
if (zb0001Mask & 0x2) == 0 {
z.LifeTimeOps = nil
}
if (zb0001Mask & 0x4) == 0 {
z.LifeTimeILM = nil
}
if (zb0001Mask & 0x8) == 0 {
z.ActivePaths = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ScannerMetrics) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 14 + msgp.Uint64Size + 16 + msgp.TimeSize + 21 + msgp.ArrayHeaderSize + (len(z.CyclesCompletedAt) * (msgp.TimeSize)) + 16 + msgp.IntSize + 17 + msgp.MapHeaderSize
if z.PerBucketStats != nil {
for za0002, za0003 := range z.PerBucketStats {
_ = za0003
s += msgp.StringPrefixSize + len(za0002) + msgp.ArrayHeaderSize
for za0004 := range za0003 {
s += za0003[za0004].Msgsize()
}
}
}
s += 14 + msgp.MapHeaderSize
if z.LifeTimeOps != nil {
for za0005, za0006 := range z.LifeTimeOps {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + msgp.Uint64Size
}
}
s += 8 + msgp.MapHeaderSize
if z.LifeTimeILM != nil {
for za0007, za0008 := range z.LifeTimeILM {
_ = za0008
s += msgp.StringPrefixSize + len(za0007) + msgp.Uint64Size
}
}
s += 12 + 1 + 8 + msgp.MapHeaderSize
if z.LastMinute.Actions != nil {
for za0009, za0010 := range z.LastMinute.Actions {
_ = za0010
s += msgp.StringPrefixSize + len(za0009) + za0010.Msgsize()
}
}
s += 4 + msgp.MapHeaderSize
if z.LastMinute.ILM != nil {
for za0011, za0012 := range z.LastMinute.ILM {
_ = za0012
s += msgp.StringPrefixSize + len(za0011) + za0012.Msgsize()
}
}
s += 7 + msgp.ArrayHeaderSize
for za0013 := range z.ActivePaths {
s += msgp.StringPrefixSize + len(z.ActivePaths[za0013])
}
return
}
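
// Editorial note (not generated by msgp): the zb...Len / zb...Mask pairs used
// throughout this file implement omit-empty handling. On the encode side the
// length counter starts at the number of struct fields and is decremented,
// with the matching mask bit set, for every field holding its zero value, so
// the map header only announces fields that are actually written. On the
// decode side the mask records which keys were observed, and the trailing
// "Clear omitted fields" step resets any field whose bit was never set.
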
// DecodeMsg implements msgp.Decodable
func (z *SiteResyncMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "resyncStatus":
z.ResyncStatus, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ResyncStatus")
return
}
zb0001Mask |= 0x1
case "startTime":
z.StartTime, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "numBuckets":
z.NumBuckets, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "NumBuckets")
return
}
case "resyncID":
z.ResyncID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ResyncID")
return
}
case "deplID":
z.DeplID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "DeplID")
return
}
case "completedReplicationSize":
z.ReplicatedSize, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "replicationCount":
z.ReplicatedCount, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ReplicatedCount")
return
}
case "failedReplicationSize":
z.FailedSize, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
case "failedReplicationCount":
z.FailedCount, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
case "failedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "FailedBuckets")
return
}
if cap(z.FailedBuckets) >= int(zb0002) {
z.FailedBuckets = (z.FailedBuckets)[:zb0002]
} else {
z.FailedBuckets = make([]string, zb0002)
}
for za0001 := range z.FailedBuckets {
z.FailedBuckets[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "FailedBuckets", za0001)
return
}
}
case "bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
zb0001Mask |= 0x2
case "object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
zb0001Mask |= 0x4
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.ResyncStatus = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Bucket = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Object = ""
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *SiteResyncMetrics) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(14)
var zb0001Mask uint16 /* 14 bits */
_ = zb0001Mask
if z.ResyncStatus == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Bucket == "" {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.Object == "" {
zb0001Len--
zb0001Mask |= 0x2000
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "collected"
err = en.Append(0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.CollectedAt)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "resyncStatus"
err = en.Append(0xac, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73)
if err != nil {
return
}
err = en.WriteString(z.ResyncStatus)
if err != nil {
err = msgp.WrapError(err, "ResyncStatus")
return
}
}
// write "startTime"
err = en.Append(0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.StartTime)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
// write "lastUpdate"
err = en.Append(0xaa, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "numBuckets"
err = en.Append(0xaa, 0x6e, 0x75, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.NumBuckets)
if err != nil {
err = msgp.WrapError(err, "NumBuckets")
return
}
// write "resyncID"
err = en.Append(0xa8, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ResyncID)
if err != nil {
err = msgp.WrapError(err, "ResyncID")
return
}
// write "deplID"
err = en.Append(0xa6, 0x64, 0x65, 0x70, 0x6c, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.DeplID)
if err != nil {
err = msgp.WrapError(err, "DeplID")
return
}
// write "completedReplicationSize"
err = en.Append(0xb8, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.ReplicatedSize)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
// write "replicationCount"
err = en.Append(0xb0, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt64(z.ReplicatedCount)
if err != nil {
err = msgp.WrapError(err, "ReplicatedCount")
return
}
// write "failedReplicationSize"
err = en.Append(0xb5, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.FailedSize)
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
// write "failedReplicationCount"
err = en.Append(0xb6, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteInt64(z.FailedCount)
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
// write "failedBuckets"
err = en.Append(0xad, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.FailedBuckets)))
if err != nil {
err = msgp.WrapError(err, "FailedBuckets")
return
}
for za0001 := range z.FailedBuckets {
err = en.WriteString(z.FailedBuckets[za0001])
if err != nil {
err = msgp.WrapError(err, "FailedBuckets", za0001)
return
}
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// write "bucket"
err = en.Append(0xa6, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// write "object"
err = en.Append(0xa6, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *SiteResyncMetrics) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(14)
var zb0001Mask uint16 /* 14 bits */
_ = zb0001Mask
if z.ResyncStatus == "" {
zb0001Len--
zb0001Mask |= 0x2
}
if z.Bucket == "" {
zb0001Len--
zb0001Mask |= 0x1000
}
if z.Object == "" {
zb0001Len--
zb0001Mask |= 0x2000
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "collected"
o = append(o, 0xa9, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.CollectedAt)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "resyncStatus"
o = append(o, 0xac, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73)
o = msgp.AppendString(o, z.ResyncStatus)
}
// string "startTime"
o = append(o, 0xa9, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendTime(o, z.StartTime)
// string "lastUpdate"
o = append(o, 0xaa, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "numBuckets"
o = append(o, 0xaa, 0x6e, 0x75, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendInt64(o, z.NumBuckets)
// string "resyncID"
o = append(o, 0xa8, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x44)
o = msgp.AppendString(o, z.ResyncID)
// string "deplID"
o = append(o, 0xa6, 0x64, 0x65, 0x70, 0x6c, 0x49, 0x44)
o = msgp.AppendString(o, z.DeplID)
// string "completedReplicationSize"
o = append(o, 0xb8, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendInt64(o, z.ReplicatedSize)
// string "replicationCount"
o = append(o, 0xb0, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt64(o, z.ReplicatedCount)
// string "failedReplicationSize"
o = append(o, 0xb5, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendInt64(o, z.FailedSize)
// string "failedReplicationCount"
o = append(o, 0xb6, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendInt64(o, z.FailedCount)
// string "failedBuckets"
o = append(o, 0xad, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.FailedBuckets)))
for za0001 := range z.FailedBuckets {
o = msgp.AppendString(o, z.FailedBuckets[za0001])
}
if (zb0001Mask & 0x1000) == 0 { // if not omitted
// string "bucket"
o = append(o, 0xa6, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
}
if (zb0001Mask & 0x2000) == 0 { // if not omitted
// string "object"
o = append(o, 0xa6, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *SiteResyncMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "collected":
z.CollectedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CollectedAt")
return
}
case "resyncStatus":
z.ResyncStatus, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResyncStatus")
return
}
zb0001Mask |= 0x1
case "startTime":
z.StartTime, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StartTime")
return
}
case "lastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "numBuckets":
z.NumBuckets, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "NumBuckets")
return
}
case "resyncID":
z.ResyncID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResyncID")
return
}
case "deplID":
z.DeplID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DeplID")
return
}
case "completedReplicationSize":
z.ReplicatedSize, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicatedSize")
return
}
case "replicationCount":
z.ReplicatedCount, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ReplicatedCount")
return
}
case "failedReplicationSize":
z.FailedSize, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedSize")
return
}
case "failedReplicationCount":
z.FailedCount, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedCount")
return
}
case "failedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedBuckets")
return
}
if cap(z.FailedBuckets) >= int(zb0002) {
z.FailedBuckets = (z.FailedBuckets)[:zb0002]
} else {
z.FailedBuckets = make([]string, zb0002)
}
for za0001 := range z.FailedBuckets {
z.FailedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "FailedBuckets", za0001)
return
}
}
case "bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
zb0001Mask |= 0x2
case "object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
zb0001Mask |= 0x4
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.ResyncStatus = ""
}
if (zb0001Mask & 0x2) == 0 {
z.Bucket = ""
}
if (zb0001Mask & 0x4) == 0 {
z.Object = ""
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *SiteResyncMetrics) Msgsize() (s int) {
s = 1 + 10 + msgp.TimeSize + 13 + msgp.StringPrefixSize + len(z.ResyncStatus) + 10 + msgp.TimeSize + 11 + msgp.TimeSize + 11 + msgp.Int64Size + 9 + msgp.StringPrefixSize + len(z.ResyncID) + 7 + msgp.StringPrefixSize + len(z.DeplID) + 25 + msgp.Int64Size + 17 + msgp.Int64Size + 22 + msgp.Int64Size + 23 + msgp.Int64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.FailedBuckets {
s += msgp.StringPrefixSize + len(z.FailedBuckets[za0001])
}
s += 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object)
return
}
// DecodeMsg implements msgp.Decodable
func (z *localF64H) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "counts":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Counts")
return
}
if cap(z.Counts) >= int(zb0002) {
z.Counts = (z.Counts)[:zb0002]
} else {
z.Counts = make([]uint64, zb0002)
}
for za0001 := range z.Counts {
z.Counts[za0001], err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Counts", za0001)
return
}
}
zb0001Mask |= 0x1
case "buckets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
if cap(z.Buckets) >= int(zb0003) {
z.Buckets = (z.Buckets)[:zb0003]
} else {
z.Buckets = make([]float64, zb0003)
}
for za0002 := range z.Buckets {
z.Buckets[za0002], err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Buckets", za0002)
return
}
}
zb0001Mask |= 0x2
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.Counts = nil
}
if (zb0001Mask & 0x2) == 0 {
z.Buckets = nil
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *localF64H) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Counts == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Buckets == nil {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// write "counts"
err = en.Append(0xa6, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Counts)))
if err != nil {
err = msgp.WrapError(err, "Counts")
return
}
for za0001 := range z.Counts {
err = en.WriteUint64(z.Counts[za0001])
if err != nil {
err = msgp.WrapError(err, "Counts", za0001)
return
}
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "buckets"
err = en.Append(0xa7, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Buckets)))
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
for za0002 := range z.Buckets {
err = en.WriteFloat64(z.Buckets[za0002])
if err != nil {
err = msgp.WrapError(err, "Buckets", za0002)
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *localF64H) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Counts == nil {
zb0001Len--
zb0001Mask |= 0x1
}
if z.Buckets == nil {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
if (zb0001Mask & 0x1) == 0 { // if not omitted
// string "counts"
o = append(o, 0xa6, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Counts)))
for za0001 := range z.Counts {
o = msgp.AppendUint64(o, z.Counts[za0001])
}
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "buckets"
o = append(o, 0xa7, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Buckets)))
for za0002 := range z.Buckets {
o = msgp.AppendFloat64(o, z.Buckets[za0002])
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *localF64H) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "counts":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Counts")
return
}
if cap(z.Counts) >= int(zb0002) {
z.Counts = (z.Counts)[:zb0002]
} else {
z.Counts = make([]uint64, zb0002)
}
for za0001 := range z.Counts {
z.Counts[za0001], bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Counts", za0001)
return
}
}
zb0001Mask |= 0x1
case "buckets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Buckets")
return
}
if cap(z.Buckets) >= int(zb0003) {
z.Buckets = (z.Buckets)[:zb0003]
} else {
z.Buckets = make([]float64, zb0003)
}
for za0002 := range z.Buckets {
z.Buckets[za0002], bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Buckets", za0002)
return
}
}
zb0001Mask |= 0x2
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x3 {
if (zb0001Mask & 0x1) == 0 {
z.Counts = nil
}
if (zb0001Mask & 0x2) == 0 {
z.Buckets = nil
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *localF64H) Msgsize() (s int) {
s = 1 + 7 + msgp.ArrayHeaderSize + (len(z.Counts) * (msgp.Uint64Size)) + 8 + msgp.ArrayHeaderSize + (len(z.Buckets) * (msgp.Float64Size))
return
}
// DecodeMsg implements msgp.Decodable
func (z *nodeCommon) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "addr":
z.Addr, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Addr")
return
}
case "error":
z.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
return
}
// EncodeMsg implements msgp.Encodable
func (z nodeCommon) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "addr"
err = en.Append(0xa4, 0x61, 0x64, 0x64, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Addr)
if err != nil {
err = msgp.WrapError(err, "Addr")
return
}
if (zb0001Mask & 0x2) == 0 { // if not omitted
// write "error"
err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Error)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z nodeCommon) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(2)
var zb0001Mask uint8 /* 2 bits */
_ = zb0001Mask
if z.Error == "" {
zb0001Len--
zb0001Mask |= 0x2
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "addr"
o = append(o, 0xa4, 0x61, 0x64, 0x64, 0x72)
o = msgp.AppendString(o, z.Addr)
if (zb0001Mask & 0x2) == 0 { // if not omitted
// string "error"
o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
o = msgp.AppendString(o, z.Error)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *nodeCommon) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "addr":
z.Addr, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Addr")
return
}
case "error":
z.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Error = ""
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z nodeCommon) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Addr) + 6 + msgp.StringPrefixSize + len(z.Error)
return
}
golang-github-minio-madmin-go-3.0.104/metrics_gen_test.go 0000664 0000000 0000000 00000137467 14774251704 0023307 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.

import (
	"bytes"
	"testing"

	"github.com/tinylib/msgp/msgp"
)
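
// Editorial summary (not produced by msgp): the generated functions below
// repeat the same pattern for every metrics type in this package: a
// Marshal/Unmarshal round-trip test, an Encode/Decode round-trip test, and
// benchmarks for marshalling, appending, unmarshalling, encoding and
// decoding, each starting from the zero value of the type.
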
func TestMarshalUnmarshalBatchJobMetrics(t *testing.T) {
v := BatchJobMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBatchJobMetrics(b *testing.B) {
v := BatchJobMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBatchJobMetrics(b *testing.B) {
v := BatchJobMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBatchJobMetrics(b *testing.B) {
v := BatchJobMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBatchJobMetrics(t *testing.T) {
v := BatchJobMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBatchJobMetrics Msgsize() is inaccurate")
}
vn := BatchJobMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBatchJobMetrics(b *testing.B) {
v := BatchJobMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBatchJobMetrics(b *testing.B) {
v := BatchJobMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
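
// roundTripMsgp is an illustrative sketch, not part of the generated suite:
// it shows how the MarshalMsg/UnmarshalMsg/Msgsize trio emitted for every
// type in this package is meant to be combined. The helper name is editorial;
// it relies only on the msgp interfaces already imported above.
func roundTripMsgp(t *testing.T, v interface {
	msgp.Marshaler
	msgp.Unmarshaler
	msgp.Sizer
}) {
	t.Helper()
	// Pre-size the buffer with Msgsize, marshal into it, then verify that
	// UnmarshalMsg consumes every byte MarshalMsg produced.
	buf, err := v.MarshalMsg(make([]byte, 0, v.Msgsize()))
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(buf)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}
}

// Hypothetical usage: roundTripMsgp(t, &CPUMetrics{}) performs the same
// checks as the generated TestMarshalUnmarshalCPUMetrics below.
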
func TestMarshalUnmarshalCPUMetrics(t *testing.T) {
v := CPUMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgCPUMetrics(b *testing.B) {
v := CPUMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgCPUMetrics(b *testing.B) {
v := CPUMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalCPUMetrics(b *testing.B) {
v := CPUMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeCPUMetrics(t *testing.T) {
v := CPUMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeCPUMetrics Msgsize() is inaccurate")
}
vn := CPUMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeCPUMetrics(b *testing.B) {
v := CPUMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeCPUMetrics(b *testing.B) {
v := CPUMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalCatalogInfo(t *testing.T) {
v := CatalogInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgCatalogInfo(b *testing.B) {
v := CatalogInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgCatalogInfo(b *testing.B) {
v := CatalogInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalCatalogInfo(b *testing.B) {
v := CatalogInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeCatalogInfo(t *testing.T) {
v := CatalogInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeCatalogInfo Msgsize() is inaccurate")
}
vn := CatalogInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeCatalogInfo(b *testing.B) {
v := CatalogInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeCatalogInfo(b *testing.B) {
v := CatalogInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDiskIOStats(t *testing.T) {
v := DiskIOStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDiskIOStats(b *testing.B) {
v := DiskIOStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDiskIOStats(b *testing.B) {
v := DiskIOStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDiskIOStats(b *testing.B) {
v := DiskIOStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDiskIOStats(t *testing.T) {
v := DiskIOStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDiskIOStats Msgsize() is inaccurate")
}
vn := DiskIOStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDiskIOStats(b *testing.B) {
v := DiskIOStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDiskIOStats(b *testing.B) {
v := DiskIOStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDiskMetric(t *testing.T) {
v := DiskMetric{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDiskMetric(b *testing.B) {
v := DiskMetric{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDiskMetric(b *testing.B) {
v := DiskMetric{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDiskMetric(b *testing.B) {
v := DiskMetric{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDiskMetric(t *testing.T) {
v := DiskMetric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDiskMetric Msgsize() is inaccurate")
}
vn := DiskMetric{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDiskMetric(b *testing.B) {
v := DiskMetric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDiskMetric(b *testing.B) {
v := DiskMetric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalExpirationInfo(t *testing.T) {
v := ExpirationInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgExpirationInfo(b *testing.B) {
v := ExpirationInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgExpirationInfo(b *testing.B) {
v := ExpirationInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalExpirationInfo(b *testing.B) {
v := ExpirationInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeExpirationInfo(t *testing.T) {
v := ExpirationInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeExpirationInfo Msgsize() is inaccurate")
}
vn := ExpirationInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeExpirationInfo(b *testing.B) {
v := ExpirationInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeExpirationInfo(b *testing.B) {
v := ExpirationInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalJobMetric(t *testing.T) {
v := JobMetric{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgJobMetric(b *testing.B) {
v := JobMetric{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgJobMetric(b *testing.B) {
v := JobMetric{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalJobMetric(b *testing.B) {
v := JobMetric{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeJobMetric(t *testing.T) {
v := JobMetric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeJobMetric Msgsize() is inaccurate")
}
vn := JobMetric{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeJobMetric(b *testing.B) {
v := JobMetric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeJobMetric(b *testing.B) {
v := JobMetric{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalKeyRotationInfo(t *testing.T) {
v := KeyRotationInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgKeyRotationInfo(b *testing.B) {
v := KeyRotationInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgKeyRotationInfo(b *testing.B) {
v := KeyRotationInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalKeyRotationInfo(b *testing.B) {
v := KeyRotationInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeKeyRotationInfo(t *testing.T) {
v := KeyRotationInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeKeyRotationInfo Msgsize() is inaccurate")
}
vn := KeyRotationInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeKeyRotationInfo(b *testing.B) {
v := KeyRotationInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeKeyRotationInfo(b *testing.B) {
v := KeyRotationInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMemInfo(t *testing.T) {
v := MemInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMemInfo(b *testing.B) {
v := MemInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMemInfo(b *testing.B) {
v := MemInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMemInfo(b *testing.B) {
v := MemInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMemInfo(t *testing.T) {
v := MemInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMemInfo Msgsize() is inaccurate")
}
vn := MemInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMemInfo(b *testing.B) {
v := MemInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMemInfo(b *testing.B) {
v := MemInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMemMetrics(t *testing.T) {
v := MemMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMemMetrics(b *testing.B) {
v := MemMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMemMetrics(b *testing.B) {
v := MemMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMemMetrics(b *testing.B) {
v := MemMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMemMetrics(t *testing.T) {
v := MemMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMemMetrics Msgsize() is inaccurate")
}
vn := MemMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMemMetrics(b *testing.B) {
v := MemMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMemMetrics(b *testing.B) {
v := MemMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetrics(t *testing.T) {
v := Metrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetrics(b *testing.B) {
v := Metrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetrics(b *testing.B) {
v := Metrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetrics(b *testing.B) {
v := Metrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMetrics(t *testing.T) {
v := Metrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMetrics Msgsize() is inaccurate")
}
vn := Metrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMetrics(b *testing.B) {
v := Metrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMetrics(b *testing.B) {
v := Metrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetricsOptions(t *testing.T) {
v := MetricsOptions{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetricsOptions(b *testing.B) {
v := MetricsOptions{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetricsOptions(b *testing.B) {
v := MetricsOptions{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetricsOptions(b *testing.B) {
v := MetricsOptions{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeMetricsOptions(t *testing.T) {
v := MetricsOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeMetricsOptions Msgsize() is inaccurate")
}
vn := MetricsOptions{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeMetricsOptions(b *testing.B) {
v := MetricsOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeMetricsOptions(b *testing.B) {
v := MetricsOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalNetMetrics(t *testing.T) {
v := NetMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgNetMetrics(b *testing.B) {
v := NetMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgNetMetrics(b *testing.B) {
v := NetMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalNetMetrics(b *testing.B) {
v := NetMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeNetMetrics(t *testing.T) {
v := NetMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeNetMetrics Msgsize() is inaccurate")
}
vn := NetMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeNetMetrics(b *testing.B) {
v := NetMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeNetMetrics(b *testing.B) {
v := NetMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalOSMetrics(t *testing.T) {
v := OSMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgOSMetrics(b *testing.B) {
v := OSMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgOSMetrics(b *testing.B) {
v := OSMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalOSMetrics(b *testing.B) {
v := OSMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeOSMetrics(t *testing.T) {
v := OSMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeOSMetrics Msgsize() is inaccurate")
}
vn := OSMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeOSMetrics(b *testing.B) {
v := OSMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeOSMetrics(b *testing.B) {
v := OSMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalRPCMetrics(t *testing.T) {
v := RPCMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgRPCMetrics(b *testing.B) {
v := RPCMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgRPCMetrics(b *testing.B) {
v := RPCMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalRPCMetrics(b *testing.B) {
v := RPCMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeRPCMetrics(t *testing.T) {
v := RPCMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeRPCMetrics Msgsize() is inaccurate")
}
vn := RPCMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeRPCMetrics(b *testing.B) {
v := RPCMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeRPCMetrics(b *testing.B) {
v := RPCMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalRealtimeMetrics(t *testing.T) {
v := RealtimeMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgRealtimeMetrics(b *testing.B) {
v := RealtimeMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgRealtimeMetrics(b *testing.B) {
v := RealtimeMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalRealtimeMetrics(b *testing.B) {
v := RealtimeMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeRealtimeMetrics(t *testing.T) {
v := RealtimeMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeRealtimeMetrics Msgsize() is inaccurate")
}
vn := RealtimeMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeRealtimeMetrics(b *testing.B) {
v := RealtimeMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeRealtimeMetrics(b *testing.B) {
v := RealtimeMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalReplicateInfo(t *testing.T) {
v := ReplicateInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgReplicateInfo(b *testing.B) {
v := ReplicateInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgReplicateInfo(b *testing.B) {
v := ReplicateInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalReplicateInfo(b *testing.B) {
v := ReplicateInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeReplicateInfo(t *testing.T) {
v := ReplicateInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeReplicateInfo Msgsize() is inaccurate")
}
vn := ReplicateInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeReplicateInfo(b *testing.B) {
v := ReplicateInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeReplicateInfo(b *testing.B) {
v := ReplicateInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalRuntimeMetrics(t *testing.T) {
v := RuntimeMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgRuntimeMetrics(b *testing.B) {
v := RuntimeMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgRuntimeMetrics(b *testing.B) {
v := RuntimeMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalRuntimeMetrics(b *testing.B) {
v := RuntimeMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeRuntimeMetrics(t *testing.T) {
v := RuntimeMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeRuntimeMetrics Msgsize() is inaccurate")
}
vn := RuntimeMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeRuntimeMetrics(b *testing.B) {
v := RuntimeMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeRuntimeMetrics(b *testing.B) {
v := RuntimeMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalScannerMetrics(t *testing.T) {
v := ScannerMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgScannerMetrics(b *testing.B) {
v := ScannerMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgScannerMetrics(b *testing.B) {
v := ScannerMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalScannerMetrics(b *testing.B) {
v := ScannerMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeScannerMetrics(t *testing.T) {
v := ScannerMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeScannerMetrics Msgsize() is inaccurate")
}
vn := ScannerMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeScannerMetrics(b *testing.B) {
v := ScannerMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeScannerMetrics(b *testing.B) {
v := ScannerMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalSiteResyncMetrics(t *testing.T) {
v := SiteResyncMetrics{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgSiteResyncMetrics(b *testing.B) {
v := SiteResyncMetrics{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgSiteResyncMetrics(b *testing.B) {
v := SiteResyncMetrics{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalSiteResyncMetrics(b *testing.B) {
v := SiteResyncMetrics{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeSiteResyncMetrics(t *testing.T) {
v := SiteResyncMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeSiteResyncMetrics Msgsize() is inaccurate")
}
vn := SiteResyncMetrics{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeSiteResyncMetrics(b *testing.B) {
v := SiteResyncMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeSiteResyncMetrics(b *testing.B) {
v := SiteResyncMetrics{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshallocalF64H(t *testing.T) {
v := localF64H{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglocalF64H(b *testing.B) {
v := localF64H{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglocalF64H(b *testing.B) {
v := localF64H{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallocalF64H(b *testing.B) {
v := localF64H{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelocalF64H(t *testing.T) {
v := localF64H{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelocalF64H Msgsize() is inaccurate")
}
vn := localF64H{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelocalF64H(b *testing.B) {
v := localF64H{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelocalF64H(b *testing.B) {
v := localF64H{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalnodeCommon(t *testing.T) {
v := nodeCommon{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgnodeCommon(b *testing.B) {
v := nodeCommon{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgnodeCommon(b *testing.B) {
v := nodeCommon{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalnodeCommon(b *testing.B) {
v := nodeCommon{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodenodeCommon(t *testing.T) {
v := nodeCommon{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodenodeCommon Msgsize() is inaccurate")
}
vn := nodeCommon{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodenodeCommon(b *testing.B) {
v := nodeCommon{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodenodeCommon(b *testing.B) {
v := nodeCommon{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/net_linux.go 0000664 0000000 0000000 00000003702 14774251704 0021736 0 ustar 00root root 0000000 0000000 //go:build linux
// +build linux
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"fmt"
"github.com/safchain/ethtool"
)
// GetNetInfo returns information about the given network interface
func GetNetInfo(addr string, iface string) (ni NetInfo) {
ni.Addr = addr
ni.Interface = iface
ethHandle, err := ethtool.NewEthtool()
if err != nil {
ni.Error = err.Error()
return
}
defer ethHandle.Close()
di, err := ethHandle.DriverInfo(ni.Interface)
if err != nil {
ni.Error = fmt.Sprintf("Error getting driver info for %s: %s", ni.Interface, err.Error())
return
}
ni.Driver = di.Driver
ni.FirmwareVersion = di.FwVersion
ring, err := ethHandle.GetRing(ni.Interface)
if err != nil {
ni.Error = fmt.Sprintf("Error getting ring parameters for %s: %s", ni.Interface, err.Error())
return
}
ni.Settings = &NetSettings{
RxMaxPending: ring.RxMaxPending,
TxMaxPending: ring.TxMaxPending,
RxPending: ring.RxPending,
TxPending: ring.TxPending,
}
channels, err := ethHandle.GetChannels(iface)
if err != nil {
ni.Error = fmt.Sprintf("Error getting channels for %s: %s", ni.Interface, err.Error())
return
}
ni.Settings.CombinedCount = channels.CombinedCount
ni.Settings.MaxCombined = channels.MaxCombined
return
}
golang-github-minio-madmin-go-3.0.104/net_nolinux.go 0000664 0000000 0000000 00000002144 14774251704 0022272 0 ustar 00root root 0000000 0000000 //go:build !linux
// +build !linux
//
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
// GetNetInfo returns information about the given network interface.
// Not implemented for non-Linux platforms.
func GetNetInfo(addr string, iface string) NetInfo {
return NetInfo{
NodeCommon: NodeCommon{
Addr: addr,
Error: "Not implemented for non-linux platforms",
},
Interface: iface,
}
}
golang-github-minio-madmin-go-3.0.104/no_fips.go 0000664 0000000 0000000 00000002160 14774251704 0021363 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
//go:build !fips
// +build !fips
package madmin
// FIPSEnabled returns true if and only if FIPS 140-2 support
// is enabled.
//
// FIPS 140-2 requires that only specific cryptographic
// primitives, like AES or SHA-256, are used and that
// those primitives are implemented by a FIPS 140-2
// certified cryptographic module.
func FIPSEnabled() bool { return false }
golang-github-minio-madmin-go-3.0.104/opentelemetry.go 0000664 0000000 0000000 00000007236 14774251704 0022633 0 ustar 00root root 0000000 0000000 // Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package madmin
import (
"context"
"crypto/rsa"
"encoding/json"
"io"
"net/http"
"github.com/minio/madmin-go/v3/estream"
)
//go:generate msgp $GOFILE
//msgp:replace TraceType with:uint64
// ServiceTelemetryOpts is a request to add the following types to tracing.
type ServiceTelemetryOpts struct {
// Types to add to tracing.
Types TraceType `json:"types"`
// Public cert to encrypt stream.
PubCert []byte
// Sample rate to set for this filter.
// If <=0 or >=1 no sampling will be performed
// and all hits will be traced.
SampleRate float64 `json:"sampleRate"`
// Disable sampling and only do tracing when a trace id is set on incoming request.
ParentOnly bool `json:"parentOnly"`
// Tag adds a `custom.tag` field to all traces triggered by this.
TagKV map[string]string `json:"tags"`
// On incoming HTTP types, only trigger if the filter substrings below match the request.
HTTPFilter struct {
Func string `json:"funcFilter"`
UserAgent string `json:"userAgent"`
Header map[string]string `json:"header"`
} `json:"httpFilter"`
}
//msgp:ignore ServiceTelemetry
// ServiceTelemetry holds HTTP telemetry spans, serialized and compressed.
type ServiceTelemetry struct {
SpanMZ []byte // Serialized and Compressed spans.
Err error // Any error that occurred
}
// ServiceTelemetryStream - gets raw stream for service telemetry.
func (adm AdminClient) ServiceTelemetryStream(ctx context.Context, opts ServiceTelemetryOpts) (io.ReadCloser, error) {
bopts, err := json.Marshal(opts)
if err != nil {
return nil, err
}
reqData := requestData{
relPath: adminAPIPrefix + "/telemetry",
content: bopts,
}
// Execute POST to call the telemetry handler
resp, err := adm.executeMethod(ctx, http.MethodPost, reqData)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
closeResponse(resp)
return nil, httpRespToErrorResponse(resp)
}
return resp.Body, nil
}
// ServiceTelemetry - performs a telemetry request and sends individual span packages to dst.
// If the options contain a public key, the corresponding private key must be provided.
// The function returns when the stream ends or the context is canceled.
func (adm AdminClient) ServiceTelemetry(ctx context.Context, opts ServiceTelemetryOpts, dst chan<- ServiceTelemetry, pk *rsa.PrivateKey) {
defer close(dst)
resp, err := adm.ServiceTelemetryStream(ctx, opts)
if err != nil {
dst <- ServiceTelemetry{Err: err}
return
}
dec, err := estream.NewReader(resp)
if err != nil {
dst <- ServiceTelemetry{Err: err}
return
}
if pk != nil {
dec.SetPrivateKey(pk)
}
for {
st, err := dec.NextStream()
if err != nil {
dst <- ServiceTelemetry{Err: err}
return
}
if ctx.Err() != nil {
return
}
block, err := io.ReadAll(st)
if err == nil && len(block) == 0 {
// Ignore 0 sized blocks.
continue
}
if ctx.Err() != nil {
return
}
select {
case <-ctx.Done():
return
case dst <- ServiceTelemetry{SpanMZ: block, Err: err}:
if err != nil {
return
}
}
}
}
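// exampleServiceTelemetry is an illustrative sketch, not part of the upstream
// madmin API surface: it shows one way ServiceTelemetry is typically consumed,
// receiving spans on a channel until the stream ends or the context is
// canceled. The trace type, sample rate and channel size are placeholders
// chosen for the example (TraceS3 is assumed to be one of the TraceType
// constants defined by this package), and pk may be nil when no PubCert is
// set in the options.
func exampleServiceTelemetry(ctx context.Context, adm *AdminClient, pk *rsa.PrivateKey) error {
	spans := make(chan ServiceTelemetry, 16)
	// ServiceTelemetry closes the channel when it returns, so the range below terminates.
	go adm.ServiceTelemetry(ctx, ServiceTelemetryOpts{
		Types:      TraceS3, // assumed trace type constant
		SampleRate: 0.1,     // trace roughly 10% of requests
	}, spans, pk)
	for st := range spans {
		if st.Err != nil {
			if st.Err == io.EOF {
				// Normal end of stream.
				return nil
			}
			return st.Err
		}
		// st.SpanMZ holds a serialized and compressed span block; decode it
		// with the matching reader on the consumer side.
		_ = st.SpanMZ
	}
	return nil
}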
golang-github-minio-madmin-go-3.0.104/opentelemetry_gen.go 0000664 0000000 0000000 00000030113 14774251704 0023452 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *ServiceTelemetryOpts) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Types":
{
var zb0002 uint64
zb0002, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Types")
return
}
z.Types = TraceType(zb0002)
}
case "PubCert":
z.PubCert, err = dc.ReadBytes(z.PubCert)
if err != nil {
err = msgp.WrapError(err, "PubCert")
return
}
case "SampleRate":
z.SampleRate, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "SampleRate")
return
}
case "ParentOnly":
z.ParentOnly, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "ParentOnly")
return
}
case "TagKV":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "TagKV")
return
}
if z.TagKV == nil {
z.TagKV = make(map[string]string, zb0003)
} else if len(z.TagKV) > 0 {
for key := range z.TagKV {
delete(z.TagKV, key)
}
}
for zb0003 > 0 {
zb0003--
var za0001 string
var za0002 string
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "TagKV")
return
}
za0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "TagKV", za0001)
return
}
z.TagKV[za0001] = za0002
}
case "HTTPFilter":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter")
return
}
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter")
return
}
switch msgp.UnsafeString(field) {
case "Func":
z.HTTPFilter.Func, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Func")
return
}
case "UserAgent":
z.HTTPFilter.UserAgent, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "UserAgent")
return
}
case "Header":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header")
return
}
if z.HTTPFilter.Header == nil {
z.HTTPFilter.Header = make(map[string]string, zb0005)
} else if len(z.HTTPFilter.Header) > 0 {
for key := range z.HTTPFilter.Header {
delete(z.HTTPFilter.Header, key)
}
}
for zb0005 > 0 {
zb0005--
var za0003 string
var za0004 string
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header")
return
}
za0004, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header", za0003)
return
}
z.HTTPFilter.Header[za0003] = za0004
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "HTTPFilter")
return
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ServiceTelemetryOpts) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "Types"
err = en.Append(0x86, 0xa5, 0x54, 0x79, 0x70, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(uint64(z.Types))
if err != nil {
err = msgp.WrapError(err, "Types")
return
}
// write "PubCert"
err = en.Append(0xa7, 0x50, 0x75, 0x62, 0x43, 0x65, 0x72, 0x74)
if err != nil {
return
}
err = en.WriteBytes(z.PubCert)
if err != nil {
err = msgp.WrapError(err, "PubCert")
return
}
// write "SampleRate"
err = en.Append(0xaa, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteFloat64(z.SampleRate)
if err != nil {
err = msgp.WrapError(err, "SampleRate")
return
}
// write "ParentOnly"
err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4f, 0x6e, 0x6c, 0x79)
if err != nil {
return
}
err = en.WriteBool(z.ParentOnly)
if err != nil {
err = msgp.WrapError(err, "ParentOnly")
return
}
// write "TagKV"
err = en.Append(0xa5, 0x54, 0x61, 0x67, 0x4b, 0x56)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.TagKV)))
if err != nil {
err = msgp.WrapError(err, "TagKV")
return
}
for za0001, za0002 := range z.TagKV {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "TagKV")
return
}
err = en.WriteString(za0002)
if err != nil {
err = msgp.WrapError(err, "TagKV", za0001)
return
}
}
// write "HTTPFilter"
err = en.Append(0xaa, 0x48, 0x54, 0x54, 0x50, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72)
if err != nil {
return
}
// map header, size 3
// write "Func"
err = en.Append(0x83, 0xa4, 0x46, 0x75, 0x6e, 0x63)
if err != nil {
return
}
err = en.WriteString(z.HTTPFilter.Func)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Func")
return
}
// write "UserAgent"
err = en.Append(0xa9, 0x55, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.HTTPFilter.UserAgent)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "UserAgent")
return
}
// write "Header"
err = en.Append(0xa6, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.HTTPFilter.Header)))
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header")
return
}
for za0003, za0004 := range z.HTTPFilter.Header {
err = en.WriteString(za0003)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header")
return
}
err = en.WriteString(za0004)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header", za0003)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ServiceTelemetryOpts) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "Types"
o = append(o, 0x86, 0xa5, 0x54, 0x79, 0x70, 0x65, 0x73)
o = msgp.AppendUint64(o, uint64(z.Types))
// string "PubCert"
o = append(o, 0xa7, 0x50, 0x75, 0x62, 0x43, 0x65, 0x72, 0x74)
o = msgp.AppendBytes(o, z.PubCert)
// string "SampleRate"
o = append(o, 0xaa, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x65)
o = msgp.AppendFloat64(o, z.SampleRate)
// string "ParentOnly"
o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4f, 0x6e, 0x6c, 0x79)
o = msgp.AppendBool(o, z.ParentOnly)
// string "TagKV"
o = append(o, 0xa5, 0x54, 0x61, 0x67, 0x4b, 0x56)
o = msgp.AppendMapHeader(o, uint32(len(z.TagKV)))
for za0001, za0002 := range z.TagKV {
o = msgp.AppendString(o, za0001)
o = msgp.AppendString(o, za0002)
}
// string "HTTPFilter"
o = append(o, 0xaa, 0x48, 0x54, 0x54, 0x50, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72)
// map header, size 3
// string "Func"
o = append(o, 0x83, 0xa4, 0x46, 0x75, 0x6e, 0x63)
o = msgp.AppendString(o, z.HTTPFilter.Func)
// string "UserAgent"
o = append(o, 0xa9, 0x55, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74)
o = msgp.AppendString(o, z.HTTPFilter.UserAgent)
// string "Header"
o = append(o, 0xa6, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72)
o = msgp.AppendMapHeader(o, uint32(len(z.HTTPFilter.Header)))
for za0003, za0004 := range z.HTTPFilter.Header {
o = msgp.AppendString(o, za0003)
o = msgp.AppendString(o, za0004)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ServiceTelemetryOpts) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Types":
{
var zb0002 uint64
zb0002, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Types")
return
}
z.Types = TraceType(zb0002)
}
case "PubCert":
z.PubCert, bts, err = msgp.ReadBytesBytes(bts, z.PubCert)
if err != nil {
err = msgp.WrapError(err, "PubCert")
return
}
case "SampleRate":
z.SampleRate, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "SampleRate")
return
}
case "ParentOnly":
z.ParentOnly, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ParentOnly")
return
}
case "TagKV":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TagKV")
return
}
if z.TagKV == nil {
z.TagKV = make(map[string]string, zb0003)
} else if len(z.TagKV) > 0 {
for key := range z.TagKV {
delete(z.TagKV, key)
}
}
for zb0003 > 0 {
var za0001 string
var za0002 string
zb0003--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TagKV")
return
}
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TagKV", za0001)
return
}
z.TagKV[za0001] = za0002
}
case "HTTPFilter":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter")
return
}
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter")
return
}
switch msgp.UnsafeString(field) {
case "Func":
z.HTTPFilter.Func, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Func")
return
}
case "UserAgent":
z.HTTPFilter.UserAgent, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "UserAgent")
return
}
case "Header":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header")
return
}
if z.HTTPFilter.Header == nil {
z.HTTPFilter.Header = make(map[string]string, zb0005)
} else if len(z.HTTPFilter.Header) > 0 {
for key := range z.HTTPFilter.Header {
delete(z.HTTPFilter.Header, key)
}
}
for zb0005 > 0 {
var za0003 string
var za0004 string
zb0005--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header")
return
}
za0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter", "Header", za0003)
return
}
z.HTTPFilter.Header[za0003] = za0004
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "HTTPFilter")
return
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ServiceTelemetryOpts) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint64Size + 8 + msgp.BytesPrefixSize + len(z.PubCert) + 11 + msgp.Float64Size + 11 + msgp.BoolSize + 6 + msgp.MapHeaderSize
if z.TagKV != nil {
for za0001, za0002 := range z.TagKV {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
}
}
s += 11 + 1 + 5 + msgp.StringPrefixSize + len(z.HTTPFilter.Func) + 10 + msgp.StringPrefixSize + len(z.HTTPFilter.UserAgent) + 7 + msgp.MapHeaderSize
if z.HTTPFilter.Header != nil {
for za0003, za0004 := range z.HTTPFilter.Header {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004)
}
}
return
}
golang-github-minio-madmin-go-3.0.104/opentelemetry_gen_test.go 0000664 0000000 0000000 00000004675 14774251704 0024527 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalServiceTelemetryOpts(t *testing.T) {
v := ServiceTelemetryOpts{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgServiceTelemetryOpts(b *testing.B) {
v := ServiceTelemetryOpts{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgServiceTelemetryOpts(b *testing.B) {
v := ServiceTelemetryOpts{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalServiceTelemetryOpts(b *testing.B) {
v := ServiceTelemetryOpts{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeServiceTelemetryOpts(t *testing.T) {
v := ServiceTelemetryOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeServiceTelemetryOpts Msgsize() is inaccurate")
}
vn := ServiceTelemetryOpts{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeServiceTelemetryOpts(b *testing.B) {
v := ServiceTelemetryOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeServiceTelemetryOpts(b *testing.B) {
v := ServiceTelemetryOpts{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/parse-config.go 0000664 0000000 0000000 00000031775 14774251704 0022321 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"errors"
"fmt"
"strings"
"unicode"
"github.com/minio/minio-go/v7/pkg/set"
)
// Top level configuration key constants.
const (
CredentialsSubSys = "credentials"
PolicyOPASubSys = "policy_opa"
PolicyPluginSubSys = "policy_plugin"
IdentityOpenIDSubSys = "identity_openid"
IdentityLDAPSubSys = "identity_ldap"
IdentityTLSSubSys = "identity_tls"
IdentityPluginSubSys = "identity_plugin"
CacheSubSys = "cache"
SiteSubSys = "site"
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storage_class"
APISubSys = "api"
CompressionSubSys = "compression"
LoggerWebhookSubSys = "logger_webhook"
AuditWebhookSubSys = "audit_webhook"
AuditKafkaSubSys = "audit_kafka"
HealSubSys = "heal"
ScannerSubSys = "scanner"
CrawlerSubSys = "crawler"
SubnetSubSys = "subnet"
CallhomeSubSys = "callhome"
BatchSubSys = "batch"
DriveSubSys = "drive"
ILMSubsys = "ilm"
NotifyKafkaSubSys = "notify_kafka"
NotifyMQTTSubSys = "notify_mqtt"
NotifyMySQLSubSys = "notify_mysql"
NotifyNATSSubSys = "notify_nats"
NotifyNSQSubSys = "notify_nsq"
NotifyESSubSys = "notify_elasticsearch"
NotifyAMQPSubSys = "notify_amqp"
NotifyPostgresSubSys = "notify_postgres"
NotifyRedisSubSys = "notify_redis"
NotifyWebhookSubSys = "notify_webhook"
LambdaWebhookSubSys = "lambda_webhook"
BrowserSubSys = "browser"
AuditEventQueueSubSys = "audit_event_queue"
ErasureSubSys = "erasure"
BucketEventQueueSubSys = "bucket_event_queue"
)
// SubSystems - list of all subsystems in MinIO
var SubSystems = set.CreateStringSet(
CredentialsSubSys,
PolicyOPASubSys,
PolicyPluginSubSys,
IdentityOpenIDSubSys,
IdentityLDAPSubSys,
IdentityTLSSubSys,
IdentityPluginSubSys,
CacheSubSys,
SiteSubSys,
RegionSubSys,
EtcdSubSys,
StorageClassSubSys,
APISubSys,
CompressionSubSys,
LoggerWebhookSubSys,
AuditWebhookSubSys,
AuditKafkaSubSys,
HealSubSys,
ScannerSubSys,
CrawlerSubSys,
SubnetSubSys,
CallhomeSubSys,
BatchSubSys,
DriveSubSys,
ILMSubsys,
NotifyKafkaSubSys,
NotifyMQTTSubSys,
NotifyMySQLSubSys,
NotifyNATSSubSys,
NotifyNSQSubSys,
NotifyESSubSys,
NotifyAMQPSubSys,
NotifyPostgresSubSys,
NotifyRedisSubSys,
NotifyWebhookSubSys,
LambdaWebhookSubSys,
BrowserSubSys,
)
// EOSSubSystems - list of all subsystems for EOS
var EOSSubSystems = set.CreateStringSet(
CredentialsSubSys,
PolicyOPASubSys,
PolicyPluginSubSys,
IdentityOpenIDSubSys,
IdentityLDAPSubSys,
IdentityTLSSubSys,
IdentityPluginSubSys,
CacheSubSys,
SiteSubSys,
RegionSubSys,
EtcdSubSys,
StorageClassSubSys,
APISubSys,
CompressionSubSys,
LoggerWebhookSubSys,
AuditWebhookSubSys,
AuditKafkaSubSys,
HealSubSys,
ScannerSubSys,
CrawlerSubSys,
SubnetSubSys,
CallhomeSubSys,
BatchSubSys,
DriveSubSys,
ILMSubsys,
NotifyKafkaSubSys,
NotifyMQTTSubSys,
NotifyMySQLSubSys,
NotifyNATSSubSys,
NotifyNSQSubSys,
NotifyESSubSys,
NotifyAMQPSubSys,
NotifyPostgresSubSys,
NotifyRedisSubSys,
NotifyWebhookSubSys,
LambdaWebhookSubSys,
BrowserSubSys,
AuditEventQueueSubSys,
ErasureSubSys,
BucketEventQueueSubSys,
)
// Standard config keys and values.
const (
EnableKey = "enable"
CommentKey = "comment"
// Enable values
EnableOn = "on"
EnableOff = "off"
)
// HasSpace - returns true if the given string contains any whitespace.
func HasSpace(s string) bool {
for _, r := range s {
if unicode.IsSpace(r) {
return true
}
}
return false
}
// Constant separators
const (
SubSystemSeparator = `:`
KvSeparator = `=`
KvComment = `#`
KvSpaceSeparator = ` `
KvNewline = "\n"
KvDoubleQuote = `"`
KvSingleQuote = `'`
Default = `_`
EnvPrefix = "MINIO_"
EnvWordDelimiter = `_`
EnvLinePrefix = KvComment + KvSpaceSeparator + EnvPrefix
)
// SanitizeValue - trims off single or double quotes that may creep into the values.
func SanitizeValue(v string) string {
v = strings.TrimSuffix(strings.TrimPrefix(strings.TrimSpace(v), KvDoubleQuote), KvDoubleQuote)
return strings.TrimSuffix(strings.TrimPrefix(v, KvSingleQuote), KvSingleQuote)
}
// EnvOverride contains the name of the environment variable and its value.
type EnvOverride struct {
Name string `json:"name"`
Value string `json:"value"`
}
// ConfigKV represents a configuration key and value, along with any environment
// override if present.
type ConfigKV struct {
Key string `json:"key"`
Value string `json:"value"`
EnvOverride *EnvOverride `json:"envOverride,omitempty"`
}
// SubsysConfig represents the configuration for a particular subsystem and
// target.
type SubsysConfig struct {
SubSystem string `json:"subSystem"`
Target string `json:"target,omitempty"`
// WARNING: Use AddConfigKV() to mutate this.
KV []ConfigKV `json:"kv"`
kvIndexMap map[string]int
}
// AddConfigKV - adds a config parameter to the subsystem.
func (c *SubsysConfig) AddConfigKV(ckv ConfigKV) {
if c.kvIndexMap == nil {
c.kvIndexMap = make(map[string]int)
}
idx, ok := c.kvIndexMap[ckv.Key]
if ok {
c.KV[idx] = ckv
} else {
c.KV = append(c.KV, ckv)
c.kvIndexMap[ckv.Key] = len(c.KV) - 1
}
}
// Lookup resolves the value of a config parameter. If an env variable is
// specified on the server for the parameter, it is returned.
func (c *SubsysConfig) Lookup(key string) (val string, present bool) {
if c.kvIndexMap == nil {
return "", false
}
idx, ok := c.kvIndexMap[key]
if !ok {
return "", false
}
if c.KV[idx].EnvOverride != nil {
return c.KV[idx].EnvOverride.Value, true
}
return c.KV[idx].Value, true
}
var (
ErrInvalidEnvVarLine = errors.New("expected env var line of the form `# MINIO_...=...`")
ErrInvalidConfigKV = errors.New("expected config value in the format `key=value`")
)
func parseEnvVarLine(s, subSystem, target string) (val ConfigKV, err error) {
s = strings.TrimPrefix(s, KvComment+KvSpaceSeparator)
ps := strings.SplitN(s, KvSeparator, 2)
if len(ps) != 2 {
err = ErrInvalidEnvVarLine
return
}
val.EnvOverride = &EnvOverride{
Name: ps[0],
Value: ps[1],
}
envVar := val.EnvOverride.Name
envPrefix := EnvPrefix + strings.ToUpper(subSystem) + EnvWordDelimiter
if !strings.HasPrefix(envVar, envPrefix) {
err = fmt.Errorf("expected env %v to have prefix %v", envVar, envPrefix)
return
}
configVar := strings.TrimPrefix(envVar, envPrefix)
if target != Default {
configVar = strings.TrimSuffix(configVar, EnvWordDelimiter+target)
}
val.Key = strings.ToLower(configVar)
return
}
// Takes "k1=v1 k2=v2 ..." and returns key=k1 and rem="v1 k2=v2 ..." on success.
func parseConfigKey(text string) (key, rem string, err error) {
// Split to first `=`
ts := strings.SplitN(text, KvSeparator, 2)
key = strings.TrimSpace(ts[0])
if len(key) == 0 {
err = ErrInvalidConfigKV
return
}
if len(ts) == 1 {
err = ErrInvalidConfigKV
return
}
return key, ts[1], nil
}
func parseConfigValue(text string) (v, rem string, err error) {
// Value may be double quoted.
if strings.HasPrefix(text, KvDoubleQuote) {
text = strings.TrimPrefix(text, KvDoubleQuote)
ts := strings.SplitN(text, KvDoubleQuote, 2)
v = ts[0]
if len(ts) == 1 {
err = ErrInvalidConfigKV
return
}
rem = strings.TrimSpace(ts[1])
} else {
ts := strings.SplitN(text, KvSpaceSeparator, 2)
v = ts[0]
if len(ts) == 2 {
rem = strings.TrimSpace(ts[1])
} else {
rem = ""
}
}
return
}
func parseConfigLine(s string) (c SubsysConfig, err error) {
ps := strings.SplitN(s, KvSpaceSeparator, 2)
ws := strings.SplitN(ps[0], SubSystemSeparator, 2)
c.SubSystem = ws[0]
if len(ws) == 2 {
c.Target = ws[1]
}
if len(ps) == 1 {
// No config KVs present.
return
}
// Parse keys and values
text := strings.TrimSpace(ps[1])
for len(text) > 0 {
kv := ConfigKV{}
kv.Key, text, err = parseConfigKey(text)
if err != nil {
return
}
kv.Value, text, err = parseConfigValue(text)
if err != nil {
return
}
c.AddConfigKV(kv)
}
return
}
func isEnvLine(s string) bool {
return strings.HasPrefix(s, EnvLinePrefix)
}
func isCommentLine(s string) bool {
return strings.HasPrefix(s, KvComment)
}
func getConfigLineSubSystemAndTarget(s string) (subSys, target string) {
words := strings.SplitN(s, KvSpaceSeparator, 2)
pieces := strings.SplitN(words[0], SubSystemSeparator, 2)
if len(pieces) == 2 {
return pieces[0], pieces[1]
}
// If no target is present, it is the default target.
return pieces[0], Default
}
// ParseServerConfigOutput - takes a server config output and returns a slice of
// configs. Depending on the server config get API request, this may return
// configuration info for one or more configuration sub-systems.
//
// A configuration subsystem in the server may have one or more subsystem
// targets (named instances of the sub-system, for example `notify_postgres`,
// `logger_webhook` or `identity_openid`). For every subsystem and target
// returned in `serverConfigOutput`, this function returns a separate
// `SubsysConfig` value in the output slice. The default target is returned as
// "" (empty string) by this function.
//
// Use the `Lookup()` function on the `SubsysConfig` type to query a
// subsystem-target pair for a configuration parameter. This returns the
// effective value (i.e. possibly overridden by an environment variable) of the
// configuration parameter on the server.
func ParseServerConfigOutput(serverConfigOutput string) ([]SubsysConfig, error) {
lines := strings.Split(serverConfigOutput, "\n")
// Clean up config lines
var configLines []string
for _, line := range lines {
line = strings.TrimSpace(line)
if line != "" {
configLines = append(configLines, line)
}
}
// Parse out config lines into groups corresponding to a single subsystem
// and target.
//
// How does it work? The server output is a list of lines, where each line
// may be one of:
//
// 1. A config line for a single subsystem (and optional target). For
// example, "site region=us-east-1" or "identity_openid:okta k1=v1 k2=v2".
//
// 2. A comment line showing an environment variable set on the server.
// For example "# MINIO_SITE_NAME=my-cluster".
//
// 3. Comment lines with other content. These will not start with `#
// MINIO_`.
//
// For the structured JSON representation, only lines of type 1 and 2 are
// required as they correspond to configuration specified by an
// administrator.
//
// Additionally, after ignoring lines of type 3 above:
//
// 1. environment variable lines for a subsystem (and target if present)
// appear consecutively.
//
// 2. exactly one config line for a subsystem and target immediately
// follows the env var lines for the same subsystem and target.
//
// The parsing logic below classifies each line and groups them by
// subsystem and target.
var configGroups [][]string
var subSystems []string
var targets []string
var currGroup []string
for _, line := range configLines {
if isEnvLine(line) {
currGroup = append(currGroup, line)
} else if isCommentLine(line) {
continue
} else {
subSys, target := getConfigLineSubSystemAndTarget(line)
currGroup = append(currGroup, line)
configGroups = append(configGroups, currGroup)
subSystems = append(subSystems, subSys)
targets = append(targets, target)
// Reset currGroup to collect lines for the next group.
currGroup = nil
}
}
res := make([]SubsysConfig, 0, len(configGroups))
for i, group := range configGroups {
sc := SubsysConfig{
SubSystem: subSystems[i],
}
if targets[i] != Default {
sc.Target = targets[i]
}
for _, line := range group {
if isEnvLine(line) {
ckv, err := parseEnvVarLine(line, subSystems[i], targets[i])
if err != nil {
return nil, err
}
// Since all env lines have distinct env vars, we can append
// here without risk of introducing any duplicates.
sc.AddConfigKV(ckv)
continue
}
// At this point all env vars for this subsys and target are already
// in `sc.KV`, so we fill in values if a ConfigKV entry for the
// config parameter is already present.
lineCfg, err := parseConfigLine(line)
if err != nil {
return nil, err
}
for _, kv := range lineCfg.KV {
idx, ok := sc.kvIndexMap[kv.Key]
if ok {
sc.KV[idx].Value = kv.Value
} else {
sc.AddConfigKV(kv)
}
}
}
res = append(res, sc)
}
return res, nil
}
golang-github-minio-madmin-go-3.0.104/parse-config_test.go 0000664 0000000 0000000 00000010430 14774251704 0023341 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"reflect"
"testing"
)
func TestParseServerConfigOutput(t *testing.T) {
tests := []struct {
Name string
Config string
Expected []SubsysConfig
ExpectedErr error
}{
{
Name: "single target config data only",
Config: "subnet license= api_key= proxy=",
Expected: []SubsysConfig{
{
SubSystem: SubnetSubSys,
Target: "",
KV: []ConfigKV{
{
Key: "license",
Value: "",
EnvOverride: nil,
},
{
Key: "api_key",
Value: "",
EnvOverride: nil,
},
{
Key: "proxy",
Value: "",
EnvOverride: nil,
},
},
kvIndexMap: map[string]int{
"license": 0,
"api_key": 1,
"proxy": 2,
},
},
},
},
{
Name: "single target config + env",
Config: `# MINIO_SUBNET_API_KEY=xxx
# MINIO_SUBNET_LICENSE=2
subnet license=1 api_key= proxy=`,
Expected: []SubsysConfig{
{
SubSystem: SubnetSubSys,
Target: "",
KV: []ConfigKV{
{
Key: "api_key",
Value: "",
EnvOverride: &EnvOverride{
Name: "MINIO_SUBNET_API_KEY",
Value: "xxx",
},
},
{
Key: "license",
Value: "1",
EnvOverride: &EnvOverride{
Name: "MINIO_SUBNET_LICENSE",
Value: "2",
},
},
{
Key: "proxy",
Value: "",
EnvOverride: nil,
},
},
kvIndexMap: map[string]int{
"license": 1,
"api_key": 0,
"proxy": 2,
},
},
},
},
{
Name: "multiple targets no env",
Config: `logger_webhook enable=off endpoint= auth_token= client_cert= client_key= queue_size=100000
logger_webhook:1 endpoint=http://localhost:8080/ auth_token= client_cert= client_key= queue_size=100000
`,
Expected: []SubsysConfig{
{
SubSystem: LoggerWebhookSubSys,
Target: "",
KV: []ConfigKV{
{
Key: "enable",
Value: "off",
},
{
Key: "endpoint",
Value: "",
},
{
Key: "auth_token",
Value: "",
},
{
Key: "client_cert",
Value: "",
},
{
Key: "client_key",
Value: "",
},
{
Key: "queue_size",
Value: "100000",
},
},
kvIndexMap: map[string]int{
"enable": 0,
"endpoint": 1,
"auth_token": 2,
"client_cert": 3,
"client_key": 4,
"queue_size": 5,
},
},
{
SubSystem: LoggerWebhookSubSys,
Target: "1",
KV: []ConfigKV{
{
Key: "endpoint",
Value: "http://localhost:8080/",
},
{
Key: "auth_token",
Value: "",
},
{
Key: "client_cert",
Value: "",
},
{
Key: "client_key",
Value: "",
},
{
Key: "queue_size",
Value: "100000",
},
},
kvIndexMap: map[string]int{
"endpoint": 0,
"auth_token": 1,
"client_cert": 2,
"client_key": 3,
"queue_size": 4,
},
},
},
},
}
for i, test := range tests {
r, err := ParseServerConfigOutput(test.Config)
if err != nil {
if err.Error() != test.ExpectedErr.Error() {
t.Errorf("Test %d (%s) got unexpected error: %v", i, test.Name, err)
}
// got an expected error.
continue
}
if !reflect.DeepEqual(test.Expected, r) {
t.Errorf("Test %d (%s) expected:\n%#v\nbut got:\n%#v\n", i, test.Name, test.Expected, r)
}
}
}
golang-github-minio-madmin-go-3.0.104/perf-client.go 0000664 0000000 0000000 00000006775 14774251704 0022156 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
"github.com/dustin/go-humanize"
)
// ClientPerfExtraTime - extra time reported by the server (for example time spent acquiring locks) that is excluded from the measurement
type ClientPerfExtraTime struct {
TimeSpent int64 `json:"dur,omitempty"`
}
// ClientPerfResult - stats from client to server
type ClientPerfResult struct {
Endpoint string `json:"endpoint,omitempty"`
Error string `json:"error,omitempty"`
BytesSend uint64
TimeSpent int64
}
// clientPerfReader - an io.Reader that streams random data and counts the bytes sent
type clientPerfReader struct {
count uint64
startTime time.Time
endTime time.Time
buf []byte
}
// Start - reader start
func (c *clientPerfReader) Start() {
buf := make([]byte, 128*humanize.KiByte)
rand.Read(buf)
c.buf = buf
c.startTime = time.Now()
}
// End - reader end
func (c *clientPerfReader) End() {
c.endTime = time.Now()
}
// Read - copies data from the internal buffer into p and counts the bytes sent
func (c *clientPerfReader) Read(p []byte) (n int, err error) {
n = copy(p, c.buf)
c.count += uint64(n)
return n, nil
}
var _ io.Reader = &clientPerfReader{}
const (
// MaxClientPerfTimeout is the maximum duration allowed for a client perf run
MaxClientPerfTimeout = time.Second * 30
// MinClientPerfTimeout is the minimum duration allowed for a client perf run
MinClientPerfTimeout = time.Second * 5
)
// ClientPerf - measures network throughput from the client to the MinIO servers
func (adm *AdminClient) ClientPerf(ctx context.Context, dur time.Duration) (result ClientPerfResult, err error) {
if dur > MaxClientPerfTimeout {
dur = MaxClientPerfTimeout
}
if dur < MinClientPerfTimeout {
dur = MinClientPerfTimeout
}
ctx, cancel := context.WithTimeout(ctx, dur)
defer cancel()
queryVals := make(url.Values)
reader := &clientPerfReader{}
reader.Start()
_, err = adm.executeMethod(ctx, http.MethodPost, requestData{
queryValues: queryVals,
relPath: adminAPIPrefix + "/speedtest/client/devnull",
contentReader: reader,
})
reader.End()
if errors.Is(err, context.DeadlineExceeded) && ctx.Err() != nil {
err = nil
}
resp, err := adm.executeMethod(context.Background(), http.MethodPost, requestData{
queryValues: queryVals,
relPath: adminAPIPrefix + "/speedtest/client/devnull/extratime",
})
if err != nil {
	return ClientPerfResult{}, err
}
defer closeResponse(resp)
var extraTime ClientPerfExtraTime
dec := json.NewDecoder(resp.Body)
err = dec.Decode(&extraTime)
if err != nil {
return ClientPerfResult{}, err
}
durSpend := reader.endTime.Sub(reader.startTime).Nanoseconds()
if extraTime.TimeSpent > 0 {
durSpend = durSpend - extraTime.TimeSpent
}
if durSpend <= 0 {
return ClientPerfResult{}, fmt.Errorf("unexpected time spent duration, possibly caused by clock (NTP) skew on the server")
}
return ClientPerfResult{
BytesSend: reader.count,
TimeSpent: durSpend,
Error: "",
Endpoint: adm.endpointURL.String(),
}, err
}
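// Example usage (illustrative sketch only; `adm` is an *AdminClient assumed to be created
// elsewhere, for example via madmin.New, against a reachable deployment):
//
//	res, err := adm.ClientPerf(context.Background(), 10*time.Second)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Printf("sent %d bytes to %s in %s\n",
//		res.BytesSend, res.Endpoint, time.Duration(res.TimeSpent))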
golang-github-minio-madmin-go-3.0.104/perf-drive.go 0000664 0000000 0000000 00000005236 14774251704 0022000 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strconv"
)
// DriveSpeedTestResult - result of the drive speed test
type DriveSpeedTestResult struct {
Version string `json:"version"`
Endpoint string `json:"endpoint"`
DrivePerf []DrivePerf `json:"drivePerf,omitempty"`
Error string `json:"string,omitempty"`
}
// DrivePerf - result of drive speed test on 1 drive mounted at path
type DrivePerf struct {
Path string `json:"path"`
ReadThroughput uint64 `json:"readThroughput"`
WriteThroughput uint64 `json:"writeThroughput"`
Error string `json:"error,omitempty"`
}
// DriveSpeedTestOpts provide configurable options for drive speedtest
type DriveSpeedTestOpts struct {
Serial bool // Run speed tests one drive at a time
BlockSize uint64 // BlockSize for read/write (default 4MiB)
FileSize uint64 // Total fileSize to write and read (default 1GiB)
}
// DriveSpeedtest - perform drive speedtest on the MinIO servers
func (adm *AdminClient) DriveSpeedtest(ctx context.Context, opts DriveSpeedTestOpts) (chan DriveSpeedTestResult, error) {
queryVals := make(url.Values)
if opts.Serial {
queryVals.Set("serial", "true")
}
queryVals.Set("blocksize", strconv.FormatUint(opts.BlockSize, 10))
queryVals.Set("filesize", strconv.FormatUint(opts.FileSize, 10))
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/speedtest/drive",
queryValues: queryVals,
})
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
ch := make(chan DriveSpeedTestResult)
go func() {
defer closeResponse(resp)
defer close(ch)
dec := json.NewDecoder(resp.Body)
for {
var result DriveSpeedTestResult
if err := dec.Decode(&result); err != nil {
return
}
select {
case ch <- result:
case <-ctx.Done():
return
}
}
}()
return ch, nil
}
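// Example usage (illustrative sketch only; `adm` is an *AdminClient assumed to be created
// elsewhere, and the block/file sizes are arbitrary sample values):
//
//	ch, err := adm.DriveSpeedtest(context.Background(), madmin.DriveSpeedTestOpts{
//		Serial:    true,
//		BlockSize: 4 << 20, // 4 MiB
//		FileSize:  1 << 30, // 1 GiB
//	})
//	if err != nil {
//		log.Fatalln(err)
//	}
//	for res := range ch { // results are streamed as servers complete
//		for _, d := range res.DrivePerf {
//			fmt.Println(res.Endpoint, d.Path, d.ReadThroughput, d.WriteThroughput)
//		}
//	}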
golang-github-minio-madmin-go-3.0.104/perf-net.go 0000664 0000000 0000000 00000003416 14774251704 0021453 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"time"
)
// NetperfNodeResult - stats from each server
type NetperfNodeResult struct {
Endpoint string `json:"endpoint"`
TX uint64 `json:"tx"`
RX uint64 `json:"rx"`
Error string `json:"error,omitempty"`
}
// NetperfResult - aggregate results from all servers
type NetperfResult struct {
NodeResults []NetperfNodeResult `json:"nodeResults"`
}
// Netperf - perform netperf on the MinIO servers
func (adm *AdminClient) Netperf(ctx context.Context, duration time.Duration) (result NetperfResult, err error) {
queryVals := make(url.Values)
queryVals.Set("duration", duration.String())
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/speedtest/net",
queryValues: queryVals,
})
if err != nil {
	return result, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return result, httpRespToErrorResponse(resp)
}
err = json.NewDecoder(resp.Body).Decode(&result)
return result, err
}
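// Example usage (illustrative sketch only; `adm` is an *AdminClient assumed to be created elsewhere):
//
//	res, err := adm.Netperf(context.Background(), 10*time.Second)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	for _, node := range res.NodeResults {
//		fmt.Println(node.Endpoint, node.TX, node.RX, node.Error)
//	}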
golang-github-minio-madmin-go-3.0.104/perf-object.go 0000664 0000000 0000000 00000007773 14774251704 0022145 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/url"
"strconv"
"time"
)
// SpeedTestStatServer - stats of a server
type SpeedTestStatServer struct {
Endpoint string `json:"endpoint"`
ThroughputPerSec uint64 `json:"throughputPerSec"`
ObjectsPerSec uint64 `json:"objectsPerSec"`
Err string `json:"err"`
}
// SpeedTestStats - stats of all the servers
type SpeedTestStats struct {
ThroughputPerSec uint64 `json:"throughputPerSec"`
ObjectsPerSec uint64 `json:"objectsPerSec"`
Response Timings `json:"responseTime"`
TTFB Timings `json:"ttfb,omitempty"`
Servers []SpeedTestStatServer `json:"servers"`
}
// SpeedTestResult - result of the speedtest() call
type SpeedTestResult struct {
Version string `json:"version"`
Servers int `json:"servers"`
Disks int `json:"disks"`
Size int `json:"size"`
Concurrent int `json:"concurrent"`
PUTStats SpeedTestStats
GETStats SpeedTestStats
}
// SpeedtestOpts provide configurable options for speedtest
type SpeedtestOpts struct {
Size int // Object size used in speed test
Concurrency int // Concurrency used in speed test
Duration time.Duration // Total duration of the speed test
Autotune bool // Enable autotuning
StorageClass string // Choose type of storage-class to be used while performing I/O
Bucket string // Choose a custom bucket name while performing I/O
NoClear bool // Avoid cleanup after running an object speed test
EnableSha256 bool // Enable calculating sha256 for uploads
}
// Speedtest - perform speedtest on the MinIO servers
func (adm *AdminClient) Speedtest(ctx context.Context, opts SpeedtestOpts) (chan SpeedTestResult, error) {
if !opts.Autotune {
if opts.Duration <= time.Second {
return nil, errors.New("duration must be greater than a second")
}
if opts.Size <= 0 {
return nil, errors.New("size must be greater than 0 bytes")
}
if opts.Concurrency <= 0 {
return nil, errors.New("concurrency must be greater than 0")
}
}
queryVals := make(url.Values)
if opts.Size > 0 {
queryVals.Set("size", strconv.Itoa(opts.Size))
}
if opts.Duration > 0 {
queryVals.Set("duration", opts.Duration.String())
}
if opts.Concurrency > 0 {
queryVals.Set("concurrent", strconv.Itoa(opts.Concurrency))
}
if opts.Bucket != "" {
queryVals.Set("bucket", opts.Bucket)
}
if opts.Autotune {
queryVals.Set("autotune", "true")
}
if opts.NoClear {
queryVals.Set("noclear", "true")
}
if opts.EnableSha256 {
queryVals.Set("enableSha256", "true")
}
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/speedtest",
queryValues: queryVals,
})
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
ch := make(chan SpeedTestResult)
go func() {
defer closeResponse(resp)
defer close(ch)
dec := json.NewDecoder(resp.Body)
for {
var result SpeedTestResult
if err := dec.Decode(&result); err != nil {
return
}
select {
case ch <- result:
case <-ctx.Done():
return
}
}
}()
return ch, nil
}
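// Example usage (illustrative sketch only; `adm` is an *AdminClient assumed to be created
// elsewhere, and the size/concurrency values are arbitrary samples):
//
//	ch, err := adm.Speedtest(context.Background(), madmin.SpeedtestOpts{
//		Size:        64 << 20, // 64 MiB objects
//		Concurrency: 32,
//		Duration:    10 * time.Second,
//	})
//	if err != nil {
//		log.Fatalln(err)
//	}
//	var final madmin.SpeedTestResult
//	for r := range ch { // interim results are streamed; keep the last one
//		final = r
//	}
//	fmt.Println(final.PUTStats.ThroughputPerSec, final.GETStats.ThroughputPerSec)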
golang-github-minio-madmin-go-3.0.104/perf-site-replication.go 0000664 0000000 0000000 00000004163 14774251704 0024140 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"time"
)
// SiteNetPerfNodeResult - stats from each server
type SiteNetPerfNodeResult struct {
Endpoint string `json:"endpoint"`
TX uint64 `json:"tx"` // transfer rate in bytes
TXTotalDuration time.Duration `json:"txTotalDuration"`
RX uint64 `json:"rx"` // received rate in bytes
RXTotalDuration time.Duration `json:"rxTotalDuration"`
TotalConn uint64 `json:"totalConn"`
Error string `json:"error,omitempty"`
}
// SiteNetPerfResult - aggregate results from all servers
type SiteNetPerfResult struct {
NodeResults []SiteNetPerfNodeResult `json:"nodeResults"`
}
// SiteReplicationPerf - perform a site replication network performance test on the MinIO servers
func (adm *AdminClient) SiteReplicationPerf(ctx context.Context, duration time.Duration) (result SiteNetPerfResult, err error) {
queryVals := make(url.Values)
queryVals.Set("duration", duration.String())
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/speedtest/site",
queryValues: queryVals,
})
if err != nil {
return result, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return result, httpRespToErrorResponse(resp)
}
err = json.NewDecoder(resp.Body).Decode(&result)
return result, err
}
golang-github-minio-madmin-go-3.0.104/policy-commands.go 0000664 0000000 0000000 00000021263 14774251704 0023031 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"
"time"
)
// InfoCannedPolicy - expand canned policy into JSON structure.
//
// Deprecated: Use InfoCannedPolicyV2 instead.
func (adm *AdminClient) InfoCannedPolicy(ctx context.Context, policyName string) ([]byte, error) {
queryValues := url.Values{}
queryValues.Set("name", policyName)
reqData := requestData{
relPath: adminAPIPrefix + "/info-canned-policy",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/info-canned-policy
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
return io.ReadAll(resp.Body)
}
// PolicyInfo contains information on a policy.
type PolicyInfo struct {
PolicyName string
Policy json.RawMessage
CreateDate time.Time `json:",omitempty"`
UpdateDate time.Time `json:",omitempty"`
}
// MarshalJSON - custom JSON marshaler for PolicyInfo that omits zero create/update timestamps
func (pi PolicyInfo) MarshalJSON() ([]byte, error) {
type aliasPolicyInfo PolicyInfo // needed to avoid recursive marshal
if pi.CreateDate.IsZero() && pi.UpdateDate.IsZero() {
return json.Marshal(&struct {
PolicyName string
Policy json.RawMessage
}{
PolicyName: pi.PolicyName,
Policy: pi.Policy,
})
}
return json.Marshal(aliasPolicyInfo(pi))
}
// InfoCannedPolicyV2 - get info on a policy including timestamps and policy json.
func (adm *AdminClient) InfoCannedPolicyV2(ctx context.Context, policyName string) (*PolicyInfo, error) {
queryValues := url.Values{}
queryValues.Set("name", policyName)
queryValues.Set("v", "2")
reqData := requestData{
relPath: adminAPIPrefix + "/info-canned-policy",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/info-canned-policy
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var p PolicyInfo
err = json.Unmarshal(data, &p)
return &p, err
}
// ListCannedPolicies - list all configured canned policies.
func (adm *AdminClient) ListCannedPolicies(ctx context.Context) (map[string]json.RawMessage, error) {
reqData := requestData{
relPath: adminAPIPrefix + "/list-canned-policies",
}
// Execute GET on /minio/admin/v3/list-canned-policies
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
policies := make(map[string]json.RawMessage)
if err = json.Unmarshal(respBytes, &policies); err != nil {
return nil, err
}
return policies, nil
}
// RemoveCannedPolicy - removes a canned policy.
func (adm *AdminClient) RemoveCannedPolicy(ctx context.Context, policyName string) error {
queryValues := url.Values{}
queryValues.Set("name", policyName)
reqData := requestData{
relPath: adminAPIPrefix + "/remove-canned-policy",
queryValues: queryValues,
}
// Execute DELETE on /minio/admin/v3/remove-canned-policy to remove policy.
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// AddCannedPolicy - adds a canned policy.
func (adm *AdminClient) AddCannedPolicy(ctx context.Context, policyName string, policy []byte) error {
if len(policy) == 0 {
return ErrInvalidArgument("policy input cannot be empty")
}
queryValues := url.Values{}
queryValues.Set("name", policyName)
reqData := requestData{
relPath: adminAPIPrefix + "/add-canned-policy",
queryValues: queryValues,
content: policy,
}
// Execute PUT on /minio/admin/v3/add-canned-policy to set policy.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
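// Example usage (illustrative sketch only; the policy document and the "readonly-mybucket"
// name are samples, and `adm` is an *AdminClient assumed to be created elsewhere):
//
//	ctx := context.Background()
//	policy := []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
//		`"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/*"]}]}`)
//	if err := adm.AddCannedPolicy(ctx, "readonly-mybucket", policy); err != nil {
//		log.Fatalln(err)
//	}
//	info, err := adm.InfoCannedPolicyV2(ctx, "readonly-mybucket")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Println(info.PolicyName, string(info.Policy))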
// SetPolicy - sets the policy for a user or a group.
//
// Deprecated: Use AttachPolicy/DetachPolicy to update builtin user policies
// instead. Use AttachPolicyLDAP/DetachPolicyLDAP to update LDAP user policies.
// This function and the corresponding server API will be removed in future
// releases.
func (adm *AdminClient) SetPolicy(ctx context.Context, policyName, entityName string, isGroup bool) error {
queryValues := url.Values{}
queryValues.Set("policyName", policyName)
queryValues.Set("userOrGroup", entityName)
groupStr := "false"
if isGroup {
groupStr = "true"
}
queryValues.Set("isGroup", groupStr)
reqData := requestData{
relPath: adminAPIPrefix + "/set-user-or-group-policy",
queryValues: queryValues,
}
// Execute PUT on /minio/admin/v3/set-user-or-group-policy to set policy.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
func (adm *AdminClient) attachOrDetachPolicyBuiltin(ctx context.Context, isAttach bool,
r PolicyAssociationReq,
) (PolicyAssociationResp, error) {
err := r.IsValid()
if err != nil {
return PolicyAssociationResp{}, err
}
plainBytes, err := json.Marshal(r)
if err != nil {
return PolicyAssociationResp{}, err
}
encBytes, err := EncryptData(adm.getSecretKey(), plainBytes)
if err != nil {
return PolicyAssociationResp{}, err
}
suffix := "detach"
if isAttach {
suffix = "attach"
}
h := make(http.Header, 1)
h.Add("Content-Type", "application/octet-stream")
reqData := requestData{
customHeaders: h,
relPath: adminAPIPrefix + "/idp/builtin/policy/" + suffix,
content: encBytes,
}
resp, err := adm.executeMethod(ctx, http.MethodPost, reqData)
defer closeResponse(resp)
if err != nil {
return PolicyAssociationResp{}, err
}
// Older minio does not send a response, so we handle that case.
switch resp.StatusCode {
case http.StatusOK:
// Newer/current minio sends a result.
content, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return PolicyAssociationResp{}, err
}
rsp := PolicyAssociationResp{}
err = json.Unmarshal(content, &rsp)
return rsp, err
case http.StatusCreated, http.StatusNoContent:
// Older minio - no result sent. TODO(aditya): Remove this case after
// newer minio is released.
return PolicyAssociationResp{}, nil
default:
// Error response case.
return PolicyAssociationResp{}, httpRespToErrorResponse(resp)
}
}
// AttachPolicy - attach policies to a user or group.
func (adm *AdminClient) AttachPolicy(ctx context.Context, r PolicyAssociationReq) (PolicyAssociationResp, error) {
return adm.attachOrDetachPolicyBuiltin(ctx, true, r)
}
// DetachPolicy - detach policies from a user or group.
func (adm *AdminClient) DetachPolicy(ctx context.Context, r PolicyAssociationReq) (PolicyAssociationResp, error) {
return adm.attachOrDetachPolicyBuiltin(ctx, false, r)
}
// GetPolicyEntities - returns builtin policy entities.
func (adm *AdminClient) GetPolicyEntities(ctx context.Context, q PolicyEntitiesQuery) (r PolicyEntitiesResult, err error) {
params := make(url.Values)
params["user"] = q.Users
params["group"] = q.Groups
params["policy"] = q.Policy
reqData := requestData{
relPath: adminAPIPrefix + "/idp/builtin/policy-entities",
queryValues: params,
}
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return r, err
}
if resp.StatusCode != http.StatusOK {
return r, httpRespToErrorResponse(resp)
}
content, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return r, err
}
err = json.Unmarshal(content, &r)
return r, err
}
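// Example usage (illustrative sketch only; "readwrite" is a sample policy name and `adm` is
// an *AdminClient assumed to be created elsewhere):
//
//	res, err := adm.GetPolicyEntities(context.Background(), madmin.PolicyEntitiesQuery{
//		Policy: []string{"readwrite"},
//	})
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Printf("%+v\n", res)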
golang-github-minio-madmin-go-3.0.104/policy-commands_test.go 0000664 0000000 0000000 00000004606 14774251704 0024072 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"bytes"
"encoding/json"
"testing"
"time"
)
var (
withCreateDate = []byte(`{"PolicyName":"readwrite","Policy":{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["admin:*"]},{"Effect":"Allow","Action":["s3:*"],"Resource":["arn:aws:s3:::*"]}]},"CreateDate":"2020-03-15T10:10:10Z","UpdateDate":"2021-03-15T10:10:10Z"}`)
withoutCreateDate = []byte(`{"PolicyName":"readwrite","Policy":{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["admin:*"]},{"Effect":"Allow","Action":["s3:*"],"Resource":["arn:aws:s3:::*"]}]}}`)
)
func TestPolicyInfo(t *testing.T) {
testCases := []struct {
pi *PolicyInfo
expectedBuf []byte
}{
{
&PolicyInfo{
PolicyName: "readwrite",
Policy: []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["admin:*"]},{"Effect":"Allow","Action":["s3:*"],"Resource":["arn:aws:s3:::*"]}]}`),
CreateDate: time.Date(2020, time.March, 15, 10, 10, 10, 0, time.UTC),
UpdateDate: time.Date(2021, time.March, 15, 10, 10, 10, 0, time.UTC),
},
withCreateDate,
},
{
&PolicyInfo{
PolicyName: "readwrite",
Policy: []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["admin:*"]},{"Effect":"Allow","Action":["s3:*"],"Resource":["arn:aws:s3:::*"]}]}`),
},
withoutCreateDate,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
buf, err := json.Marshal(testCase.pi)
if err != nil {
t.Error(err)
}
if !bytes.Equal(buf, testCase.expectedBuf) {
t.Errorf("expected %s, got %s", string(testCase.expectedBuf), string(buf))
}
})
}
}
golang-github-minio-madmin-go-3.0.104/profiling-commands.go 0000664 0000000 0000000 00000010417 14774251704 0023522 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
)
// ProfilerType represents the profiler type
// passed to the profiler subsystem.
type ProfilerType string
// Different supported profiler types.
const (
ProfilerCPU ProfilerType = "cpu" // represents CPU profiler type
ProfilerCPUIO ProfilerType = "cpuio" // represents CPU with IO (fgprof) profiler type
ProfilerMEM ProfilerType = "mem" // represents MEM profiler type
ProfilerBlock ProfilerType = "block" // represents Block profiler type
ProfilerMutex ProfilerType = "mutex" // represents Mutex profiler type
ProfilerTrace ProfilerType = "trace" // represents Trace profiler type
ProfilerThreads ProfilerType = "threads" // represents ThreadCreate profiler type
ProfilerGoroutines ProfilerType = "goroutines" // represents Goroutine dumps.
ProfilerRuntime ProfilerType = "runtime" // Include runtime metrics
)
// StartProfilingResult holds the result of starting
// profiler result in a given node.
type StartProfilingResult struct {
NodeName string `json:"nodeName"`
Success bool `json:"success"`
Error string `json:"error"`
}
// StartProfiling makes an admin call to remotely start profiling on a
// standalone server or the whole cluster in case of a distributed setup.
//
// Deprecated: use Profile API instead
func (adm *AdminClient) StartProfiling(ctx context.Context, profiler ProfilerType) ([]StartProfilingResult, error) {
v := url.Values{}
v.Set("profilerType", string(profiler))
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/profiling/start",
queryValues: v,
},
)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
jsonResult, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var startResults []StartProfilingResult
err = json.Unmarshal(jsonResult, &startResults)
if err != nil {
return nil, err
}
return startResults, nil
}
// DownloadProfilingData makes an admin call to download profiling data of a
// standalone server or of the whole cluster in case of a distributed setup.
//
// Deprecated: use Profile API instead
func (adm *AdminClient) DownloadProfilingData(ctx context.Context) (io.ReadCloser, error) {
path := adminAPIPrefix + "/profiling/download"
resp, err := adm.executeMethod(ctx,
http.MethodGet, requestData{
relPath: path,
},
)
if err != nil {
closeResponse(resp)
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
if resp.Body == nil {
return nil, errors.New("body is nil")
}
return resp.Body, nil
}
// Profile makes an admin call to remotely start profiling on a standalone
// server or the whole cluster in case of a distributed setup for a specified duration.
func (adm *AdminClient) Profile(ctx context.Context, profiler ProfilerType, duration time.Duration) (io.ReadCloser, error) {
v := url.Values{}
v.Set("profilerType", string(profiler))
v.Set("duration", duration.String())
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/profile",
queryValues: v,
},
)
if err != nil {
closeResponse(resp)
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
if resp.Body == nil {
return nil, errors.New("body is nil")
}
return resp.Body, nil
}
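// Example usage (illustrative sketch only; the output file name is arbitrary and `adm` is an
// *AdminClient assumed to be created elsewhere):
//
//	rc, err := adm.Profile(context.Background(), madmin.ProfilerCPU, 30*time.Second)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer rc.Close()
//	f, err := os.Create("minio-profile-data")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer f.Close()
//	if _, err := io.Copy(f, rc); err != nil { // stream the raw profiling data to disk
//		log.Fatalln(err)
//	}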
golang-github-minio-madmin-go-3.0.104/prometheus_metrics.go 0000664 0000000 0000000 00000006206 14774251704 0023654 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"fmt"
"io"
"net/http"
"github.com/dustin/go-humanize"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/prom2json"
)
// MetricsRespBodyLimit sets an upper limit on the size of the metrics
// response body that this library will read.
var (
MetricsRespBodyLimit = int64(humanize.GiByte)
)
// NodeMetrics - returns Node Metrics in Prometheus format
//
// The client needs to be configured with the endpoint of the desired node
func (client *MetricsClient) NodeMetrics(ctx context.Context) ([]*prom2json.Family, error) {
return client.GetMetrics(ctx, "node")
}
// ClusterMetrics - returns Cluster Metrics in Prometheus format
func (client *MetricsClient) ClusterMetrics(ctx context.Context) ([]*prom2json.Family, error) {
return client.GetMetrics(ctx, "cluster")
}
// BucketMetrics - returns Bucket Metrics in Prometheus format
func (client *MetricsClient) BucketMetrics(ctx context.Context) ([]*prom2json.Family, error) {
return client.GetMetrics(ctx, "bucket")
}
// ResourceMetrics - returns Resource Metrics in Prometheus format
func (client *MetricsClient) ResourceMetrics(ctx context.Context) ([]*prom2json.Family, error) {
return client.GetMetrics(ctx, "resource")
}
// GetMetrics - returns Metrics of given subsystem in Prometheus format
func (client *MetricsClient) GetMetrics(ctx context.Context, subSystem string) ([]*prom2json.Family, error) {
reqData := metricsRequestData{
relativePath: "/v2/metrics/" + subSystem,
}
// Execute GET on /minio/v2/metrics/<subSystem>
resp, err := client.executeGetRequest(ctx, reqData)
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
return ParsePrometheusResults(io.LimitReader(resp.Body, MetricsRespBodyLimit))
}
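// ParsePrometheusResults parses Prometheus text exposition format from the
// given reader and returns the metric families in prom2json form.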
func ParsePrometheusResults(reader io.Reader) (results []*prom2json.Family, err error) {
// We could do further content-type checks here, but the
// fallback for now will anyway be the text format
// version 0.0.4, so just go for it and see if it works.
var parser expfmt.TextParser
metricFamilies, err := parser.TextToMetricFamilies(reader)
if err != nil {
return nil, fmt.Errorf("reading text format failed: %v", err)
}
results = make([]*prom2json.Family, 0, len(metricFamilies))
for _, mf := range metricFamilies {
results = append(results, prom2json.NewFamily(mf))
}
return results, nil
}
golang-github-minio-madmin-go-3.0.104/prometheus_metrics_test.go 0000664 0000000 0000000 00000004212 14774251704 0024706 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"strings"
"testing"
"github.com/prometheus/prom2json"
)
func TestParsePrometheusResultsReturnsPrometheusObjectsFromStringReader(t *testing.T) {
prometheusResults := `# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds_sum 0.248349766
go_gc_duration_seconds_count 397
`
myReader := strings.NewReader(prometheusResults)
results, err := ParsePrometheusResults(myReader)
if err != nil {
t.Errorf("error not expected, got: %v", err)
}
expectedResults := []*prom2json.Family{
{
Name: "go_gc_duration_seconds",
Type: "SUMMARY",
Help: "A summary of the pause duration of garbage collection cycles.",
Metrics: []interface{}{
prom2json.Summary{}, // We just verify length, not content
},
},
}
if len(results) != len(expectedResults) {
t.Errorf("len(results): %d not equal to len(expectedResults): %d", len(results), len(expectedResults))
}
for i, result := range results {
if result.Name != expectedResults[i].Name {
t.Errorf("result.Name: %v not equal to expectedResults[i].Name: %v", result.Name, expectedResults[i].Name)
}
if len(result.Metrics) != len(expectedResults[i].Metrics) {
t.Errorf("len(result.Metrics): %d not equal to len(expectedResults[i].Metrics): %d", len(result.Metrics), len(expectedResults[i].Metrics))
}
}
}
golang-github-minio-madmin-go-3.0.104/quota-commands.go 0000664 0000000 0000000 00000006323 14774251704 0022663 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"
)
// QuotaType represents bucket quota type
type QuotaType string
const (
// HardQuota specifies a hard quota of usage for bucket
HardQuota QuotaType = "hard"
)
// IsValid returns true if quota type is one of Hard
func (t QuotaType) IsValid() bool {
return t == HardQuota
}
// BucketQuota holds bucket quota restrictions
type BucketQuota struct {
Quota uint64 `json:"quota"` // Deprecated Aug 2023
Size uint64 `json:"size"` // Indicates maximum size allowed per bucket
Rate uint64 `json:"rate"` // Indicates bandwidth rate allocated per bucket
Requests uint64 `json:"requests"` // Indicates number of requests allocated per bucket
Type QuotaType `json:"quotatype,omitempty"`
}
// IsValid returns false if the quota configuration is invalid;
// an empty quota (Quota == 0) is always considered valid.
func (q BucketQuota) IsValid() bool {
if q.Quota > 0 {
return q.Type.IsValid()
}
// Empty configs are valid.
return true
}
// GetBucketQuota - gets the quota configuration for a bucket
func (adm *AdminClient) GetBucketQuota(ctx context.Context, bucket string) (q BucketQuota, err error) {
queryValues := url.Values{}
queryValues.Set("bucket", bucket)
reqData := requestData{
relPath: adminAPIPrefix + "/get-bucket-quota",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/get-bucket-quota
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return q, err
}
if resp.StatusCode != http.StatusOK {
return q, httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return q, err
}
if err = json.Unmarshal(b, &q); err != nil {
return q, err
}
return q, nil
}
// SetBucketQuota - sets a bucket's quota; setting the quota to '0'
// disables the quota.
func (adm *AdminClient) SetBucketQuota(ctx context.Context, bucket string, quota *BucketQuota) error {
data, err := json.Marshal(quota)
if err != nil {
return err
}
queryValues := url.Values{}
queryValues.Set("bucket", bucket)
reqData := requestData{
relPath: adminAPIPrefix + "/set-bucket-quota",
queryValues: queryValues,
content: data,
}
// Execute PUT on /minio/admin/v3/set-bucket-quota to set quota for a bucket.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
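// Example usage (illustrative sketch only; "mybucket" and the 1 GiB limit are sample values,
// and `adm` is an *AdminClient assumed to be created elsewhere):
//
//	ctx := context.Background()
//	quota := &madmin.BucketQuota{Size: 1 << 30, Type: madmin.HardQuota} // 1 GiB hard quota
//	if err := adm.SetBucketQuota(ctx, "mybucket", quota); err != nil {
//		log.Fatalln(err)
//	}
//	got, err := adm.GetBucketQuota(ctx, "mybucket")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Println(got.Size, got.Type)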
golang-github-minio-madmin-go-3.0.104/rebalance.go 0000664 0000000 0000000 00000007602 14774251704 0021650 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"time"
)
// RebalPoolProgress contains metrics such as the number of objects, versions, and bytes rebalanced so far.
type RebalPoolProgress struct {
NumObjects uint64 `json:"objects"`
NumVersions uint64 `json:"versions"`
Bytes uint64 `json:"bytes"`
Bucket string `json:"bucket"`
Object string `json:"object"`
Elapsed time.Duration `json:"elapsed"`
ETA time.Duration `json:"eta"`
}
// RebalancePoolStatus contains metrics of a rebalance operation on a given pool
type RebalancePoolStatus struct {
ID int `json:"id"` // Pool index (zero-based)
Status string `json:"status"` // Active if rebalance is running, empty otherwise
Used float64 `json:"used"` // Percentage used space
Progress RebalPoolProgress `json:"progress,omitempty"` // is empty when rebalance is not running
}
// RebalanceStatus contains metrics and progress related information on all pools
type RebalanceStatus struct {
ID string // identifies the ongoing rebalance operation by a uuid
StoppedAt time.Time `json:"stoppedAt,omitempty"`
Pools []RebalancePoolStatus `json:"pools"` // contains all pools, including inactive
}
// RebalanceStart starts a rebalance operation if one isn't in progress already
func (adm *AdminClient) RebalanceStart(ctx context.Context) (id string, err error) {
// Execute POST on /minio/admin/v3/rebalance/start to start a rebalance operation.
var resp *http.Response
resp, err = adm.executeMethod(ctx,
http.MethodPost,
requestData{relPath: adminAPIPrefix + "/rebalance/start"})
defer closeResponse(resp)
if err != nil {
return id, err
}
if resp.StatusCode != http.StatusOK {
return id, httpRespToErrorResponse(resp)
}
var rebalInfo struct {
ID string `json:"id"`
}
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return id, err
}
err = json.Unmarshal(respBytes, &rebalInfo)
if err != nil {
return id, err
}
return rebalInfo.ID, nil
}
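// RebalanceStatus returns the status of an ongoing rebalance operation,
// including per-pool progress.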
func (adm *AdminClient) RebalanceStatus(ctx context.Context) (r RebalanceStatus, err error) {
// Execute GET on /minio/admin/v3/rebalance/status to get status of an ongoing rebalance operation.
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{relPath: adminAPIPrefix + "/rebalance/status"})
defer closeResponse(resp)
if err != nil {
return r, err
}
if resp.StatusCode != http.StatusOK {
return r, httpRespToErrorResponse(resp)
}
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return r, err
}
err = json.Unmarshal(respBytes, &r)
if err != nil {
return r, err
}
return r, nil
}
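// RebalanceStop stops an ongoing rebalance operation, if any.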
func (adm *AdminClient) RebalanceStop(ctx context.Context) error {
// Execute POST on /minio/admin/v3/rebalance/stop to stop an ongoing rebalance operation.
resp, err := adm.executeMethod(ctx,
http.MethodPost,
requestData{relPath: adminAPIPrefix + "/rebalance/stop"})
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
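// Example usage (illustrative sketch only; `adm` is an *AdminClient assumed to be created elsewhere):
//
//	ctx := context.Background()
//	id, err := adm.RebalanceStart(ctx)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Println("rebalance started:", id)
//	status, err := adm.RebalanceStatus(ctx)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	for _, p := range status.Pools {
//		fmt.Printf("pool %d: status=%s used=%.2f%%\n", p.ID, p.Status, p.Used)
//	}
//	// Later, to halt the operation:
//	// _ = adm.RebalanceStop(ctx)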
golang-github-minio-madmin-go-3.0.104/register.go 0000664 0000000 0000000 00000004365 14774251704 0021563 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
// ClusterRegistrationReq - JSON payload of the subnet api for cluster registration
// Contains a registration token created by base64 encoding of the registration info
type ClusterRegistrationReq struct {
Token string `json:"token"`
}
// ClusterRegistrationInfo - Information stored in the cluster registration token
type ClusterRegistrationInfo struct {
DeploymentID string `json:"deployment_id"`
ClusterName string `json:"cluster_name"`
UsedCapacity uint64 `json:"used_capacity"`
Info ClusterInfo `json:"info"`
}
// ClusterInfo - The "info" sub-node of the cluster registration information struct
// Intended to be extensible i.e. more fields will be added as and when required
type ClusterInfo struct {
MinioVersion string `json:"minio_version"`
NoOfServerPools int `json:"no_of_server_pools"`
NoOfServers int `json:"no_of_servers"`
NoOfDrives int `json:"no_of_drives"`
NoOfBuckets uint64 `json:"no_of_buckets"`
NoOfObjects uint64 `json:"no_of_objects"`
TotalDriveSpace uint64 `json:"total_drive_space"`
UsedDriveSpace uint64 `json:"used_drive_space"`
Edition string `json:"edition"`
}
// SubnetLoginReq - JSON payload of the SUBNET login api
type SubnetLoginReq struct {
Username string `json:"username"`
Password string `json:"password"`
}
// SubnetMFAReq - JSON payload of the SUBNET mfa api
type SubnetMFAReq struct {
Username string `json:"username"`
OTP string `json:"otp"`
Token string `json:"token"`
}
golang-github-minio-madmin-go-3.0.104/remote-target-commands.go 0000664 0000000 0000000 00000017331 14774251704 0024312 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
)
// ARN is a struct to define arn.
type ARN struct {
Type ServiceType
ID string
Region string
Bucket string
}
// Empty returns true if arn struct is empty
func (a ARN) Empty() bool {
return !a.Type.IsValid()
}
func (a ARN) String() string {
return fmt.Sprintf("arn:minio:%s:%s:%s:%s", a.Type, a.Region, a.ID, a.Bucket)
}
// ParseARN return ARN struct from string in arn format.
func ParseARN(s string) (*ARN, error) {
// ARN must be in the format of arn:minio::::
if !strings.HasPrefix(s, "arn:minio:") {
return nil, fmt.Errorf("invalid ARN %s", s)
}
tokens := strings.Split(s, ":")
if len(tokens) != 6 {
return nil, fmt.Errorf("invalid ARN %s", s)
}
if tokens[4] == "" || tokens[5] == "" {
return nil, fmt.Errorf("invalid ARN %s", s)
}
return &ARN{
Type: ServiceType(tokens[2]),
Region: tokens[3],
ID: tokens[4],
Bucket: tokens[5],
}, nil
}
// ListRemoteTargets - gets target(s) for this bucket
func (adm *AdminClient) ListRemoteTargets(ctx context.Context, bucket, arnType string) (targets []BucketTarget, err error) {
queryValues := url.Values{}
queryValues.Set("bucket", bucket)
queryValues.Set("type", arnType)
reqData := requestData{
relPath: adminAPIPrefix + "/list-remote-targets",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/list-remote-targets
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return targets, err
}
if resp.StatusCode != http.StatusOK {
return targets, httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return targets, err
}
if err = json.Unmarshal(b, &targets); err != nil {
return targets, err
}
return targets, nil
}
// SetRemoteTarget sets up a remote target for this bucket
func (adm *AdminClient) SetRemoteTarget(ctx context.Context, bucket string, target *BucketTarget) (string, error) {
data, err := json.Marshal(target)
if err != nil {
return "", err
}
encData, err := EncryptData(adm.getSecretKey(), data)
if err != nil {
return "", err
}
queryValues := url.Values{}
queryValues.Set("bucket", bucket)
reqData := requestData{
relPath: adminAPIPrefix + "/set-remote-target",
queryValues: queryValues,
content: encData,
}
// Execute PUT on /minio/admin/v3/set-remote-target to set a target for this bucket of specific arn type.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
var arn string
if err = json.Unmarshal(b, &arn); err != nil {
return "", err
}
return arn, nil
}
// TargetUpdateType - type of update on the remote target
type TargetUpdateType int
const (
// CredentialsUpdateType update creds
CredentialsUpdateType TargetUpdateType = 1 + iota
// SyncUpdateType update synchronous replication setting
SyncUpdateType
// ProxyUpdateType update proxy setting
ProxyUpdateType
// BandwidthLimitUpdateType update bandwidth limit
BandwidthLimitUpdateType
// HealthCheckDurationUpdateType update health check duration
HealthCheckDurationUpdateType
// PathUpdateType update Path
PathUpdateType
// ResetUpdateType sets ResetBeforeDate and ResetID on a bucket target
ResetUpdateType
// EdgeUpdateType sets bucket target as a recipient of edge traffic
EdgeUpdateType
// EdgeExpiryUpdateType sets bucket target to sync before expiry
EdgeExpiryUpdateType
)
// GetTargetUpdateOps returns a slice of update operations being
// performed with `mc admin bucket remote edit`
func GetTargetUpdateOps(values url.Values) []TargetUpdateType {
var ops []TargetUpdateType
if values.Get("update") != "true" {
return ops
}
if values.Get("creds") == "true" {
ops = append(ops, CredentialsUpdateType)
}
if values.Get("sync") == "true" {
ops = append(ops, SyncUpdateType)
}
if values.Get("proxy") == "true" {
ops = append(ops, ProxyUpdateType)
}
if values.Get("healthcheck") == "true" {
ops = append(ops, HealthCheckDurationUpdateType)
}
if values.Get("bandwidth") == "true" {
ops = append(ops, BandwidthLimitUpdateType)
}
if values.Get("path") == "true" {
ops = append(ops, PathUpdateType)
}
if values.Get("edge") == "true" {
ops = append(ops, EdgeUpdateType)
}
if values.Get("edgeSyncBeforeExpiry") == "true" {
ops = append(ops, EdgeExpiryUpdateType)
}
return ops
}
// UpdateRemoteTarget updates credentials for a remote bucket target
func (adm *AdminClient) UpdateRemoteTarget(ctx context.Context, target *BucketTarget, ops ...TargetUpdateType) (string, error) {
if target == nil {
return "", fmt.Errorf("target cannot be nil")
}
data, err := json.Marshal(target)
if err != nil {
return "", err
}
encData, err := EncryptData(adm.getSecretKey(), data)
if err != nil {
return "", err
}
queryValues := url.Values{}
queryValues.Set("bucket", target.SourceBucket)
queryValues.Set("update", "true")
for _, op := range ops {
switch op {
case CredentialsUpdateType:
queryValues.Set("creds", "true")
case SyncUpdateType:
queryValues.Set("sync", "true")
case ProxyUpdateType:
queryValues.Set("proxy", "true")
case BandwidthLimitUpdateType:
queryValues.Set("bandwidth", "true")
case HealthCheckDurationUpdateType:
queryValues.Set("healthcheck", "true")
case PathUpdateType:
queryValues.Set("path", "true")
case EdgeUpdateType:
queryValues.Set("edge", "true")
case EdgeExpiryUpdateType:
queryValues.Set("edgeSyncBeforeExpiry", "true")
}
}
reqData := requestData{
relPath: adminAPIPrefix + "/set-remote-target",
queryValues: queryValues,
content: encData,
}
// Execute PUT on /minio/admin/v3/set-remote-target to set a target for this bucket of specific arn type.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
var arn string
if err = json.Unmarshal(b, &arn); err != nil {
return "", err
}
return arn, nil
}
// RemoveRemoteTarget removes a remote target associated with particular ARN for this bucket
func (adm *AdminClient) RemoveRemoteTarget(ctx context.Context, bucket, arn string) error {
queryValues := url.Values{}
queryValues.Set("bucket", bucket)
queryValues.Set("arn", arn)
reqData := requestData{
relPath: adminAPIPrefix + "/remove-remote-target",
queryValues: queryValues,
}
// Execute DELETE on /minio/admin/v3/remove-remote-target to remove a target for this bucket
// with specific ARN
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
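// Example usage (illustrative sketch only; "mybucket" is a sample bucket, the arnType filter
// is left empty here, and `adm` is an *AdminClient assumed to be created elsewhere):
//
//	ctx := context.Background()
//	targets, err := adm.ListRemoteTargets(ctx, "mybucket", "")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Printf("bucket has %d remote target(s)\n", len(targets))
//	// Remove a target by its ARN (obtained from the listing above):
//	// err = adm.RemoveRemoteTarget(ctx, "mybucket", targetARN)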
golang-github-minio-madmin-go-3.0.104/remote-target-commands_test.go 0000664 0000000 0000000 00000004762 14774251704 0025355 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"net/url"
"testing"
)
func isOpsEqual(op1 []TargetUpdateType, op2 []TargetUpdateType) bool {
if len(op1) != len(op2) {
return false
}
for _, o1 := range op1 {
found := false
for _, o2 := range op2 {
if o2 == o1 {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// TestGetTargetUpdateOps tests GetTargetUpdateOps
func TestGetTargetUpdateOps(t *testing.T) {
testCases := []struct {
values url.Values
expectedOps []TargetUpdateType
}{
{
values: url.Values{
"update": []string{"true"},
},
expectedOps: []TargetUpdateType{},
},
{
values: url.Values{
"update": []string{"false"},
"path": []string{"true"},
},
expectedOps: []TargetUpdateType{},
},
{
values: url.Values{
"update": []string{"true"},
"path": []string{""},
},
expectedOps: []TargetUpdateType{},
},
{
values: url.Values{
"update": []string{"true"},
"path": []string{"true"},
"bzzzz": []string{"true"},
},
expectedOps: []TargetUpdateType{PathUpdateType},
},
{
values: url.Values{
"update": []string{"true"},
"path": []string{"true"},
"creds": []string{"true"},
"sync": []string{"true"},
"proxy": []string{"true"},
"bandwidth": []string{"true"},
"healthcheck": []string{"true"},
},
expectedOps: []TargetUpdateType{
PathUpdateType, CredentialsUpdateType, SyncUpdateType, ProxyUpdateType, BandwidthLimitUpdateType, HealthCheckDurationUpdateType,
},
},
}
for i, test := range testCases {
gotOps := GetTargetUpdateOps(test.values)
if !isOpsEqual(gotOps, test.expectedOps) {
t.Fatalf("test %d: expected %v got %v", i+1, test.expectedOps, gotOps)
}
}
}
golang-github-minio-madmin-go-3.0.104/replication-api.go 0000664 0000000 0000000 00000016324 14774251704 0023015 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"time"
)
//go:generate msgp -file $GOFILE
// ReplDiffOpts holds options for `mc replicate diff` command
//
//msgp:ignore ReplDiffOpts
type ReplDiffOpts struct {
ARN string
Verbose bool
Prefix string
}
// TgtDiffInfo holds the replication status of unreplicated objects
// for the target ARN
//msgp:ignore TgtDiffInfo
type TgtDiffInfo struct {
ReplicationStatus string `json:"rStatus,omitempty"` // target replication status
DeleteReplicationStatus string `json:"drStatus,omitempty"` // target delete replication status
}
// DiffInfo represents relevant replication status and last attempt to replicate
// for the replication targets configured for the bucket
//msgp:ignore DiffInfo
type DiffInfo struct {
Object string `json:"object"`
VersionID string `json:"versionId"`
Targets map[string]TgtDiffInfo `json:"targets,omitempty"`
Err error `json:"error,omitempty"`
ReplicationStatus string `json:"rStatus,omitempty"` // overall replication status
DeleteReplicationStatus string `json:"dStatus,omitempty"` // overall replication status of version delete
ReplicationTimestamp time.Time `json:"replTimestamp,omitempty"`
LastModified time.Time `json:"lastModified,omitempty"`
IsDeleteMarker bool `json:"deletemarker"`
}
// BucketReplicationDiff - gets diff for non-replicated entries.
func (adm *AdminClient) BucketReplicationDiff(ctx context.Context, bucketName string, opts ReplDiffOpts) <-chan DiffInfo {
diffCh := make(chan DiffInfo)
// start a goroutine to stream and decode the response line by line.
go func(diffCh chan<- DiffInfo) {
defer close(diffCh)
queryValues := url.Values{}
queryValues.Set("bucket", bucketName)
if opts.Verbose {
queryValues.Set("verbose", "true")
}
if opts.ARN != "" {
queryValues.Set("arn", opts.ARN)
}
if opts.Prefix != "" {
queryValues.Set("prefix", opts.Prefix)
}
reqData := requestData{
relPath: adminAPIPrefix + "/replication/diff",
queryValues: queryValues,
}
// Execute POST on /minio/admin/v3/replication/diff to get the replication diff for the bucket.
resp, err := adm.executeMethod(ctx, http.MethodPost, reqData)
if err != nil {
diffCh <- DiffInfo{Err: err}
return
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
diffCh <- DiffInfo{Err: httpRespToErrorResponse(resp)}
return
}
dec := json.NewDecoder(resp.Body)
for {
var di DiffInfo
if err = dec.Decode(&di); err != nil {
break
}
select {
case <-ctx.Done():
return
case diffCh <- di:
}
}
}(diffCh)
// Returns the diff channel, for caller to start reading from.
return diffCh
}
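// Illustrative usage sketch (hypothetical helper, not part of the upstream
// file): drain the channel returned by BucketReplicationDiff until it is
// closed, surfacing the first error entry to the caller. Assumes an
// already-constructed *AdminClient.
func exampleCollectReplicationDiff(ctx context.Context, adm *AdminClient, bucket string) ([]DiffInfo, error) {
	var entries []DiffInfo
	for di := range adm.BucketReplicationDiff(ctx, bucket, ReplDiffOpts{Verbose: true}) {
		if di.Err != nil {
			// An error entry is the last item sent before the channel closes.
			return entries, di.Err
		}
		entries = append(entries, di)
	}
	return entries, nil
}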
// ReplicationMRF represents MRF backlog for a bucket
type ReplicationMRF struct {
NodeName string `json:"nodeName" msg:"n"`
Bucket string `json:"bucket" msg:"b"`
Object string `json:"object" msg:"o"`
VersionID string `json:"versionId" msg:"v"`
RetryCount int `json:"retryCount" msg:"rc"`
Err string `json:"error,omitempty" msg:"err"`
}
// BucketReplicationMRF - gets MRF entries for a bucket and node. Returns the MRF backlog across all buckets if bucket is empty,
// and across all nodes if node is `all`.
func (adm *AdminClient) BucketReplicationMRF(ctx context.Context, bucketName string, node string) <-chan ReplicationMRF {
mrfCh := make(chan ReplicationMRF)
// start a goroutine to stream and decode the response line by line.
go func(mrfCh chan<- ReplicationMRF) {
defer close(mrfCh)
queryValues := url.Values{}
queryValues.Set("bucket", bucketName)
if node != "" {
queryValues.Set("node", node)
}
reqData := requestData{
relPath: adminAPIPrefix + "/replication/mrf",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/replication/mrf to get mrf backlog for a bucket.
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
if err != nil {
mrfCh <- ReplicationMRF{Err: err.Error()}
return
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
mrfCh <- ReplicationMRF{Err: httpRespToErrorResponse(resp).Error()}
return
}
dec := json.NewDecoder(resp.Body)
for {
var bk ReplicationMRF
if err = dec.Decode(&bk); err != nil {
break
}
select {
case <-ctx.Done():
return
case mrfCh <- bk:
}
}
}(mrfCh)
// Returns the mrf backlog channel, for caller to start reading from.
return mrfCh
}
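// Illustrative usage sketch (hypothetical helper, not part of the upstream
// file): stream the MRF backlog for a bucket across every node by passing
// node as `all`, as described in the BucketReplicationMRF documentation.
func exampleDrainReplicationMRF(ctx context.Context, adm *AdminClient, bucket string) (backlog []ReplicationMRF, errMsg string) {
	for entry := range adm.BucketReplicationMRF(ctx, bucket, "all") {
		if entry.Err != "" {
			// Error entries carry the failure as a string; stop at the first one.
			return backlog, entry.Err
		}
		backlog = append(backlog, entry)
	}
	return backlog, ""
}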
// LatencyStat represents replication link latency statistics
type LatencyStat struct {
Curr time.Duration `json:"curr"`
Avg time.Duration `json:"avg"`
Max time.Duration `json:"max"`
}
// TimedErrStats has failed replication stats across time windows
type TimedErrStats struct {
LastMinute RStat `json:"lastMinute"`
LastHour RStat `json:"lastHour"`
Totals RStat `json:"totals"`
// ErrCounts is a map of error codes to count of errors since server start - tracks
// only AccessDenied errors for now.
ErrCounts map[string]int `json:"errCounts,omitempty"`
}
// Add - adds two TimedErrStats
func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
m := make(map[string]int)
for k, v := range te.ErrCounts {
m[k] = v
}
for k, v := range o.ErrCounts {
m[k] += v
}
return TimedErrStats{
LastMinute: te.LastMinute.Add(o.LastMinute),
LastHour: te.LastHour.Add(o.LastHour),
Totals: te.Totals.Add(o.Totals),
ErrCounts: m,
}
}
// RStat represents count and bytes replicated/failed
type RStat struct {
Count float64 `json:"count"`
Bytes int64 `json:"bytes"`
}
// Add - adds two RStats
func (r RStat) Add(r1 RStat) RStat {
return RStat{
Count: r.Count + r1.Count,
Bytes: r.Bytes + r1.Bytes,
}
}
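// Small worked sketch (hypothetical values, not part of the upstream file):
// TimedErrStats.Add merges the per-window RStat fields via RStat.Add and sums
// the ErrCounts maps, so aggregating two nodes that report 2 and 3
// AccessDenied failures yields a combined count of 5.
func exampleAggregateErrStats() TimedErrStats {
	node1 := TimedErrStats{
		Totals:    RStat{Count: 2, Bytes: 1 << 20},
		ErrCounts: map[string]int{"AccessDenied": 2},
	}
	node2 := TimedErrStats{
		Totals:    RStat{Count: 3, Bytes: 3 << 20},
		ErrCounts: map[string]int{"AccessDenied": 3},
	}
	// Result: Totals.Count == 5, Totals.Bytes == 4 MiB, ErrCounts["AccessDenied"] == 5.
	return node1.Add(node2)
}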
// DowntimeInfo captures the downtime information
type DowntimeInfo struct {
Duration StatRecorder `json:"duration"`
Count StatRecorder `json:"count"`
}
// RecordCount records the value
func (d *DowntimeInfo) RecordCount(value int64) {
d.Count.Record(value)
}
// RecordDuration records the value
func (d *DowntimeInfo) RecordDuration(value int64) {
d.Duration.Record(value)
}
// StatRecorder records and calculates the aggregates
type StatRecorder struct {
Total int64 `json:"total"`
Avg int64 `json:"avg"`
Max int64 `json:"max"`
count int64 `json:"-"`
}
// Record records the value and updates the aggregates on the fly
func (s *StatRecorder) Record(value int64) {
s.Total += value
if s.count == 0 || value > s.Max {
s.Max = value
}
s.count++
s.Avg = s.Total / s.count
}
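// Small worked sketch (hypothetical values, not part of the upstream file):
// Record keeps a running Total, Max and Avg. Recording downtimes of 10s, 30s
// and 20s (as int64 nanoseconds via time.Duration) leaves d.Duration with
// Total=60s, Max=30s and Avg=20s, while d.Count tracks three occurrences.
func exampleRecordDowntime() DowntimeInfo {
	var d DowntimeInfo
	for _, dur := range []time.Duration{10 * time.Second, 30 * time.Second, 20 * time.Second} {
		d.RecordDuration(int64(dur))
		d.RecordCount(1)
	}
	// d.Count.Total == 3, d.Duration.Avg == int64(20 * time.Second).
	return d
}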
golang-github-minio-madmin-go-3.0.104/replication-api_gen.go 0000664 0000000 0000000 00000102145 14774251704 0023643 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *DowntimeInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Duration":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Duration")
return
}
for zb0002 > 0 {
zb0002--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Duration")
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Duration.Total, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Duration", "Total")
return
}
case "Avg":
z.Duration.Avg, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Duration", "Avg")
return
}
case "Max":
z.Duration.Max, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Duration", "Max")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Duration")
return
}
}
}
case "Count":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
for zb0003 > 0 {
zb0003--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Count.Total, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Count", "Total")
return
}
case "Avg":
z.Count.Avg, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Count", "Avg")
return
}
case "Max":
z.Count.Max, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Count", "Max")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *DowntimeInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "Duration"
err = en.Append(0x82, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
// map header, size 3
// write "Total"
err = en.Append(0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteInt64(z.Duration.Total)
if err != nil {
err = msgp.WrapError(err, "Duration", "Total")
return
}
// write "Avg"
err = en.Append(0xa3, 0x41, 0x76, 0x67)
if err != nil {
return
}
err = en.WriteInt64(z.Duration.Avg)
if err != nil {
err = msgp.WrapError(err, "Duration", "Avg")
return
}
// write "Max"
err = en.Append(0xa3, 0x4d, 0x61, 0x78)
if err != nil {
return
}
err = en.WriteInt64(z.Duration.Max)
if err != nil {
err = msgp.WrapError(err, "Duration", "Max")
return
}
// write "Count"
err = en.Append(0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
// map header, size 3
// write "Total"
err = en.Append(0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteInt64(z.Count.Total)
if err != nil {
err = msgp.WrapError(err, "Count", "Total")
return
}
// write "Avg"
err = en.Append(0xa3, 0x41, 0x76, 0x67)
if err != nil {
return
}
err = en.WriteInt64(z.Count.Avg)
if err != nil {
err = msgp.WrapError(err, "Count", "Avg")
return
}
// write "Max"
err = en.Append(0xa3, 0x4d, 0x61, 0x78)
if err != nil {
return
}
err = en.WriteInt64(z.Count.Max)
if err != nil {
err = msgp.WrapError(err, "Count", "Max")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *DowntimeInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "Duration"
o = append(o, 0x82, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
// map header, size 3
// string "Total"
o = append(o, 0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendInt64(o, z.Duration.Total)
// string "Avg"
o = append(o, 0xa3, 0x41, 0x76, 0x67)
o = msgp.AppendInt64(o, z.Duration.Avg)
// string "Max"
o = append(o, 0xa3, 0x4d, 0x61, 0x78)
o = msgp.AppendInt64(o, z.Duration.Max)
// string "Count"
o = append(o, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
// map header, size 3
// string "Total"
o = append(o, 0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendInt64(o, z.Count.Total)
// string "Avg"
o = append(o, 0xa3, 0x41, 0x76, 0x67)
o = msgp.AppendInt64(o, z.Count.Avg)
// string "Max"
o = append(o, 0xa3, 0x4d, 0x61, 0x78)
o = msgp.AppendInt64(o, z.Count.Max)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *DowntimeInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Duration":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Duration")
return
}
for zb0002 > 0 {
zb0002--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Duration")
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Duration.Total, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Duration", "Total")
return
}
case "Avg":
z.Duration.Avg, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Duration", "Avg")
return
}
case "Max":
z.Duration.Max, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Duration", "Max")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Duration")
return
}
}
}
case "Count":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
for zb0003 > 0 {
zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Count.Total, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count", "Total")
return
}
case "Avg":
z.Count.Avg, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count", "Avg")
return
}
case "Max":
z.Count.Max, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count", "Max")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DowntimeInfo) Msgsize() (s int) {
s = 1 + 9 + 1 + 6 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 6 + 1 + 6 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *LatencyStat) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Curr":
z.Curr, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "Curr")
return
}
case "Avg":
z.Avg, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "Avg")
return
}
case "Max":
z.Max, err = dc.ReadDuration()
if err != nil {
err = msgp.WrapError(err, "Max")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z LatencyStat) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Curr"
err = en.Append(0x83, 0xa4, 0x43, 0x75, 0x72, 0x72)
if err != nil {
return
}
err = en.WriteDuration(z.Curr)
if err != nil {
err = msgp.WrapError(err, "Curr")
return
}
// write "Avg"
err = en.Append(0xa3, 0x41, 0x76, 0x67)
if err != nil {
return
}
err = en.WriteDuration(z.Avg)
if err != nil {
err = msgp.WrapError(err, "Avg")
return
}
// write "Max"
err = en.Append(0xa3, 0x4d, 0x61, 0x78)
if err != nil {
return
}
err = en.WriteDuration(z.Max)
if err != nil {
err = msgp.WrapError(err, "Max")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z LatencyStat) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Curr"
o = append(o, 0x83, 0xa4, 0x43, 0x75, 0x72, 0x72)
o = msgp.AppendDuration(o, z.Curr)
// string "Avg"
o = append(o, 0xa3, 0x41, 0x76, 0x67)
o = msgp.AppendDuration(o, z.Avg)
// string "Max"
o = append(o, 0xa3, 0x4d, 0x61, 0x78)
o = msgp.AppendDuration(o, z.Max)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *LatencyStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Curr":
z.Curr, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Curr")
return
}
case "Avg":
z.Avg, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Avg")
return
}
case "Max":
z.Max, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Max")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z LatencyStat) Msgsize() (s int) {
s = 1 + 5 + msgp.DurationSize + 4 + msgp.DurationSize + 4 + msgp.DurationSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *RStat) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.Count, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "Bytes":
z.Bytes, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Bytes")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z RStat) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "Count"
err = en.Append(0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.Count)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
// write "Bytes"
err = en.Append(0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Bytes)
if err != nil {
err = msgp.WrapError(err, "Bytes")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z RStat) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "Count"
o = append(o, 0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendFloat64(o, z.Count)
// string "Bytes"
o = append(o, 0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendInt64(o, z.Bytes)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *RStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.Count, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "Bytes":
z.Bytes, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bytes")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z RStat) Msgsize() (s int) {
s = 1 + 6 + msgp.Float64Size + 6 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *ReplicationMRF) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "n":
z.NodeName, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "NodeName")
return
}
case "b":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "o":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "v":
z.VersionID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "VersionID")
return
}
case "rc":
z.RetryCount, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "RetryCount")
return
}
case "err":
z.Err, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Err")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ReplicationMRF) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "n"
err = en.Append(0x86, 0xa1, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.NodeName)
if err != nil {
err = msgp.WrapError(err, "NodeName")
return
}
// write "b"
err = en.Append(0xa1, 0x62)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "o"
err = en.Append(0xa1, 0x6f)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "v"
err = en.Append(0xa1, 0x76)
if err != nil {
return
}
err = en.WriteString(z.VersionID)
if err != nil {
err = msgp.WrapError(err, "VersionID")
return
}
// write "rc"
err = en.Append(0xa2, 0x72, 0x63)
if err != nil {
return
}
err = en.WriteInt(z.RetryCount)
if err != nil {
err = msgp.WrapError(err, "RetryCount")
return
}
// write "err"
err = en.Append(0xa3, 0x65, 0x72, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Err)
if err != nil {
err = msgp.WrapError(err, "Err")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ReplicationMRF) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "n"
o = append(o, 0x86, 0xa1, 0x6e)
o = msgp.AppendString(o, z.NodeName)
// string "b"
o = append(o, 0xa1, 0x62)
o = msgp.AppendString(o, z.Bucket)
// string "o"
o = append(o, 0xa1, 0x6f)
o = msgp.AppendString(o, z.Object)
// string "v"
o = append(o, 0xa1, 0x76)
o = msgp.AppendString(o, z.VersionID)
// string "rc"
o = append(o, 0xa2, 0x72, 0x63)
o = msgp.AppendInt(o, z.RetryCount)
// string "err"
o = append(o, 0xa3, 0x65, 0x72, 0x72)
o = msgp.AppendString(o, z.Err)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ReplicationMRF) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "n":
z.NodeName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NodeName")
return
}
case "b":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "o":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "v":
z.VersionID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VersionID")
return
}
case "rc":
z.RetryCount, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryCount")
return
}
case "err":
z.Err, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Err")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ReplicationMRF) Msgsize() (s int) {
s = 1 + 2 + msgp.StringPrefixSize + len(z.NodeName) + 2 + msgp.StringPrefixSize + len(z.Bucket) + 2 + msgp.StringPrefixSize + len(z.Object) + 2 + msgp.StringPrefixSize + len(z.VersionID) + 3 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.Err)
return
}
// DecodeMsg implements msgp.Decodable
func (z *StatRecorder) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Total, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Avg":
z.Avg, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Avg")
return
}
case "Max":
z.Max, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Max")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z StatRecorder) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Total"
err = en.Append(0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteInt64(z.Total)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
// write "Avg"
err = en.Append(0xa3, 0x41, 0x76, 0x67)
if err != nil {
return
}
err = en.WriteInt64(z.Avg)
if err != nil {
err = msgp.WrapError(err, "Avg")
return
}
// write "Max"
err = en.Append(0xa3, 0x4d, 0x61, 0x78)
if err != nil {
return
}
err = en.WriteInt64(z.Max)
if err != nil {
err = msgp.WrapError(err, "Max")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z StatRecorder) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Total"
o = append(o, 0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendInt64(o, z.Total)
// string "Avg"
o = append(o, 0xa3, 0x41, 0x76, 0x67)
o = msgp.AppendInt64(o, z.Avg)
// string "Max"
o = append(o, 0xa3, 0x4d, 0x61, 0x78)
o = msgp.AppendInt64(o, z.Max)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *StatRecorder) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Total, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Avg":
z.Avg, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Avg")
return
}
case "Max":
z.Max, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Max")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z StatRecorder) Msgsize() (s int) {
s = 1 + 6 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *TimedErrStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "LastMinute":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
for zb0002 > 0 {
zb0002--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.LastMinute.Count, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Count")
return
}
case "Bytes":
z.LastMinute.Bytes, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Bytes")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
case "LastHour":
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "LastHour")
return
}
for zb0003 > 0 {
zb0003--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "LastHour")
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.LastHour.Count, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "LastHour", "Count")
return
}
case "Bytes":
z.LastHour.Bytes, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "LastHour", "Bytes")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "LastHour")
return
}
}
}
case "Totals":
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Totals")
return
}
for zb0004 > 0 {
zb0004--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "Totals")
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.Totals.Count, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Totals", "Count")
return
}
case "Bytes":
z.Totals.Bytes, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Totals", "Bytes")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "Totals")
return
}
}
}
case "ErrCounts":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
return
}
if z.ErrCounts == nil {
z.ErrCounts = make(map[string]int, zb0005)
} else if len(z.ErrCounts) > 0 {
for key := range z.ErrCounts {
delete(z.ErrCounts, key)
}
}
for zb0005 > 0 {
zb0005--
var za0001 string
var za0002 int
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
return
}
za0002, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "ErrCounts", za0001)
return
}
z.ErrCounts[za0001] = za0002
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *TimedErrStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 4
// write "LastMinute"
err = en.Append(0x84, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65)
if err != nil {
return
}
// map header, size 2
// write "Count"
err = en.Append(0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.LastMinute.Count)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Count")
return
}
// write "Bytes"
err = en.Append(0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.LastMinute.Bytes)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Bytes")
return
}
// write "LastHour"
err = en.Append(0xa8, 0x4c, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x75, 0x72)
if err != nil {
return
}
// map header, size 2
// write "Count"
err = en.Append(0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.LastHour.Count)
if err != nil {
err = msgp.WrapError(err, "LastHour", "Count")
return
}
// write "Bytes"
err = en.Append(0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.LastHour.Bytes)
if err != nil {
err = msgp.WrapError(err, "LastHour", "Bytes")
return
}
// write "Totals"
err = en.Append(0xa6, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73)
if err != nil {
return
}
// map header, size 2
// write "Count"
err = en.Append(0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteFloat64(z.Totals.Count)
if err != nil {
err = msgp.WrapError(err, "Totals", "Count")
return
}
// write "Bytes"
err = en.Append(0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt64(z.Totals.Bytes)
if err != nil {
err = msgp.WrapError(err, "Totals", "Bytes")
return
}
// write "ErrCounts"
err = en.Append(0xa9, 0x45, 0x72, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.ErrCounts)))
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
return
}
for za0001, za0002 := range z.ErrCounts {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
return
}
err = en.WriteInt(za0002)
if err != nil {
err = msgp.WrapError(err, "ErrCounts", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *TimedErrStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
// string "LastMinute"
o = append(o, 0x84, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65)
// map header, size 2
// string "Count"
o = append(o, 0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendFloat64(o, z.LastMinute.Count)
// string "Bytes"
o = append(o, 0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendInt64(o, z.LastMinute.Bytes)
// string "LastHour"
o = append(o, 0xa8, 0x4c, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x75, 0x72)
// map header, size 2
// string "Count"
o = append(o, 0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendFloat64(o, z.LastHour.Count)
// string "Bytes"
o = append(o, 0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendInt64(o, z.LastHour.Bytes)
// string "Totals"
o = append(o, 0xa6, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73)
// map header, size 2
// string "Count"
o = append(o, 0x82, 0xa5, 0x43, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendFloat64(o, z.Totals.Count)
// string "Bytes"
o = append(o, 0xa5, 0x42, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendInt64(o, z.Totals.Bytes)
// string "ErrCounts"
o = append(o, 0xa9, 0x45, 0x72, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.ErrCounts)))
for za0001, za0002 := range z.ErrCounts {
o = msgp.AppendString(o, za0001)
o = msgp.AppendInt(o, za0002)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TimedErrStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "LastMinute":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
for zb0002 > 0 {
zb0002--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.LastMinute.Count, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Count")
return
}
case "Bytes":
z.LastMinute.Bytes, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute", "Bytes")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "LastMinute")
return
}
}
}
case "LastHour":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastHour")
return
}
for zb0003 > 0 {
zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "LastHour")
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.LastHour.Count, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastHour", "Count")
return
}
case "Bytes":
z.LastHour.Bytes, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastHour", "Bytes")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "LastHour")
return
}
}
}
case "Totals":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Totals")
return
}
for zb0004 > 0 {
zb0004--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Totals")
return
}
switch msgp.UnsafeString(field) {
case "Count":
z.Totals.Count, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Totals", "Count")
return
}
case "Bytes":
z.Totals.Bytes, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Totals", "Bytes")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "Totals")
return
}
}
}
case "ErrCounts":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
return
}
if z.ErrCounts == nil {
z.ErrCounts = make(map[string]int, zb0005)
} else if len(z.ErrCounts) > 0 {
for key := range z.ErrCounts {
delete(z.ErrCounts, key)
}
}
for zb0005 > 0 {
var za0001 string
var za0002 int
zb0005--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
return
}
za0002, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ErrCounts", za0001)
return
}
z.ErrCounts[za0001] = za0002
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TimedErrStats) Msgsize() (s int) {
s = 1 + 11 + 1 + 6 + msgp.Float64Size + 6 + msgp.Int64Size + 9 + 1 + 6 + msgp.Float64Size + 6 + msgp.Int64Size + 7 + 1 + 6 + msgp.Float64Size + 6 + msgp.Int64Size + 10 + msgp.MapHeaderSize
if z.ErrCounts != nil {
for za0001, za0002 := range z.ErrCounts {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.IntSize
}
}
return
}
golang-github-minio-madmin-go-3.0.104/replication-api_gen_test.go 0000664 0000000 0000000 00000032147 14774251704 0024706 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalDowntimeInfo(t *testing.T) {
v := DowntimeInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDowntimeInfo(b *testing.B) {
v := DowntimeInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDowntimeInfo(b *testing.B) {
v := DowntimeInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDowntimeInfo(b *testing.B) {
v := DowntimeInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDowntimeInfo(t *testing.T) {
v := DowntimeInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDowntimeInfo Msgsize() is inaccurate")
}
vn := DowntimeInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDowntimeInfo(b *testing.B) {
v := DowntimeInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDowntimeInfo(b *testing.B) {
v := DowntimeInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalLatencyStat(t *testing.T) {
v := LatencyStat{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgLatencyStat(b *testing.B) {
v := LatencyStat{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgLatencyStat(b *testing.B) {
v := LatencyStat{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalLatencyStat(b *testing.B) {
v := LatencyStat{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeLatencyStat(t *testing.T) {
v := LatencyStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeLatencyStat Msgsize() is inaccurate")
}
vn := LatencyStat{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeLatencyStat(b *testing.B) {
v := LatencyStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeLatencyStat(b *testing.B) {
v := LatencyStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalRStat(t *testing.T) {
v := RStat{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgRStat(b *testing.B) {
v := RStat{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgRStat(b *testing.B) {
v := RStat{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalRStat(b *testing.B) {
v := RStat{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeRStat(t *testing.T) {
v := RStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeRStat Msgsize() is inaccurate")
}
vn := RStat{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeRStat(b *testing.B) {
v := RStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeRStat(b *testing.B) {
v := RStat{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalReplicationMRF(t *testing.T) {
v := ReplicationMRF{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgReplicationMRF(b *testing.B) {
v := ReplicationMRF{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgReplicationMRF(b *testing.B) {
v := ReplicationMRF{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalReplicationMRF(b *testing.B) {
v := ReplicationMRF{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeReplicationMRF(t *testing.T) {
v := ReplicationMRF{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeReplicationMRF Msgsize() is inaccurate")
}
vn := ReplicationMRF{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeReplicationMRF(b *testing.B) {
v := ReplicationMRF{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeReplicationMRF(b *testing.B) {
v := ReplicationMRF{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalStatRecorder(t *testing.T) {
v := StatRecorder{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgStatRecorder(b *testing.B) {
v := StatRecorder{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgStatRecorder(b *testing.B) {
v := StatRecorder{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalStatRecorder(b *testing.B) {
v := StatRecorder{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeStatRecorder(t *testing.T) {
v := StatRecorder{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeStatRecorder Msgsize() is inaccurate")
}
vn := StatRecorder{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeStatRecorder(b *testing.B) {
v := StatRecorder{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeStatRecorder(b *testing.B) {
v := StatRecorder{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalTimedErrStats(t *testing.T) {
v := TimedErrStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTimedErrStats(b *testing.B) {
v := TimedErrStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTimedErrStats(b *testing.B) {
v := TimedErrStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTimedErrStats(b *testing.B) {
v := TimedErrStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTimedErrStats(t *testing.T) {
v := TimedErrStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTimedErrStats Msgsize() is inaccurate")
}
vn := TimedErrStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTimedErrStats(b *testing.B) {
v := TimedErrStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTimedErrStats(b *testing.B) {
v := TimedErrStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/replication.go 0000664 0000000 0000000 00000014623 14774251704 0022246 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"time"
"github.com/minio/minio-go/v7/pkg/replication"
)
type ReplDiagInfo struct {
Error string `json:"error,omitempty"`
SREnabled bool `json:"site_replication_enabled"`
ActiveWorkers WorkerStat `json:"active_workers,omitempty"`
Queued InQueueMetric `json:"queued,omitempty"`
ReplicaCount int64 `json:"replica_count,omitempty"`
ReplicaSize int64 `json:"replica_size,omitempty"`
Proxying bool `json:"proxying,omitempty"`
Proxied ReplProxyMetric `json:"proxied,omitempty"`
Sites []ReplDiagSite `json:"sites,omitempty"`
RDReplicatedBuckets []ReplDiagReplBucket `json:"replicated_buckets,omitempty"`
}
type ReplDiagSite struct {
Addr string `json:"addr,omitempty"`
DeploymentID string `json:"deployment_id"`
Info ReplDiagSiteInfo `json:"info,omitempty"`
}
type ReplDiagSiteInfo struct {
Nodes []ReplDiagNode `json:"nodes,omitempty"`
LDAPEnabled bool `json:"ldap_enabled,omitempty"`
OpenIDEnabled bool `json:"openid_enabled,omitempty"`
BucketsCount int `json:"buckets_count,omitempty"`
Edge bool `json:"edge,omitempty"`
ILMEnabled bool `json:"ilm_enabled,omitempty"`
EncryptionEnabled bool `json:"encryption_enabled,omitempty"`
ILMExpiryReplication bool `json:"ilm_expiry_replication,omitempty"`
ObjectLockingEnabled bool `json:"object_locking_enabled,omitempty"`
Throttle ReplDiagThrottle `json:"throttle,omitempty"`
ReplicatedCount int64 `json:"replicated_count,omitempty"`
ReplicatedSize int64 `json:"replicated_size,omitempty"`
ResyncStatus string `json:"resync_status"`
}
type ReplDiagNode struct {
Addr string `json:"addr,omitempty"`
MinIOVersion string `json:"minio_version,omitempty"`
Uptime int64 `json:"uptime,omitempty"`
PoolID int `json:"poolid,omitempty"`
// SetID int `json:"setid,omitempty"`
IsLeader bool `json:"is_leader,omitempty"`
ILMExpiryInProgress bool `json:"ilm_expiry_in_progress,omitempty"`
}
type ReplDiagReplBucket struct {
Name string `json:"name,omitempty"`
ReplicationInfo ReplDiagBucketReplInfo `json:"replication_info,omitempty"`
ReplicationTargets []ReplDiagBucketReplTarget `json:"replication_targets,omitempty"`
}
type ReplDiagBucketReplTarget struct {
SourceBucket string `json:"source_bucket,omitempty"`
TargetBucket string `json:"target_bucket,omitempty"`
Addr string `json:"addr,omitempty"`
Online bool `json:"online,omitempty"`
TotalDowntime time.Duration `json:"total_downtime,omitempty"`
CurrentDowntime time.Duration `json:"current_downtime,omitempty"`
AdminPermissions bool `json:"admin_permissions,omitempty"`
SyncReplication bool `json:"sync_replication,omitempty"`
HeartbeatErrCount int64 `json:"heartbeat_err_count,omitempty"`
BandwidthLimit uint64 `json:"bandwidth_limit,omitempty"`
Latency LatencyStat `json:"xfer_rate,omitempty"`
Edge bool `json:"edge,omitempty"`
HealthCheckDuration time.Duration `json:"heath_check,omitempty"`
DisableProxying bool `json:"disable_proxying"`
DeleteReplication bool `json:"delete_replication,omitempty"`
DeleteMarkerReplication bool `json:"delete_marker_replication,omitempty"`
ReplicationPriority int `json:"replication_priority,omitempty"`
ExistingObjectReplication bool `json:"existing_object_replication,omitempty"`
MetadataSync bool `json:"metadata_sync,omitempty"`
}
type ReplDiagBucketReplInfo struct {
VersionEnabled bool `json:"version_enabled,omitempty"`
ObjectLocking bool `json:"object_locking,omitempty"`
ExcludedPrefixes []string `json:"excluded_prefixes,omitempty"`
ILM ReplDiagILMInfo `json:"ilm,omitempty"`
Encryption ReplDiagEncInfo `json:"encryption,omitempty"`
Config replication.Config `json:"config,omitempty"`
Resync ReplDiagBucketResyncInfo `json:"resync,omitempty"`
}
type ReplDiagILMInfo struct {
Enabled bool `json:"enabled,omitempty"`
Rules []ReplDiagILMRule `json:"rules,omitempty"`
}
type ReplDiagILMRule struct {
ID string `json:"id,omitempty"`
Expiration bool `json:"expiration,omitempty"`
Transition bool `json:"transition,omitempty"`
}
type ReplDiagEncInfo struct {
Enabled bool `json:"enabled,omitempty"`
EncRules []BucketEncInfo `json:"enc_rules,omitempty"`
}
type BucketEncInfo struct {
Algorithm string `json:"algorithm,omitempty"`
EncKey string `json:"enc_key,omitempty"`
}
type ReplDiagBucketResyncInfo struct {
InProgress bool `json:"in_progress,omitempty"`
StartTime time.Time `json:"start_time,omitempty"`
FailedCount int64 `json:"failed_count,omitempty"`
FailedSize int64 `json:"failed_size,omitempty"`
ReplicatedCount int64 `json:"replicated_count,omitempty"`
ReplicatedSize int64 `json:"replicated_size,omitempty"`
}
type ReplDiagThrottle struct {
IsSet bool `json:"is_set,omitempty"`
Limit uint64 `json:"limit,omitempty"`
}
golang-github-minio-madmin-go-3.0.104/retry.go 0000664 0000000 0000000 00000010022 14774251704 0021067 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"math/rand"
"net/http"
"sync"
"time"
)
// MaxRetry is the maximum number of retries before stopping.
var MaxRetry = 10
// MaxJitter will randomize over the full exponential backoff time
const MaxJitter = 1.0
// NoJitter disables the use of jitter for randomizing the exponential backoff time
const NoJitter = 0.0
// DefaultRetryUnit - default multiplicative unit per retry.
// Defaults to 1 second.
const DefaultRetryUnit = time.Second
// DefaultRetryCap - each retry attempt never waits longer than
// this maximum time duration.
const DefaultRetryCap = time.Second * 30
// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
lk sync.Mutex
src rand.Source
}
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
func (r *lockedRandSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
// Seed uses the provided seed value to initialize the generator to a
// deterministic state.
func (r *lockedRandSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}
// newRetryTimer creates a timer with exponentially increasing
// delays until the maximum retry attempts are reached.
func (adm AdminClient) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cp time.Duration, jitter float64) <-chan int {
attemptCh := make(chan int)
// computes the exponential backoff duration according to
// https://www.awsarchitectureblog.com/2015/03/backoff.html
exponentialBackoffWait := func(attempt int) time.Duration {
// normalize jitter to the range [0, 1.0]
if jitter < NoJitter {
jitter = NoJitter
}
if jitter > MaxJitter {
jitter = MaxJitter
}
// sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * 1 << uint(attempt)
if sleep > cp {
sleep = cp
}
if jitter > NoJitter {
sleep -= time.Duration(adm.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
go func() {
defer close(attemptCh)
for i := 0; i < maxRetry; i++ {
// Attempts start from 1.
select {
case attemptCh <- i + 1:
case <-ctx.Done():
// Stop the routine.
return
}
select {
case <-time.After(exponentialBackoffWait(i)):
case <-ctx.Done():
// Stop the routine.
return
}
}
}()
return attemptCh
}
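// Illustrative sketch (comment only, not part of the API): with
// DefaultRetryUnit (1s) and DefaultRetryCap (30s), the pre-jitter delay
// grows as 1s, 2s, 4s, 8s, 16s and is then clamped to 30s for later
// attempts. A caller-side loop over the returned channel typically looks
// like the following, where doAdminCall is a hypothetical operation that
// may need to be retried:
//
//	for range adm.newRetryTimer(ctx, MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
//		if err := doAdminCall(); err == nil {
//			break // success, stop retrying
//		}
//	}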
// List of admin error codes which are retryable.
var retryableAdminErrCodes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
"Throttling": {},
"ThrottlingException": {},
"RequestLimitExceeded": {},
"RequestThrottled": {},
"SlowDown": {},
// Add more admin error codes here.
}
// isAdminErrCodeRetryable - is admin error code retryable.
func isAdminErrCodeRetryable(code string) (ok bool) {
_, ok = retryableAdminErrCodes[code]
return ok
}
// List of HTTP status codes which are retryable.
var retryableHTTPStatusCodes = map[int]struct{}{
http.StatusRequestTimeout: {},
http.StatusTooManyRequests: {},
http.StatusBadGateway: {},
http.StatusServiceUnavailable: {},
// Add more HTTP status codes here.
}
// isHTTPStatusRetryable - is HTTP error code retryable.
func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
_, ok = retryableHTTPStatusCodes[httpStatusCode]
return ok
}
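// Illustrative sketch (comment only): how the two helpers above are
// typically combined inside a retry loop; errResp and resp are
// hypothetical names for a decoded admin error and its HTTP response.
//
//	if isAdminErrCodeRetryable(errResp.Code) || isHTTPStatusRetryable(resp.StatusCode) {
//		continue // retry the request on the next attempt
//	}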
golang-github-minio-madmin-go-3.0.104/scanner.go 0000664 0000000 0000000 00000003646 14774251704 0021371 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"time"
)
//msgp:clearomitted
//go:generate msgp
// BucketScanInfo contains information about a bucket scan in a given pool/set
type BucketScanInfo struct {
Pool int `msg:"pool"`
Set int `msg:"set"`
Cycle uint64 `msg:"cycle"`
Ongoing bool `msg:"ongoing"`
LastUpdate time.Time `msg:"last_update"`
LastStarted time.Time `msg:"last_started"`
Completed []time.Time `msg:"completed,omitempty"`
}
// BucketScanInfo returns information about a bucket scan across all pools/sets
func (adm *AdminClient) BucketScanInfo(ctx context.Context, bucket string) ([]BucketScanInfo, error) {
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{relPath: adminAPIPrefix + "/scanner/status/" + bucket})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var info []BucketScanInfo
err = json.Unmarshal(respBytes, &info)
if err != nil {
return nil, err
}
return info, nil
}
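// Usage sketch (comment only): the bucket name below is hypothetical and
// error handling is abbreviated.
//
//	infos, err := adm.BucketScanInfo(context.Background(), "mybucket")
//	if err != nil {
//		return err
//	}
//	for _, info := range infos {
//		fmt.Printf("pool=%d set=%d ongoing=%v last update=%s\n",
//			info.Pool, info.Set, info.Ongoing, info.LastUpdate)
//	}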
golang-github-minio-madmin-go-3.0.104/scanner_gen.go 0000664 0000000 0000000 00000020114 14774251704 0022207 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"time"
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BucketScanInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "pool":
z.Pool, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
case "set":
z.Set, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
case "cycle":
z.Cycle, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Cycle")
return
}
case "ongoing":
z.Ongoing, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Ongoing")
return
}
case "last_update":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "last_started":
z.LastStarted, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastStarted")
return
}
case "completed":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Completed")
return
}
if cap(z.Completed) >= int(zb0002) {
z.Completed = (z.Completed)[:zb0002]
} else {
z.Completed = make([]time.Time, zb0002)
}
for za0001 := range z.Completed {
z.Completed[za0001], err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Completed", za0001)
return
}
}
zb0001Mask |= 0x1
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Completed = nil
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketScanInfo) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(7)
var zb0001Mask uint8 /* 7 bits */
_ = zb0001Mask
if z.Completed == nil {
zb0001Len--
zb0001Mask |= 0x40
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "pool"
err = en.Append(0xa4, 0x70, 0x6f, 0x6f, 0x6c)
if err != nil {
return
}
err = en.WriteInt(z.Pool)
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
// write "set"
err = en.Append(0xa3, 0x73, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Set)
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
// write "cycle"
err = en.Append(0xa5, 0x63, 0x79, 0x63, 0x6c, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.Cycle)
if err != nil {
err = msgp.WrapError(err, "Cycle")
return
}
// write "ongoing"
err = en.Append(0xa7, 0x6f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67)
if err != nil {
return
}
err = en.WriteBool(z.Ongoing)
if err != nil {
err = msgp.WrapError(err, "Ongoing")
return
}
// write "last_update"
err = en.Append(0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "last_started"
err = en.Append(0xac, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.LastStarted)
if err != nil {
err = msgp.WrapError(err, "LastStarted")
return
}
if (zb0001Mask & 0x40) == 0 { // if not omitted
// write "completed"
err = en.Append(0xa9, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Completed)))
if err != nil {
err = msgp.WrapError(err, "Completed")
return
}
for za0001 := range z.Completed {
err = en.WriteTime(z.Completed[za0001])
if err != nil {
err = msgp.WrapError(err, "Completed", za0001)
return
}
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketScanInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(7)
var zb0001Mask uint8 /* 7 bits */
_ = zb0001Mask
if z.Completed == nil {
zb0001Len--
zb0001Mask |= 0x40
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "pool"
o = append(o, 0xa4, 0x70, 0x6f, 0x6f, 0x6c)
o = msgp.AppendInt(o, z.Pool)
// string "set"
o = append(o, 0xa3, 0x73, 0x65, 0x74)
o = msgp.AppendInt(o, z.Set)
// string "cycle"
o = append(o, 0xa5, 0x63, 0x79, 0x63, 0x6c, 0x65)
o = msgp.AppendUint64(o, z.Cycle)
// string "ongoing"
o = append(o, 0xa7, 0x6f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67)
o = msgp.AppendBool(o, z.Ongoing)
// string "last_update"
o = append(o, 0xab, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "last_started"
o = append(o, 0xac, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.LastStarted)
if (zb0001Mask & 0x40) == 0 { // if not omitted
// string "completed"
o = append(o, 0xa9, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64)
o = msgp.AppendArrayHeader(o, uint32(len(z.Completed)))
for za0001 := range z.Completed {
o = msgp.AppendTime(o, z.Completed[za0001])
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketScanInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 1 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "pool":
z.Pool, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
case "set":
z.Set, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
case "cycle":
z.Cycle, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cycle")
return
}
case "ongoing":
z.Ongoing, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Ongoing")
return
}
case "last_update":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "last_started":
z.LastStarted, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastStarted")
return
}
case "completed":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Completed")
return
}
if cap(z.Completed) >= int(zb0002) {
z.Completed = (z.Completed)[:zb0002]
} else {
z.Completed = make([]time.Time, zb0002)
}
for za0001 := range z.Completed {
z.Completed[za0001], bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Completed", za0001)
return
}
}
zb0001Mask |= 0x1
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if (zb0001Mask & 0x1) == 0 {
z.Completed = nil
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketScanInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.IntSize + 4 + msgp.IntSize + 6 + msgp.Uint64Size + 8 + msgp.BoolSize + 12 + msgp.TimeSize + 13 + msgp.TimeSize + 10 + msgp.ArrayHeaderSize + (len(z.Completed) * (msgp.TimeSize))
return
}
golang-github-minio-madmin-go-3.0.104/scanner_gen_test.go 0000664 0000000 0000000 00000004535 14774251704 0023257 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBucketScanInfo(t *testing.T) {
v := BucketScanInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketScanInfo(b *testing.B) {
v := BucketScanInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketScanInfo(b *testing.B) {
v := BucketScanInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketScanInfo(b *testing.B) {
v := BucketScanInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketScanInfo(t *testing.T) {
v := BucketScanInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketScanInfo Msgsize() is inaccurate")
}
vn := BucketScanInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketScanInfo(b *testing.B) {
v := BucketScanInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketScanInfo(b *testing.B) {
v := BucketScanInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/service-commands-v1.go 0000664 0000000 0000000 00000004022 14774251704 0023510 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"net/http"
"net/url"
)
// ServiceRestart - restarts the MinIO cluster
//
// Deprecated: use ServiceRestartV2 instead
func (adm *AdminClient) ServiceRestart(ctx context.Context) error {
return adm.serviceCallAction(ctx, ServiceActionRestart)
}
// ServiceStop - stops the MinIO cluster
//
// Deprecated: use ServiceStopV2
func (adm *AdminClient) ServiceStop(ctx context.Context) error {
return adm.serviceCallAction(ctx, ServiceActionStop)
}
// ServiceUnfreeze - un-freezes all incoming S3 API calls on MinIO cluster
//
// Deprecated: use ServiceUnfreezeV2
func (adm *AdminClient) ServiceUnfreeze(ctx context.Context) error {
return adm.serviceCallAction(ctx, ServiceActionUnfreeze)
}
// serviceCallAction - call service restart/update/stop API.
func (adm *AdminClient) serviceCallAction(ctx context.Context, action ServiceAction) error {
queryValues := url.Values{}
queryValues.Set("action", string(action))
// Request the API to perform the service action
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/service",
queryValues: queryValues,
},
)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
golang-github-minio-madmin-go-3.0.104/service-commands.go 0000664 0000000 0000000 00000023720 14774251704 0023172 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// ServiceRestartV2 - restarts the MinIO cluster
func (adm *AdminClient) ServiceRestartV2(ctx context.Context) error {
_, err := adm.serviceCallActionV2(ctx, ServiceActionOpts{Action: ServiceActionRestart})
return err
}
// ServiceStopV2 - stops the MinIO cluster
func (adm *AdminClient) ServiceStopV2(ctx context.Context) error {
_, err := adm.serviceCallActionV2(ctx, ServiceActionOpts{Action: ServiceActionStop})
return err
}
// ServiceFreezeV2 - freezes all incoming S3 API calls on MinIO cluster
func (adm *AdminClient) ServiceFreezeV2(ctx context.Context) error {
_, err := adm.serviceCallActionV2(ctx, ServiceActionOpts{Action: ServiceActionFreeze})
return err
}
// ServiceUnfreezeV2 - un-freezes all incoming S3 API calls on MinIO cluster
func (adm *AdminClient) ServiceUnfreezeV2(ctx context.Context) error {
_, err := adm.serviceCallActionV2(ctx, ServiceActionOpts{Action: ServiceActionUnfreeze})
return err
}
// ServiceAction - type to restrict service-action values
type ServiceAction string
const (
// ServiceActionRestart represents restart action
ServiceActionRestart ServiceAction = "restart"
// ServiceActionStop represents stop action
ServiceActionStop = "stop"
// ServiceActionFreeze represents freeze action
ServiceActionFreeze = "freeze"
// ServiceActionUnfreeze represents unfreeze a previous freeze action
ServiceActionUnfreeze = "unfreeze"
)
// ServiceActionOpts specifies the action that the service is requested
// to take; DryRun indicates that the action should be a no-op.
type ServiceActionOpts struct {
Action ServiceAction
DryRun bool
}
// ServiceActionPeerResult service peer result
type ServiceActionPeerResult struct {
Host string `json:"host"`
Err string `json:"err,omitempty"`
WaitingDrives map[string]DiskMetrics `json:"waitingDrives,omitempty"`
}
// ServiceActionResult service action result
type ServiceActionResult struct {
Action ServiceAction `json:"action"`
DryRun bool `json:"dryRun"`
Results []ServiceActionPeerResult `json:"results,omitempty"`
}
// ServiceAction - requests the server to perform the specified service action
func (adm *AdminClient) ServiceAction(ctx context.Context, opts ServiceActionOpts) (ServiceActionResult, error) {
return adm.serviceCallActionV2(ctx, opts)
}
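// Usage sketch (comment only): request a dry-run restart and inspect the
// per-peer results; error handling is abbreviated.
//
//	res, err := adm.ServiceAction(ctx, ServiceActionOpts{
//		Action: ServiceActionRestart,
//		DryRun: true,
//	})
//	if err != nil {
//		return err
//	}
//	for _, peer := range res.Results {
//		fmt.Println(peer.Host, peer.Err)
//	}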
// serviceCallActionV2 - call service restart/stop/freeze/unfreeze
func (adm *AdminClient) serviceCallActionV2(ctx context.Context, opts ServiceActionOpts) (ServiceActionResult, error) {
queryValues := url.Values{}
queryValues.Set("action", string(opts.Action))
queryValues.Set("dry-run", strconv.FormatBool(opts.DryRun))
queryValues.Set("type", "2")
// Request the API to perform the requested service action
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/service",
queryValues: queryValues,
},
)
defer closeResponse(resp)
if err != nil {
return ServiceActionResult{}, err
}
if resp.StatusCode != http.StatusOK {
return ServiceActionResult{}, httpRespToErrorResponse(resp)
}
srvRes := ServiceActionResult{}
dec := json.NewDecoder(resp.Body)
if err = dec.Decode(&srvRes); err != nil {
return ServiceActionResult{}, err
}
return srvRes, nil
}
// ServiceTraceInfo holds an HTTP trace entry and any streaming error
type ServiceTraceInfo struct {
Trace TraceInfo
Err error `json:"-"`
}
// ServiceTraceOpts holds tracing options
type ServiceTraceOpts struct {
// Trace types:
S3 bool
Internal bool
Storage bool
OS bool
Scanner bool
Decommission bool
Healing bool
BatchReplication bool
BatchKeyRotation bool
BatchExpire bool
BatchAll bool
Rebalance bool
ReplicationResync bool
Bootstrap bool
FTP bool
ILM bool
KMS bool
Formatting bool
OnlyErrors bool
Threshold time.Duration
}
// TraceTypes returns the enabled traces as a bitfield value.
func (t ServiceTraceOpts) TraceTypes() TraceType {
var tt TraceType
tt.SetIf(t.S3, TraceS3)
tt.SetIf(t.Internal, TraceInternal)
tt.SetIf(t.Storage, TraceStorage)
tt.SetIf(t.OS, TraceOS)
tt.SetIf(t.Scanner, TraceScanner)
tt.SetIf(t.Decommission, TraceDecommission)
tt.SetIf(t.Healing, TraceHealing)
tt.SetIf(t.BatchAll || t.BatchReplication, TraceBatchReplication)
tt.SetIf(t.BatchAll || t.BatchKeyRotation, TraceBatchKeyRotation)
tt.SetIf(t.BatchAll || t.BatchExpire, TraceBatchExpire)
tt.SetIf(t.Rebalance, TraceRebalance)
tt.SetIf(t.ReplicationResync, TraceReplicationResync)
tt.SetIf(t.Bootstrap, TraceBootstrap)
tt.SetIf(t.FTP, TraceFTP)
tt.SetIf(t.ILM, TraceILM)
tt.SetIf(t.KMS, TraceKMS)
tt.SetIf(t.Formatting, TraceFormatting)
return tt
}
// AddParams will add parameters to the url values.
func (t ServiceTraceOpts) AddParams(u url.Values) {
u.Set("err", strconv.FormatBool(t.OnlyErrors))
u.Set("threshold", t.Threshold.String())
u.Set("s3", strconv.FormatBool(t.S3))
u.Set("internal", strconv.FormatBool(t.Internal))
u.Set("storage", strconv.FormatBool(t.Storage))
u.Set("os", strconv.FormatBool(t.OS))
u.Set("scanner", strconv.FormatBool(t.Scanner))
u.Set("decommission", strconv.FormatBool(t.Decommission))
u.Set("healing", strconv.FormatBool(t.Healing))
u.Set("batch-replication", strconv.FormatBool(t.BatchAll || t.BatchReplication))
u.Set("batch-keyrotation", strconv.FormatBool(t.BatchAll || t.BatchKeyRotation))
u.Set("batch-expire", strconv.FormatBool(t.BatchAll || t.BatchExpire))
u.Set("rebalance", strconv.FormatBool(t.Rebalance))
u.Set("replication-resync", strconv.FormatBool(t.ReplicationResync))
u.Set("bootstrap", strconv.FormatBool(t.Bootstrap))
u.Set("ftp", strconv.FormatBool(t.FTP))
u.Set("ilm", strconv.FormatBool(t.ILM))
u.Set("kms", strconv.FormatBool(t.KMS))
u.Set("formatting", strconv.FormatBool(t.Formatting))
}
// ParseParams will parse parameters and set them to t.
func (t *ServiceTraceOpts) ParseParams(r *http.Request) (err error) {
t.S3 = r.Form.Get("s3") == "true"
t.OS = r.Form.Get("os") == "true"
t.Scanner = r.Form.Get("scanner") == "true"
t.Decommission = r.Form.Get("decommission") == "true"
t.Healing = r.Form.Get("healing") == "true"
t.BatchReplication = r.Form.Get("batch-replication") == "true"
t.BatchKeyRotation = r.Form.Get("batch-keyrotation") == "true"
t.BatchExpire = r.Form.Get("batch-expire") == "true"
t.Rebalance = r.Form.Get("rebalance") == "true"
t.Storage = r.Form.Get("storage") == "true"
t.Internal = r.Form.Get("internal") == "true"
t.OnlyErrors = r.Form.Get("err") == "true"
t.ReplicationResync = r.Form.Get("replication-resync") == "true"
t.Bootstrap = r.Form.Get("bootstrap") == "true"
t.FTP = r.Form.Get("ftp") == "true"
t.ILM = r.Form.Get("ilm") == "true"
t.KMS = r.Form.Get("kms") == "true"
t.Formatting = r.Form.Get("formatting") == "true"
if th := r.Form.Get("threshold"); th != "" {
d, err := time.ParseDuration(th)
if err != nil {
return err
}
t.Threshold = d
}
return nil
}
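// Round-trip sketch (comment only): AddParams fills url.Values on the
// client and ParseParams reads them back on the server; the handler is
// assumed to have called r.ParseForm() beforehand.
//
//	q := make(url.Values)
//	opts.AddParams(q)
//	// ... on the server side, with *http.Request r:
//	var parsed ServiceTraceOpts
//	if err := parsed.ParseParams(r); err != nil {
//		return err
//	}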
// ServiceTrace - listen on http trace notifications.
func (adm AdminClient) ServiceTrace(ctx context.Context, opts ServiceTraceOpts) <-chan ServiceTraceInfo {
traceInfoCh := make(chan ServiceTraceInfo)
// Start a goroutine that reads trace notifications one by one.
go func(traceInfoCh chan<- ServiceTraceInfo) {
defer close(traceInfoCh)
for {
urlValues := make(url.Values)
opts.AddParams(urlValues)
reqData := requestData{
relPath: adminAPIPrefix + "/trace",
queryValues: urlValues,
}
// Execute GET to call trace handler
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
if err != nil {
traceInfoCh <- ServiceTraceInfo{Err: err}
return
}
if resp.StatusCode != http.StatusOK {
closeResponse(resp)
traceInfoCh <- ServiceTraceInfo{Err: httpRespToErrorResponse(resp)}
return
}
dec := json.NewDecoder(resp.Body)
for {
var info traceInfoLegacy
if err = dec.Decode(&info); err != nil {
closeResponse(resp)
traceInfoCh <- ServiceTraceInfo{Err: err}
break
}
// Convert if legacy...
if info.TraceType == TraceType(0) {
if strings.HasPrefix(info.FuncName, "s3.") {
info.TraceType = TraceS3
} else {
info.TraceType = TraceInternal
}
info.HTTP = &TraceHTTPStats{}
if info.ReqInfo != nil {
info.Path = info.ReqInfo.Path
info.HTTP.ReqInfo = *info.ReqInfo
}
if info.RespInfo != nil {
info.HTTP.RespInfo = *info.RespInfo
}
if info.CallStats != nil {
info.Duration = info.CallStats.Latency
info.HTTP.CallStats = *info.CallStats
}
}
if info.TraceType == TraceOS && info.OSStats != nil {
info.Path = info.OSStats.Path
info.Duration = info.OSStats.Duration
}
if info.TraceType == TraceStorage && info.StorageStats != nil {
info.Path = info.StorageStats.Path
info.Duration = info.StorageStats.Duration
}
select {
case <-ctx.Done():
closeResponse(resp)
return
case traceInfoCh <- ServiceTraceInfo{Trace: info.TraceInfo}:
}
}
}
}(traceInfoCh)
// Return the trace info channel for the caller to start reading from.
return traceInfoCh
}
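// Usage sketch (comment only): stream S3 call traces until the context is
// cancelled. OnlyErrors and Threshold are assumed to be applied
// server-side, and TraceInfo is assumed to expose FuncName, Path and
// Duration as used above.
//
//	opts := ServiceTraceOpts{S3: true, OnlyErrors: true, Threshold: time.Second}
//	for ti := range adm.ServiceTrace(ctx, opts) {
//		if ti.Err != nil {
//			return ti.Err
//		}
//		fmt.Println(ti.Trace.FuncName, ti.Trace.Path, ti.Trace.Duration)
//	}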
golang-github-minio-madmin-go-3.0.104/summary-object.go 0000664 0000000 0000000 00000006130 14774251704 0022670 0 ustar 00root root 0000000 0000000 // Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"github.com/tinylib/msgp/msgp"
)
//go:generate msgp -unexported -file=$GOFILE
// ObjectSummaryOptions provides options for the ObjectSummary call.
type ObjectSummaryOptions struct {
Bucket, Object string
}
// ObjectSummary calls MinIO to search for all files and parts
// related to the given object, across all disks.
func (adm *AdminClient) ObjectSummary(ctx context.Context, objOpts ObjectSummaryOptions) (objectSummary *ObjectSummary, err error) {
form := make(url.Values)
if objOpts.Bucket == "" {
return nil, errors.New("no bucket speficied")
}
if objOpts.Object == "" {
return nil, errors.New("no object speficied")
}
form.Add("bucket", objOpts.Bucket)
form.Add("object", objOpts.Object)
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/object-summary",
queryValues: form,
})
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
objectSummary = new(ObjectSummary)
err = msgp.Decode(resp.Body, objectSummary)
if err != nil {
return nil, err
}
return
}
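// Usage sketch (comment only): the bucket and object names below are
// hypothetical; error handling is abbreviated.
//
//	summary, err := adm.ObjectSummary(ctx, ObjectSummaryOptions{
//		Bucket: "mybucket",
//		Object: "path/to/object",
//	})
//	if err != nil {
//		return err
//	}
//	for _, part := range summary.Parts {
//		fmt.Printf("part %d on %s (%d bytes)\n", part.Part, part.Host, part.Size)
//	}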
// ObjectMetaSummary is returned from MinIO when calling ObjectSummary.
// This struct gives specific information about xl.meta files
// belonging to the object being inspected by the ObjectSummary API.
type ObjectMetaSummary struct {
Filename string
Host string
Drive string
Size int64
Errors []string
IsDeleteMarker bool
ModTime int64
Signature [4]byte
}
// ObjectPartSummary is returned from MinIO when calling ObjectSummary.
// This struct gives specific information about each part of the object
// being inspected by the ObjectSummary API.
type ObjectPartSummary struct {
Part int
Pool int
Host string
Set int
Drive string
Filename string
Size int64
}
// ObjectSummary is returned from MinIO when calling ObjectSummary.
type ObjectSummary struct {
Name string
Errors []string
// DataDir represents the directory on disk created using
// the version IDs, or a random UUID if the object is not
// versioned.
DataDir string
IsInline bool
PartNumbers []int
ErasureDist []uint8
Metas []*ObjectMetaSummary
Parts []*ObjectPartSummary
}
golang-github-minio-madmin-go-3.0.104/summary-object_gen.go 0000664 0000000 0000000 00000072363 14774251704 0023534 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *ObjectMetaSummary) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Filename":
z.Filename, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Filename")
return
}
case "Host":
z.Host, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Host")
return
}
case "Drive":
z.Drive, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Drive")
return
}
case "Size":
z.Size, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "Errors":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
if cap(z.Errors) >= int(zb0002) {
z.Errors = (z.Errors)[:zb0002]
} else {
z.Errors = make([]string, zb0002)
}
for za0001 := range z.Errors {
z.Errors[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
case "IsDeleteMarker":
z.IsDeleteMarker, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "IsDeleteMarker")
return
}
case "ModTime":
z.ModTime, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ModTime")
return
}
case "Signature":
err = dc.ReadExactBytes((z.Signature)[:])
if err != nil {
err = msgp.WrapError(err, "Signature")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ObjectMetaSummary) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "Filename"
err = en.Append(0x88, 0xa8, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Filename)
if err != nil {
err = msgp.WrapError(err, "Filename")
return
}
// write "Host"
err = en.Append(0xa4, 0x48, 0x6f, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Host)
if err != nil {
err = msgp.WrapError(err, "Host")
return
}
// write "Drive"
err = en.Append(0xa5, 0x44, 0x72, 0x69, 0x76, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Drive)
if err != nil {
err = msgp.WrapError(err, "Drive")
return
}
// write "Size"
err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.Size)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
// write "Errors"
err = en.Append(0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Errors)))
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
for za0001 := range z.Errors {
err = en.WriteString(z.Errors[za0001])
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
// write "IsDeleteMarker"
err = en.Append(0xae, 0x49, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteBool(z.IsDeleteMarker)
if err != nil {
err = msgp.WrapError(err, "IsDeleteMarker")
return
}
// write "ModTime"
err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.ModTime)
if err != nil {
err = msgp.WrapError(err, "ModTime")
return
}
// write "Signature"
err = en.Append(0xa9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65)
if err != nil {
return
}
err = en.WriteBytes((z.Signature)[:])
if err != nil {
err = msgp.WrapError(err, "Signature")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ObjectMetaSummary) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "Filename"
o = append(o, 0x88, 0xa8, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Filename)
// string "Host"
o = append(o, 0xa4, 0x48, 0x6f, 0x73, 0x74)
o = msgp.AppendString(o, z.Host)
// string "Drive"
o = append(o, 0xa5, 0x44, 0x72, 0x69, 0x76, 0x65)
o = msgp.AppendString(o, z.Drive)
// string "Size"
o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendInt64(o, z.Size)
// string "Errors"
o = append(o, 0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Errors)))
for za0001 := range z.Errors {
o = msgp.AppendString(o, z.Errors[za0001])
}
// string "IsDeleteMarker"
o = append(o, 0xae, 0x49, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72)
o = msgp.AppendBool(o, z.IsDeleteMarker)
// string "ModTime"
o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendInt64(o, z.ModTime)
// string "Signature"
o = append(o, 0xa9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65)
o = msgp.AppendBytes(o, (z.Signature)[:])
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ObjectMetaSummary) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Filename":
z.Filename, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Filename")
return
}
case "Host":
z.Host, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Host")
return
}
case "Drive":
z.Drive, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Drive")
return
}
case "Size":
z.Size, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
case "Errors":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
if cap(z.Errors) >= int(zb0002) {
z.Errors = (z.Errors)[:zb0002]
} else {
z.Errors = make([]string, zb0002)
}
for za0001 := range z.Errors {
z.Errors[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
case "IsDeleteMarker":
z.IsDeleteMarker, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "IsDeleteMarker")
return
}
case "ModTime":
z.ModTime, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ModTime")
return
}
case "Signature":
bts, err = msgp.ReadExactBytes(bts, (z.Signature)[:])
if err != nil {
err = msgp.WrapError(err, "Signature")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ObjectMetaSummary) Msgsize() (s int) {
s = 1 + 9 + msgp.StringPrefixSize + len(z.Filename) + 5 + msgp.StringPrefixSize + len(z.Host) + 6 + msgp.StringPrefixSize + len(z.Drive) + 5 + msgp.Int64Size + 7 + msgp.ArrayHeaderSize
for za0001 := range z.Errors {
s += msgp.StringPrefixSize + len(z.Errors[za0001])
}
s += 15 + msgp.BoolSize + 8 + msgp.Int64Size + 10 + msgp.ArrayHeaderSize + (4 * (msgp.ByteSize))
return
}
// DecodeMsg implements msgp.Decodable
func (z *ObjectPartSummary) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Part":
z.Part, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Part")
return
}
case "Pool":
z.Pool, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
case "Host":
z.Host, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Host")
return
}
case "Set":
z.Set, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
case "Drive":
z.Drive, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Drive")
return
}
case "Filename":
z.Filename, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Filename")
return
}
case "Size":
z.Size, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ObjectPartSummary) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 7
// write "Part"
err = en.Append(0x87, 0xa4, 0x50, 0x61, 0x72, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Part)
if err != nil {
err = msgp.WrapError(err, "Part")
return
}
// write "Pool"
err = en.Append(0xa4, 0x50, 0x6f, 0x6f, 0x6c)
if err != nil {
return
}
err = en.WriteInt(z.Pool)
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
// write "Host"
err = en.Append(0xa4, 0x48, 0x6f, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Host)
if err != nil {
err = msgp.WrapError(err, "Host")
return
}
// write "Set"
err = en.Append(0xa3, 0x53, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Set)
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
// write "Drive"
err = en.Append(0xa5, 0x44, 0x72, 0x69, 0x76, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Drive)
if err != nil {
err = msgp.WrapError(err, "Drive")
return
}
// write "Filename"
err = en.Append(0xa8, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Filename)
if err != nil {
err = msgp.WrapError(err, "Filename")
return
}
// write "Size"
err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.Size)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ObjectPartSummary) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 7
// string "Part"
o = append(o, 0x87, 0xa4, 0x50, 0x61, 0x72, 0x74)
o = msgp.AppendInt(o, z.Part)
// string "Pool"
o = append(o, 0xa4, 0x50, 0x6f, 0x6f, 0x6c)
o = msgp.AppendInt(o, z.Pool)
// string "Host"
o = append(o, 0xa4, 0x48, 0x6f, 0x73, 0x74)
o = msgp.AppendString(o, z.Host)
// string "Set"
o = append(o, 0xa3, 0x53, 0x65, 0x74)
o = msgp.AppendInt(o, z.Set)
// string "Drive"
o = append(o, 0xa5, 0x44, 0x72, 0x69, 0x76, 0x65)
o = msgp.AppendString(o, z.Drive)
// string "Filename"
o = append(o, 0xa8, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Filename)
// string "Size"
o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
o = msgp.AppendInt64(o, z.Size)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ObjectPartSummary) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Part":
z.Part, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Part")
return
}
case "Pool":
z.Pool, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Pool")
return
}
case "Host":
z.Host, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Host")
return
}
case "Set":
z.Set, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Set")
return
}
case "Drive":
z.Drive, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Drive")
return
}
case "Filename":
z.Filename, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Filename")
return
}
case "Size":
z.Size, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Size")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ObjectPartSummary) Msgsize() (s int) {
s = 1 + 5 + msgp.IntSize + 5 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Host) + 4 + msgp.IntSize + 6 + msgp.StringPrefixSize + len(z.Drive) + 9 + msgp.StringPrefixSize + len(z.Filename) + 5 + msgp.Int64Size
return
}
// DecodeMsg implements msgp.Decodable
func (z *ObjectSummary) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Errors":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
if cap(z.Errors) >= int(zb0002) {
z.Errors = (z.Errors)[:zb0002]
} else {
z.Errors = make([]string, zb0002)
}
for za0001 := range z.Errors {
z.Errors[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
case "DataDir":
z.DataDir, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "DataDir")
return
}
case "IsInline":
z.IsInline, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "IsInline")
return
}
case "PartNumbers":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "PartNumbers")
return
}
if cap(z.PartNumbers) >= int(zb0003) {
z.PartNumbers = (z.PartNumbers)[:zb0003]
} else {
z.PartNumbers = make([]int, zb0003)
}
for za0002 := range z.PartNumbers {
z.PartNumbers[za0002], err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PartNumbers", za0002)
return
}
}
case "ErasureDist":
var zb0004 uint32
zb0004, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "ErasureDist")
return
}
if cap(z.ErasureDist) >= int(zb0004) {
z.ErasureDist = (z.ErasureDist)[:zb0004]
} else {
z.ErasureDist = make([]uint8, zb0004)
}
for za0003 := range z.ErasureDist {
z.ErasureDist[za0003], err = dc.ReadUint8()
if err != nil {
err = msgp.WrapError(err, "ErasureDist", za0003)
return
}
}
case "Metas":
var zb0005 uint32
zb0005, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Metas")
return
}
if cap(z.Metas) >= int(zb0005) {
z.Metas = (z.Metas)[:zb0005]
} else {
z.Metas = make([]*ObjectMetaSummary, zb0005)
}
for za0004 := range z.Metas {
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Metas", za0004)
return
}
z.Metas[za0004] = nil
} else {
if z.Metas[za0004] == nil {
z.Metas[za0004] = new(ObjectMetaSummary)
}
err = z.Metas[za0004].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Metas", za0004)
return
}
}
}
case "Parts":
var zb0006 uint32
zb0006, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Parts")
return
}
if cap(z.Parts) >= int(zb0006) {
z.Parts = (z.Parts)[:zb0006]
} else {
z.Parts = make([]*ObjectPartSummary, zb0006)
}
for za0005 := range z.Parts {
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Parts", za0005)
return
}
z.Parts[za0005] = nil
} else {
if z.Parts[za0005] == nil {
z.Parts[za0005] = new(ObjectPartSummary)
}
err = z.Parts[za0005].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Parts", za0005)
return
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ObjectSummary) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "Name"
err = en.Append(0x88, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Errors"
err = en.Append(0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Errors)))
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
for za0001 := range z.Errors {
err = en.WriteString(z.Errors[za0001])
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
// write "DataDir"
err = en.Append(0xa7, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72)
if err != nil {
return
}
err = en.WriteString(z.DataDir)
if err != nil {
err = msgp.WrapError(err, "DataDir")
return
}
// write "IsInline"
err = en.Append(0xa8, 0x49, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.IsInline)
if err != nil {
err = msgp.WrapError(err, "IsInline")
return
}
// write "PartNumbers"
err = en.Append(0xab, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.PartNumbers)))
if err != nil {
err = msgp.WrapError(err, "PartNumbers")
return
}
for za0002 := range z.PartNumbers {
err = en.WriteInt(z.PartNumbers[za0002])
if err != nil {
err = msgp.WrapError(err, "PartNumbers", za0002)
return
}
}
// write "ErasureDist"
err = en.Append(0xab, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x44, 0x69, 0x73, 0x74)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.ErasureDist)))
if err != nil {
err = msgp.WrapError(err, "ErasureDist")
return
}
for za0003 := range z.ErasureDist {
err = en.WriteUint8(z.ErasureDist[za0003])
if err != nil {
err = msgp.WrapError(err, "ErasureDist", za0003)
return
}
}
// write "Metas"
err = en.Append(0xa5, 0x4d, 0x65, 0x74, 0x61, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Metas)))
if err != nil {
err = msgp.WrapError(err, "Metas")
return
}
for za0004 := range z.Metas {
if z.Metas[za0004] == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Metas[za0004].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Metas", za0004)
return
}
}
}
// write "Parts"
err = en.Append(0xa5, 0x50, 0x61, 0x72, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Parts)))
if err != nil {
err = msgp.WrapError(err, "Parts")
return
}
for za0005 := range z.Parts {
if z.Parts[za0005] == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Parts[za0005].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Parts", za0005)
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ObjectSummary) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "Name"
o = append(o, 0x88, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Errors"
o = append(o, 0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Errors)))
for za0001 := range z.Errors {
o = msgp.AppendString(o, z.Errors[za0001])
}
// string "DataDir"
o = append(o, 0xa7, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72)
o = msgp.AppendString(o, z.DataDir)
// string "IsInline"
o = append(o, 0xa8, 0x49, 0x73, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65)
o = msgp.AppendBool(o, z.IsInline)
// string "PartNumbers"
o = append(o, 0xab, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.PartNumbers)))
for za0002 := range z.PartNumbers {
o = msgp.AppendInt(o, z.PartNumbers[za0002])
}
// string "ErasureDist"
o = append(o, 0xab, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x44, 0x69, 0x73, 0x74)
o = msgp.AppendArrayHeader(o, uint32(len(z.ErasureDist)))
for za0003 := range z.ErasureDist {
o = msgp.AppendUint8(o, z.ErasureDist[za0003])
}
// string "Metas"
o = append(o, 0xa5, 0x4d, 0x65, 0x74, 0x61, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Metas)))
for za0004 := range z.Metas {
if z.Metas[za0004] == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Metas[za0004].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Metas", za0004)
return
}
}
}
// string "Parts"
o = append(o, 0xa5, 0x50, 0x61, 0x72, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Parts)))
for za0005 := range z.Parts {
if z.Parts[za0005] == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Parts[za0005].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Parts", za0005)
return
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ObjectSummary) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Errors":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Errors")
return
}
if cap(z.Errors) >= int(zb0002) {
z.Errors = (z.Errors)[:zb0002]
} else {
z.Errors = make([]string, zb0002)
}
for za0001 := range z.Errors {
z.Errors[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Errors", za0001)
return
}
}
case "DataDir":
z.DataDir, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DataDir")
return
}
case "IsInline":
z.IsInline, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "IsInline")
return
}
case "PartNumbers":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PartNumbers")
return
}
if cap(z.PartNumbers) >= int(zb0003) {
z.PartNumbers = (z.PartNumbers)[:zb0003]
} else {
z.PartNumbers = make([]int, zb0003)
}
for za0002 := range z.PartNumbers {
z.PartNumbers[za0002], bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PartNumbers", za0002)
return
}
}
case "ErasureDist":
var zb0004 uint32
zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ErasureDist")
return
}
if cap(z.ErasureDist) >= int(zb0004) {
z.ErasureDist = (z.ErasureDist)[:zb0004]
} else {
z.ErasureDist = make([]uint8, zb0004)
}
for za0003 := range z.ErasureDist {
z.ErasureDist[za0003], bts, err = msgp.ReadUint8Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ErasureDist", za0003)
return
}
}
case "Metas":
var zb0005 uint32
zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Metas")
return
}
if cap(z.Metas) >= int(zb0005) {
z.Metas = (z.Metas)[:zb0005]
} else {
z.Metas = make([]*ObjectMetaSummary, zb0005)
}
for za0004 := range z.Metas {
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Metas[za0004] = nil
} else {
if z.Metas[za0004] == nil {
z.Metas[za0004] = new(ObjectMetaSummary)
}
bts, err = z.Metas[za0004].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Metas", za0004)
return
}
}
}
case "Parts":
var zb0006 uint32
zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Parts")
return
}
if cap(z.Parts) >= int(zb0006) {
z.Parts = (z.Parts)[:zb0006]
} else {
z.Parts = make([]*ObjectPartSummary, zb0006)
}
for za0005 := range z.Parts {
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Parts[za0005] = nil
} else {
if z.Parts[za0005] == nil {
z.Parts[za0005] = new(ObjectPartSummary)
}
bts, err = z.Parts[za0005].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Parts", za0005)
return
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ObjectSummary) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 7 + msgp.ArrayHeaderSize
for za0001 := range z.Errors {
s += msgp.StringPrefixSize + len(z.Errors[za0001])
}
s += 8 + msgp.StringPrefixSize + len(z.DataDir) + 9 + msgp.BoolSize + 12 + msgp.ArrayHeaderSize + (len(z.PartNumbers) * (msgp.IntSize)) + 12 + msgp.ArrayHeaderSize + (len(z.ErasureDist) * (msgp.Uint8Size)) + 6 + msgp.ArrayHeaderSize
for za0004 := range z.Metas {
if z.Metas[za0004] == nil {
s += msgp.NilSize
} else {
s += z.Metas[za0004].Msgsize()
}
}
s += 6 + msgp.ArrayHeaderSize
for za0005 := range z.Parts {
if z.Parts[za0005] == nil {
s += msgp.NilSize
} else {
s += z.Parts[za0005].Msgsize()
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *ObjectSummaryOptions) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z ObjectSummaryOptions) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "Bucket"
err = en.Append(0x82, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Object"
err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z ObjectSummaryOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "Bucket"
o = append(o, 0x82, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Object"
o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ObjectSummaryOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z ObjectSummaryOptions) Msgsize() (s int) {
s = 1 + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object)
return
}
golang-github-minio-madmin-go-3.0.104/summary-object_gen_test.go 0000664 0000000 0000000 00000022203 14774251704 0024557 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalObjectMetaSummary(t *testing.T) {
v := ObjectMetaSummary{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgObjectMetaSummary(b *testing.B) {
v := ObjectMetaSummary{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgObjectMetaSummary(b *testing.B) {
v := ObjectMetaSummary{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalObjectMetaSummary(b *testing.B) {
v := ObjectMetaSummary{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeObjectMetaSummary(t *testing.T) {
v := ObjectMetaSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeObjectMetaSummary Msgsize() is inaccurate")
}
vn := ObjectMetaSummary{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeObjectMetaSummary(b *testing.B) {
v := ObjectMetaSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeObjectMetaSummary(b *testing.B) {
v := ObjectMetaSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalObjectPartSummary(t *testing.T) {
v := ObjectPartSummary{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgObjectPartSummary(b *testing.B) {
v := ObjectPartSummary{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgObjectPartSummary(b *testing.B) {
v := ObjectPartSummary{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalObjectPartSummary(b *testing.B) {
v := ObjectPartSummary{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeObjectPartSummary(t *testing.T) {
v := ObjectPartSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeObjectPartSummary Msgsize() is inaccurate")
}
vn := ObjectPartSummary{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeObjectPartSummary(b *testing.B) {
v := ObjectPartSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeObjectPartSummary(b *testing.B) {
v := ObjectPartSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalObjectSummary(t *testing.T) {
v := ObjectSummary{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgObjectSummary(b *testing.B) {
v := ObjectSummary{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgObjectSummary(b *testing.B) {
v := ObjectSummary{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalObjectSummary(b *testing.B) {
v := ObjectSummary{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeObjectSummary(t *testing.T) {
v := ObjectSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeObjectSummary Msgsize() is inaccurate")
}
vn := ObjectSummary{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeObjectSummary(b *testing.B) {
v := ObjectSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeObjectSummary(b *testing.B) {
v := ObjectSummary{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalObjectSummaryOptions(t *testing.T) {
v := ObjectSummaryOptions{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgObjectSummaryOptions(b *testing.B) {
v := ObjectSummaryOptions{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgObjectSummaryOptions(b *testing.B) {
v := ObjectSummaryOptions{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalObjectSummaryOptions(b *testing.B) {
v := ObjectSummaryOptions{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeObjectSummaryOptions(t *testing.T) {
v := ObjectSummaryOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeObjectSummaryOptions Msgsize() is inaccurate")
}
vn := ObjectSummaryOptions{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeObjectSummaryOptions(b *testing.B) {
v := ObjectSummaryOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeObjectSummaryOptions(b *testing.B) {
v := ObjectSummaryOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/tier-azure.go 0000664 0000000 0000000 00000007542 14774251704 0022026 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import "errors"
//go:generate msgp -file $GOFILE
// ServicePrincipalAuth holds the fields required for service principal (SP) authentication with Azure
type ServicePrincipalAuth struct {
TenantID string `json:",omitempty"`
ClientID string `json:",omitempty"`
ClientSecret string `json:",omitempty"`
}
// TierAzure represents the remote tier configuration for Azure Blob Storage.
type TierAzure struct {
Endpoint string `json:",omitempty"`
AccountName string `json:",omitempty"`
AccountKey string `json:",omitempty"`
Bucket string `json:",omitempty"`
Prefix string `json:",omitempty"`
Region string `json:",omitempty"`
StorageClass string `json:",omitempty"`
SPAuth ServicePrincipalAuth `json:",omitempty"`
}
// IsSPEnabled returns true if all SP related fields are provided
func (ti TierAzure) IsSPEnabled() bool {
return ti.SPAuth.TenantID != "" && ti.SPAuth.ClientID != "" && ti.SPAuth.ClientSecret != ""
}
// AzureOptions is the type of the variadic options accepted by NewTierAzure.
type AzureOptions func(*TierAzure) error
// AzureServicePrincipal helper to supply optional service principal credentials
func AzureServicePrincipal(tenantID, clientID, clientSecret string) func(az *TierAzure) error {
return func(az *TierAzure) error {
if tenantID == "" {
return errors.New("empty tenant ID unsupported")
}
if clientID == "" {
return errors.New("empty client ID unsupported")
}
if clientSecret == "" {
return errors.New("empty client secret unsupported")
}
az.SPAuth.TenantID = tenantID
az.SPAuth.ClientID = clientID
az.SPAuth.ClientSecret = clientSecret
return nil
}
}
// AzurePrefix helper to supply optional object prefix to NewTierAzure
func AzurePrefix(prefix string) func(az *TierAzure) error {
return func(az *TierAzure) error {
az.Prefix = prefix
return nil
}
}
// AzureEndpoint helper to supply optional endpoint to NewTierAzure
func AzureEndpoint(endpoint string) func(az *TierAzure) error {
return func(az *TierAzure) error {
az.Endpoint = endpoint
return nil
}
}
// AzureRegion helper to supply optional region to NewTierAzure
func AzureRegion(region string) func(az *TierAzure) error {
return func(az *TierAzure) error {
az.Region = region
return nil
}
}
// AzureStorageClass helper to supply optional storage class to NewTierAzure
func AzureStorageClass(sc string) func(az *TierAzure) error {
return func(az *TierAzure) error {
az.StorageClass = sc
return nil
}
}
// NewTierAzure returns a TierConfig of Azure type. It returns an error if the
// given parameters are invalid, e.g. if the tier name is empty.
func NewTierAzure(name, accountName, accountKey, bucket string, options ...AzureOptions) (*TierConfig, error) {
if name == "" {
return nil, ErrTierNameEmpty
}
az := &TierAzure{
AccountName: accountName,
AccountKey: accountKey,
Bucket: bucket,
// Defaults
Endpoint: "",
Prefix: "",
Region: "",
StorageClass: "",
}
for _, option := range options {
err := option(az)
if err != nil {
return nil, err
}
}
return &TierConfig{
Version: TierConfigVer,
Type: Azure,
Name: name,
Azure: az,
}, nil
}
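// Illustrative usage sketch (not part of the original source): building an
// Azure tier config with optional settings. The account, bucket and
// credential values below are hypothetical placeholders.
//
//    cfg, err := NewTierAzure("AZTIER", "myaccount", "myaccountkey", "mybucket",
//        AzurePrefix("archive/"),
//        AzureEndpoint("https://myaccount.blob.core.windows.net"),
//        AzureServicePrincipal("tenant-id", "client-id", "client-secret"),
//    )
//    if err != nil {
//        // handle error (e.g. ErrTierNameEmpty or empty SP credentials)
//    }
//    _ = cfg.Azure.IsSPEnabled() // true here, since all SP fields are set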
golang-github-minio-madmin-go-3.0.104/tier-azure_gen.go 0000664 0000000 0000000 00000033060 14774251704 0022651 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *ServicePrincipalAuth) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "TenantID":
z.TenantID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "TenantID")
return
}
case "ClientID":
z.ClientID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ClientID")
return
}
case "ClientSecret":
z.ClientSecret, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ClientSecret")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z ServicePrincipalAuth) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "TenantID"
err = en.Append(0x83, 0xa8, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.TenantID)
if err != nil {
err = msgp.WrapError(err, "TenantID")
return
}
// write "ClientID"
err = en.Append(0xa8, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ClientID)
if err != nil {
err = msgp.WrapError(err, "ClientID")
return
}
// write "ClientSecret"
err = en.Append(0xac, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.ClientSecret)
if err != nil {
err = msgp.WrapError(err, "ClientSecret")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z ServicePrincipalAuth) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "TenantID"
o = append(o, 0x83, 0xa8, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.TenantID)
// string "ClientID"
o = append(o, 0xa8, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.ClientID)
// string "ClientSecret"
o = append(o, 0xac, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74)
o = msgp.AppendString(o, z.ClientSecret)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ServicePrincipalAuth) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "TenantID":
z.TenantID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TenantID")
return
}
case "ClientID":
z.ClientID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ClientID")
return
}
case "ClientSecret":
z.ClientSecret, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ClientSecret")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z ServicePrincipalAuth) Msgsize() (s int) {
s = 1 + 9 + msgp.StringPrefixSize + len(z.TenantID) + 9 + msgp.StringPrefixSize + len(z.ClientID) + 13 + msgp.StringPrefixSize + len(z.ClientSecret)
return
}
// DecodeMsg implements msgp.Decodable
func (z *TierAzure) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "AccountName":
z.AccountName, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AccountName")
return
}
case "AccountKey":
z.AccountKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AccountKey")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
case "StorageClass":
z.StorageClass, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
case "SPAuth":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "SPAuth")
return
}
for zb0002 > 0 {
zb0002--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, "SPAuth")
return
}
switch msgp.UnsafeString(field) {
case "TenantID":
z.SPAuth.TenantID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SPAuth", "TenantID")
return
}
case "ClientID":
z.SPAuth.ClientID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SPAuth", "ClientID")
return
}
case "ClientSecret":
z.SPAuth.ClientSecret, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SPAuth", "ClientSecret")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, "SPAuth")
return
}
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *TierAzure) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 8
// write "Endpoint"
err = en.Append(0x88, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "AccountName"
err = en.Append(0xab, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.AccountName)
if err != nil {
err = msgp.WrapError(err, "AccountName")
return
}
// write "AccountKey"
err = en.Append(0xaa, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.AccountKey)
if err != nil {
err = msgp.WrapError(err, "AccountKey")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "Region"
err = en.Append(0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Region)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
// write "StorageClass"
err = en.Append(0xac, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73)
if err != nil {
return
}
err = en.WriteString(z.StorageClass)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
// write "SPAuth"
err = en.Append(0xa6, 0x53, 0x50, 0x41, 0x75, 0x74, 0x68)
if err != nil {
return
}
// map header, size 3
// write "TenantID"
err = en.Append(0x83, 0xa8, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.SPAuth.TenantID)
if err != nil {
err = msgp.WrapError(err, "SPAuth", "TenantID")
return
}
// write "ClientID"
err = en.Append(0xa8, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.SPAuth.ClientID)
if err != nil {
err = msgp.WrapError(err, "SPAuth", "ClientID")
return
}
// write "ClientSecret"
err = en.Append(0xac, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.SPAuth.ClientSecret)
if err != nil {
err = msgp.WrapError(err, "SPAuth", "ClientSecret")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *TierAzure) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 8
// string "Endpoint"
o = append(o, 0x88, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "AccountName"
o = append(o, 0xab, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.AccountName)
// string "AccountKey"
o = append(o, 0xaa, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.AccountKey)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "Region"
o = append(o, 0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Region)
// string "StorageClass"
o = append(o, 0xac, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73)
o = msgp.AppendString(o, z.StorageClass)
// string "SPAuth"
o = append(o, 0xa6, 0x53, 0x50, 0x41, 0x75, 0x74, 0x68)
// map header, size 3
// string "TenantID"
o = append(o, 0x83, 0xa8, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.SPAuth.TenantID)
// string "ClientID"
o = append(o, 0xa8, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44)
o = msgp.AppendString(o, z.SPAuth.ClientID)
// string "ClientSecret"
o = append(o, 0xac, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74)
o = msgp.AppendString(o, z.SPAuth.ClientSecret)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierAzure) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "AccountName":
z.AccountName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AccountName")
return
}
case "AccountKey":
z.AccountKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AccountKey")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
case "StorageClass":
z.StorageClass, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
case "SPAuth":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SPAuth")
return
}
for zb0002 > 0 {
zb0002--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "SPAuth")
return
}
switch msgp.UnsafeString(field) {
case "TenantID":
z.SPAuth.TenantID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SPAuth", "TenantID")
return
}
case "ClientID":
z.SPAuth.ClientID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SPAuth", "ClientID")
return
}
case "ClientSecret":
z.SPAuth.ClientSecret, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SPAuth", "ClientSecret")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, "SPAuth")
return
}
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TierAzure) Msgsize() (s int) {
s = 1 + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 12 + msgp.StringPrefixSize + len(z.AccountName) + 11 + msgp.StringPrefixSize + len(z.AccountKey) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 7 + msgp.StringPrefixSize + len(z.Region) + 13 + msgp.StringPrefixSize + len(z.StorageClass) + 7 + 1 + 9 + msgp.StringPrefixSize + len(z.SPAuth.TenantID) + 9 + msgp.StringPrefixSize + len(z.SPAuth.ClientID) + 13 + msgp.StringPrefixSize + len(z.SPAuth.ClientSecret)
return
}
golang-github-minio-madmin-go-3.0.104/tier-azure_gen_test.go 0000664 0000000 0000000 00000011077 14774251704 0023714 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalServicePrincipalAuth(t *testing.T) {
v := ServicePrincipalAuth{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgServicePrincipalAuth(b *testing.B) {
v := ServicePrincipalAuth{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgServicePrincipalAuth(b *testing.B) {
v := ServicePrincipalAuth{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalServicePrincipalAuth(b *testing.B) {
v := ServicePrincipalAuth{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeServicePrincipalAuth(t *testing.T) {
v := ServicePrincipalAuth{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeServicePrincipalAuth Msgsize() is inaccurate")
}
vn := ServicePrincipalAuth{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeServicePrincipalAuth(b *testing.B) {
v := ServicePrincipalAuth{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeServicePrincipalAuth(b *testing.B) {
v := ServicePrincipalAuth{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalTierAzure(t *testing.T) {
v := TierAzure{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTierAzure(b *testing.B) {
v := TierAzure{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTierAzure(b *testing.B) {
v := TierAzure{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTierAzure(b *testing.B) {
v := TierAzure{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTierAzure(t *testing.T) {
v := TierAzure{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTierAzure Msgsize() is inaccurate")
}
vn := TierAzure{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTierAzure(b *testing.B) {
v := TierAzure{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTierAzure(b *testing.B) {
v := TierAzure{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/tier-config.go 0000664 0000000 0000000 00000014006 14774251704 0022136 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"encoding/json"
"errors"
"log"
)
//go:generate msgp -file $GOFILE
// TierConfigVer refers to the current tier config version
const TierConfigVer = "v1"
// TierType enumerates different remote tier backends.
type TierType int
const (
// Unsupported refers to remote tier backend that is not supported in this version
Unsupported TierType = iota
// S3 refers to AWS S3 compatible backend
S3
// Azure refers to Azure Blob Storage
Azure
// GCS refers to Google Cloud Storage
GCS
// MinIO refers to MinIO object storage backend
MinIO
)
// String returns the name of tt's remote tier backend.
func (tt TierType) String() string {
switch tt {
case S3:
return "s3"
case Azure:
return "azure"
case GCS:
return "gcs"
case MinIO:
return "minio"
}
return "unsupported"
}
// MarshalJSON returns the canonical json representation of tt.
func (tt TierType) MarshalJSON() ([]byte, error) {
typ := tt.String()
return json.Marshal(typ)
}
// UnmarshalJSON parses the provided tier type string, failing the unmarshal
// if data contains an invalid tier type.
func (tt *TierType) UnmarshalJSON(data []byte) error {
var s string
err := json.Unmarshal(data, &s)
if err != nil {
return err
}
newtt, err := NewTierType(s)
if err != nil {
return err
}
*tt = newtt
return nil
}
// NewTierType creates TierType if scType is a valid tier type string, otherwise
// returns an error.
func NewTierType(scType string) (TierType, error) {
switch scType {
case S3.String():
return S3, nil
case Azure.String():
return Azure, nil
case GCS.String():
return GCS, nil
case MinIO.String():
return MinIO, nil
}
return Unsupported, ErrTierTypeUnsupported
}
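// Illustrative sketch (not part of the original source): TierType round-trips
// through its lowercase string form.
//
//    tt, _ := NewTierType("azure") // Azure
//    _ = tt.String()               // "azure"
//    _, err := NewTierType("ftp")  // Unsupported, ErrTierTypeUnsupported
//    _ = err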
// TierConfig represents the different remote tier backend configurations
// supported. The specific backend is identified by the Type field. It has a
// Version field to allow for backwards-compatible extension in the future.
type TierConfig struct {
Version string
Type TierType `json:",omitempty"`
Name string `json:",omitempty"`
S3 *TierS3 `json:",omitempty"`
Azure *TierAzure `json:",omitempty"`
GCS *TierGCS `json:",omitempty"`
MinIO *TierMinIO `json:",omitempty"`
}
var (
// ErrTierNameEmpty "remote tier name empty"
ErrTierNameEmpty = errors.New("remote tier name empty")
// ErrTierInvalidConfig "invalid tier config"
ErrTierInvalidConfig = errors.New("invalid tier config")
// ErrTierInvalidConfigVersion "invalid tier config version"
ErrTierInvalidConfigVersion = errors.New("invalid tier config version")
// ErrTierTypeUnsupported "unsupported tier type"
ErrTierTypeUnsupported = errors.New("unsupported tier type")
)
// Clone returns a copy of TierConfig with secret key/credentials redacted.
func (cfg *TierConfig) Clone() TierConfig {
var (
s3 TierS3
az TierAzure
gcs TierGCS
m TierMinIO
)
switch cfg.Type {
case S3:
s3 = *cfg.S3
s3.SecretKey = "REDACTED"
case Azure:
az = *cfg.Azure
az.AccountKey = "REDACTED"
case GCS:
gcs = *cfg.GCS
gcs.Creds = "REDACTED"
case MinIO:
m = *cfg.MinIO
m.SecretKey = "REDACTED"
}
return TierConfig{
Version: cfg.Version,
Type: cfg.Type,
Name: cfg.Name,
S3: &s3,
Azure: &az,
GCS: &gcs,
MinIO: &m,
}
}
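// Illustrative sketch (not part of the original source): Clone is meant for
// display and logging paths where credentials must not leak. Assuming cfg is
// an Azure-typed *TierConfig:
//
//    redacted := cfg.Clone()
//    // redacted.Azure.AccountKey == "REDACTED", while cfg itself is untouched.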
// UnmarshalJSON unmarshals the JSON value and ensures that the Type field is
// consistent with the tier backend config supplied.
// See TestUnmarshalTierConfig for an example json.
func (cfg *TierConfig) UnmarshalJSON(b []byte) error {
type tierConfig TierConfig
var m tierConfig
err := json.Unmarshal(b, &m)
if err != nil {
return err
}
switch m.Version {
case TierConfigVer:
default:
return ErrTierInvalidConfigVersion
}
switch m.Type {
case S3:
if m.S3 == nil {
return ErrTierInvalidConfig
}
case Azure:
if m.Azure == nil {
return ErrTierInvalidConfig
}
case GCS:
if m.GCS == nil {
return ErrTierInvalidConfig
}
case MinIO:
if m.MinIO == nil {
return ErrTierInvalidConfig
}
}
if m.Name == "" {
return ErrTierNameEmpty
}
*cfg = TierConfig(m)
return nil
}
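// Illustrative sketch (not part of the original source): a minimal JSON
// document in the shape accepted by TierConfig.UnmarshalJSON. All values
// below are hypothetical placeholders.
//
//    var cfg TierConfig
//    err := json.Unmarshal([]byte(`{
//        "Version": "v1",
//        "Type": "gcs",
//        "Name": "GCSTIER",
//        "GCS": {
//            "Bucket": "ilmtesting",
//            "Creds": "base64-encoded-credentials-json",
//            "Endpoint": "https://storage.googleapis.com/"
//        }
//    }`), &cfg)
//    // err == nil; cfg.Type == GCS and cfg.GCS is populated.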
// Endpoint returns the remote tier backend endpoint.
func (cfg *TierConfig) Endpoint() string {
switch cfg.Type {
case S3:
return cfg.S3.Endpoint
case Azure:
return cfg.Azure.Endpoint
case GCS:
return cfg.GCS.Endpoint
case MinIO:
return cfg.MinIO.Endpoint
}
log.Printf("unexpected tier type %s", cfg.Type)
return ""
}
// Bucket returns the remote tier backend bucket.
func (cfg *TierConfig) Bucket() string {
switch cfg.Type {
case S3:
return cfg.S3.Bucket
case Azure:
return cfg.Azure.Bucket
case GCS:
return cfg.GCS.Bucket
case MinIO:
return cfg.MinIO.Bucket
}
log.Printf("unexpected tier type %s", cfg.Type)
return ""
}
// Prefix returns the remote tier backend prefix.
func (cfg *TierConfig) Prefix() string {
switch cfg.Type {
case S3:
return cfg.S3.Prefix
case Azure:
return cfg.Azure.Prefix
case GCS:
return cfg.GCS.Prefix
case MinIO:
return cfg.MinIO.Prefix
}
log.Printf("unexpected tier type %s", cfg.Type)
return ""
}
// Region returns the remote tier backend region.
func (cfg *TierConfig) Region() string {
switch cfg.Type {
case S3:
return cfg.S3.Region
case Azure:
return cfg.Azure.Region
case GCS:
return cfg.GCS.Region
case MinIO:
return cfg.MinIO.Region
}
log.Printf("unexpected tier type %s", cfg.Type)
return ""
}
golang-github-minio-madmin-go-3.0.104/tier-config_gen.go 0000664 0000000 0000000 00000022602 14774251704 0022770 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *TierConfig) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Version":
z.Version, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
case "Type":
{
var zb0002 int
zb0002, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = TierType(zb0002)
}
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "S3":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "S3")
return
}
z.S3 = nil
} else {
if z.S3 == nil {
z.S3 = new(TierS3)
}
err = z.S3.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "S3")
return
}
}
case "Azure":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "Azure")
return
}
z.Azure = nil
} else {
if z.Azure == nil {
z.Azure = new(TierAzure)
}
err = z.Azure.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Azure")
return
}
}
case "GCS":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "GCS")
return
}
z.GCS = nil
} else {
if z.GCS == nil {
z.GCS = new(TierGCS)
}
err = z.GCS.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "GCS")
return
}
}
case "MinIO":
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "MinIO")
return
}
z.MinIO = nil
} else {
if z.MinIO == nil {
z.MinIO = new(TierMinIO)
}
err = z.MinIO.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "MinIO")
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *TierConfig) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 7
// write "Version"
err = en.Append(0x87, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Version)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
// write "Type"
err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65)
if err != nil {
return
}
err = en.WriteInt(int(z.Type))
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
// write "Name"
err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "S3"
err = en.Append(0xa2, 0x53, 0x33)
if err != nil {
return
}
if z.S3 == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.S3.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "S3")
return
}
}
// write "Azure"
err = en.Append(0xa5, 0x41, 0x7a, 0x75, 0x72, 0x65)
if err != nil {
return
}
if z.Azure == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.Azure.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Azure")
return
}
}
// write "GCS"
err = en.Append(0xa3, 0x47, 0x43, 0x53)
if err != nil {
return
}
if z.GCS == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.GCS.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "GCS")
return
}
}
// write "MinIO"
err = en.Append(0xa5, 0x4d, 0x69, 0x6e, 0x49, 0x4f)
if err != nil {
return
}
if z.MinIO == nil {
err = en.WriteNil()
if err != nil {
return
}
} else {
err = z.MinIO.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "MinIO")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *TierConfig) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 7
// string "Version"
o = append(o, 0x87, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Version)
// string "Type"
o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendInt(o, int(z.Type))
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "S3"
o = append(o, 0xa2, 0x53, 0x33)
if z.S3 == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.S3.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "S3")
return
}
}
// string "Azure"
o = append(o, 0xa5, 0x41, 0x7a, 0x75, 0x72, 0x65)
if z.Azure == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.Azure.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Azure")
return
}
}
// string "GCS"
o = append(o, 0xa3, 0x47, 0x43, 0x53)
if z.GCS == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.GCS.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "GCS")
return
}
}
// string "MinIO"
o = append(o, 0xa5, 0x4d, 0x69, 0x6e, 0x49, 0x4f)
if z.MinIO == nil {
o = msgp.AppendNil(o)
} else {
o, err = z.MinIO.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "MinIO")
return
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Version":
z.Version, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Version")
return
}
case "Type":
{
var zb0002 int
zb0002, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = TierType(zb0002)
}
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "S3":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.S3 = nil
} else {
if z.S3 == nil {
z.S3 = new(TierS3)
}
bts, err = z.S3.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "S3")
return
}
}
case "Azure":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.Azure = nil
} else {
if z.Azure == nil {
z.Azure = new(TierAzure)
}
bts, err = z.Azure.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Azure")
return
}
}
case "GCS":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.GCS = nil
} else {
if z.GCS == nil {
z.GCS = new(TierGCS)
}
bts, err = z.GCS.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "GCS")
return
}
}
case "MinIO":
if msgp.IsNil(bts) {
bts, err = msgp.ReadNilBytes(bts)
if err != nil {
return
}
z.MinIO = nil
} else {
if z.MinIO == nil {
z.MinIO = new(TierMinIO)
}
bts, err = z.MinIO.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "MinIO")
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TierConfig) Msgsize() (s int) {
s = 1 + 8 + msgp.StringPrefixSize + len(z.Version) + 5 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Name) + 3
if z.S3 == nil {
s += msgp.NilSize
} else {
s += z.S3.Msgsize()
}
s += 6
if z.Azure == nil {
s += msgp.NilSize
} else {
s += z.Azure.Msgsize()
}
s += 4
if z.GCS == nil {
s += msgp.NilSize
} else {
s += z.GCS.Msgsize()
}
s += 6
if z.MinIO == nil {
s += msgp.NilSize
} else {
s += z.MinIO.Msgsize()
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *TierType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 int
zb0001, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = TierType(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z TierType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteInt(int(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z TierType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendInt(o, int(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 int
zb0001, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = TierType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z TierType) Msgsize() (s int) {
s = msgp.IntSize
return
}
golang-github-minio-madmin-go-3.0.104/tier-config_gen_test.go 0000664 0000000 0000000 00000004435 14774251704 0024033 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalTierConfig(t *testing.T) {
v := TierConfig{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTierConfig(b *testing.B) {
v := TierConfig{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTierConfig(b *testing.B) {
v := TierConfig{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTierConfig(b *testing.B) {
v := TierConfig{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTierConfig(t *testing.T) {
v := TierConfig{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTierConfig Msgsize() is inaccurate")
}
vn := TierConfig{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTierConfig(b *testing.B) {
v := TierConfig{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTierConfig(b *testing.B) {
v := TierConfig{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/tier-config_test.go 0000664 0000000 0000000 00000007305 14774251704 0023201 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"encoding/json"
"testing"
)
// TestUnmarshalInvalidTierConfig tests that TierConfig parsing can catch invalid tier configs
func TestUnmarshalInvalidTierConfig(t *testing.T) {
testCases := []struct {
cfg TierConfig
err error
}{
{
cfg: TierConfig{
Version: TierConfigVer,
Name: "S3TIER?",
Type: S3,
GCS: &TierGCS{
Creds: "VWJ1bnR1IDIwLjA0LjEgTFRTIFxuIFxsCgo",
Bucket: "ilmtesting",
Endpoint: "https://storage.googleapis.com/",
Prefix: "testprefix",
Region: "us-west-2",
StorageClass: "",
},
},
err: ErrTierInvalidConfig,
},
{
cfg: TierConfig{
Version: "invalid-version",
Name: "INVALIDTIER",
Type: GCS,
GCS: &TierGCS{
Creds: "VWJ1bnR1IDIwLjA0LjEgTFRTIFxuIFxsCgo",
Bucket: "ilmtesting",
Endpoint: "https://storage.googleapis.com/",
Prefix: "testprefix",
Region: "us-west-2",
StorageClass: "",
},
},
err: ErrTierInvalidConfigVersion,
},
{
cfg: TierConfig{
Version: TierConfigVer,
Type: GCS,
GCS: &TierGCS{
Creds: "VWJ1bnR1IDIwLjA0LjEgTFRTIFxuIFxsCgo",
Bucket: "ilmtesting",
Endpoint: "https://storage.googleapis.com/",
Prefix: "testprefix",
Region: "us-west-2",
StorageClass: "",
},
},
err: ErrTierNameEmpty,
},
{
cfg: TierConfig{
Version: TierConfigVer,
Name: "GCSTIER",
Type: GCS,
GCS: &TierGCS{
Creds: "VWJ1bnR1IDIwLjA0LjEgTFRTIFxuIFxsCgo",
Bucket: "ilmtesting",
Endpoint: "https://storage.googleapis.com/",
Prefix: "testprefix",
Region: "us-west-2",
StorageClass: "",
},
},
err: nil,
},
}
for i, tc := range testCases {
data, err := json.Marshal(tc.cfg)
if err != nil {
t.Fatalf("Test %d: Failed to marshal tier config %v: %v", i+1, tc.cfg, err)
}
var cfg TierConfig
err = json.Unmarshal(data, &cfg)
if err != tc.err {
t.Fatalf("Test %d: Failed in unmarshal tier config %s: expected %v got %v", i+1, data, tc.err, err)
}
}
// Test invalid tier type
evilJSON := []byte(`{
"Version": "v1",
"Type" : "not-a-type",
"Name" : "GCSTIER3",
"GCS" : {
"Bucket" : "ilmtesting",
"Prefix" : "testprefix3",
"Endpoint" : "https://storage.googleapis.com/",
"Creds": "VWJ1bnR1IDIwLjA0LjEgTFRTIFxuIFxsCgo",
"Region" : "us-west-2",
"StorageClass" : ""
}
}`)
var cfg TierConfig
err := json.Unmarshal(evilJSON, &cfg)
if err != ErrTierTypeUnsupported {
t.Fatalf("Expected to fail with unsupported type but got %v", err)
}
}
golang-github-minio-madmin-go-3.0.104/tier-gcs.go 0000664 0000000 0000000 00000005565 14774251704 0021457 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package madmin
import (
"encoding/base64"
)
//go:generate msgp -file $GOFILE
// TierGCS represents the remote tier configuration for Google Cloud Storage
type TierGCS struct {
Endpoint string `json:",omitempty"` // custom endpoint is not supported for GCS
Creds string `json:",omitempty"` // base64 encoding of credentials.json
Bucket string `json:",omitempty"`
Prefix string `json:",omitempty"`
Region string `json:",omitempty"`
StorageClass string `json:",omitempty"`
}
// GCSOptions is the type of the variadic options accepted by NewTierGCS.
type GCSOptions func(*TierGCS) error
// GCSPrefix helper to supply optional object prefix to NewTierGCS
func GCSPrefix(prefix string) func(*TierGCS) error {
return func(gcs *TierGCS) error {
gcs.Prefix = prefix
return nil
}
}
// GCSRegion helper to supply optional region to NewTierGCS
func GCSRegion(region string) func(*TierGCS) error {
return func(gcs *TierGCS) error {
gcs.Region = region
return nil
}
}
// GCSStorageClass helper to supply optional storage class to NewTierGCS
func GCSStorageClass(sc string) func(*TierGCS) error {
return func(gcs *TierGCS) error {
gcs.StorageClass = sc
return nil
}
}
// GetCredentialJSON method returns the credentials JSON bytes.
func (gcs *TierGCS) GetCredentialJSON() ([]byte, error) {
return base64.URLEncoding.DecodeString(gcs.Creds)
}
// NewTierGCS returns a TierConfig of GCS type. It returns an error if the
// given parameters are invalid, e.g. if the tier name is empty.
func NewTierGCS(name string, credsJSON []byte, bucket string, options ...GCSOptions) (*TierConfig, error) {
if name == "" {
return nil, ErrTierNameEmpty
}
creds := base64.URLEncoding.EncodeToString(credsJSON)
gcs := &TierGCS{
Creds: creds,
Bucket: bucket,
// Defaults
// endpoint is meant only for client-side display purposes
Endpoint: "https://storage.googleapis.com/",
Prefix: "",
Region: "",
StorageClass: "",
}
for _, option := range options {
err := option(gcs)
if err != nil {
return nil, err
}
}
return &TierConfig{
Version: TierConfigVer,
Type: GCS,
Name: name,
GCS: gcs,
}, nil
}
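// Illustrative usage sketch (not part of the original source): creating a GCS
// tier config from raw credentials JSON. The file path, tier name and bucket
// below are hypothetical placeholders.
//
//    credsJSON, err := os.ReadFile("/path/to/credentials.json")
//    if err != nil {
//        // handle error
//    }
//    cfg, err := NewTierGCS("GCSTIER", credsJSON, "ilmtesting",
//        GCSPrefix("archive/"),
//        GCSRegion("us-west-2"),
//    )
//    // cfg.GCS.Creds holds the base64 (URL-encoded) form of credsJSON;
//    // GetCredentialJSON() decodes it back.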
golang-github-minio-madmin-go-3.0.104/tier-gcs_gen.go 0000664 0000000 0000000 00000012773 14774251704 0022307 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *TierGCS) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Creds":
z.Creds, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Creds")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
case "StorageClass":
z.StorageClass, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *TierGCS) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "Endpoint"
err = en.Append(0x86, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "Creds"
err = en.Append(0xa5, 0x43, 0x72, 0x65, 0x64, 0x73)
if err != nil {
return
}
err = en.WriteString(z.Creds)
if err != nil {
err = msgp.WrapError(err, "Creds")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "Region"
err = en.Append(0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Region)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
// write "StorageClass"
err = en.Append(0xac, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73)
if err != nil {
return
}
err = en.WriteString(z.StorageClass)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *TierGCS) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "Endpoint"
o = append(o, 0x86, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "Creds"
o = append(o, 0xa5, 0x43, 0x72, 0x65, 0x64, 0x73)
o = msgp.AppendString(o, z.Creds)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "Region"
o = append(o, 0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Region)
// string "StorageClass"
o = append(o, 0xac, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73)
o = msgp.AppendString(o, z.StorageClass)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierGCS) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Creds":
z.Creds, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Creds")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
case "StorageClass":
z.StorageClass, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TierGCS) Msgsize() (s int) {
s = 1 + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 6 + msgp.StringPrefixSize + len(z.Creds) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 7 + msgp.StringPrefixSize + len(z.Region) + 13 + msgp.StringPrefixSize + len(z.StorageClass)
return
}
golang-github-minio-madmin-go-3.0.104/tier-gcs_gen_test.go 0000664 0000000 0000000 00000004355 14774251704 0023343 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalTierGCS(t *testing.T) {
v := TierGCS{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTierGCS(b *testing.B) {
v := TierGCS{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTierGCS(b *testing.B) {
v := TierGCS{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTierGCS(b *testing.B) {
v := TierGCS{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTierGCS(t *testing.T) {
v := TierGCS{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTierGCS Msgsize() is inaccurate")
}
vn := TierGCS{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTierGCS(b *testing.B) {
v := TierGCS{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTierGCS(b *testing.B) {
v := TierGCS{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/tier-minio.go 0000664 0000000 0000000 00000004154 14774251704 0022007 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
//go:generate msgp -file $GOFILE
// TierMinIO represents the remote tier configuration for MinIO object storage backend.
type TierMinIO struct {
Endpoint string `json:",omitempty"`
AccessKey string `json:",omitempty"`
SecretKey string `json:",omitempty"`
Bucket string `json:",omitempty"`
Prefix string `json:",omitempty"`
Region string `json:",omitempty"`
}
// MinIOOptions supports NewTierMinIO to take variadic options
type MinIOOptions func(*TierMinIO) error
// MinIORegion helper to supply optional region to NewTierMinIO
func MinIORegion(region string) func(m *TierMinIO) error {
return func(m *TierMinIO) error {
m.Region = region
return nil
}
}
// MinIOPrefix helper to supply optional object prefix to NewTierMinIO
func MinIOPrefix(prefix string) func(m *TierMinIO) error {
return func(m *TierMinIO) error {
m.Prefix = prefix
return nil
}
}
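// NewTierMinIO returns a TierConfig of MinIO type. Returns error if the given
// parameters are invalid like name is empty etc.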
func NewTierMinIO(name, endpoint, accessKey, secretKey, bucket string, options ...MinIOOptions) (*TierConfig, error) {
if name == "" {
return nil, ErrTierNameEmpty
}
m := &TierMinIO{
AccessKey: accessKey,
SecretKey: secretKey,
Bucket: bucket,
Endpoint: endpoint,
}
for _, option := range options {
err := option(m)
if err != nil {
return nil, err
}
}
return &TierConfig{
Version: TierConfigVer,
Type: MinIO,
Name: name,
MinIO: m,
}, nil
}
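// A minimal usage sketch (illustrative only, not part of the upstream source):
// build a MinIO-backed tier configuration with the optional region and prefix
// helpers defined above. All endpoint, credential and bucket values below are
// placeholders.
//
//	cfg, err := NewTierMinIO("WARM-MINIO", "https://warm-minio.example.com",
//		"ACCESS-KEY", "SECRET-KEY", "warm-bucket",
//		MinIORegion("us-east-1"), MinIOPrefix("backup/"))
//	if err != nil {
//		log.Fatalln(err, "Failed to create MinIO backed tier")
//	}
//	fmt.Println(cfg.Name, cfg.Type)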
golang-github-minio-madmin-go-3.0.104/tier-minio_gen.go 0000664 0000000 0000000 00000013036 14774251704 0022637 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *TierMinIO) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "AccessKey":
z.AccessKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
case "SecretKey":
z.SecretKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *TierMinIO) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 6
// write "Endpoint"
err = en.Append(0x86, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "AccessKey"
err = en.Append(0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.AccessKey)
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
// write "SecretKey"
err = en.Append(0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.SecretKey)
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "Region"
err = en.Append(0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Region)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *TierMinIO) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "Endpoint"
o = append(o, 0x86, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "AccessKey"
o = append(o, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.AccessKey)
// string "SecretKey"
o = append(o, 0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.SecretKey)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "Region"
o = append(o, 0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Region)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierMinIO) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "AccessKey":
z.AccessKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
case "SecretKey":
z.SecretKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TierMinIO) Msgsize() (s int) {
s = 1 + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 10 + msgp.StringPrefixSize + len(z.AccessKey) + 10 + msgp.StringPrefixSize + len(z.SecretKey) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 7 + msgp.StringPrefixSize + len(z.Region)
return
}
golang-github-minio-madmin-go-3.0.104/tier-minio_gen_test.go 0000664 0000000 0000000 00000004415 14774251704 0023677 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalTierMinIO(t *testing.T) {
v := TierMinIO{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTierMinIO(b *testing.B) {
v := TierMinIO{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTierMinIO(b *testing.B) {
v := TierMinIO{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTierMinIO(b *testing.B) {
v := TierMinIO{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTierMinIO(t *testing.T) {
v := TierMinIO{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTierMinIO Msgsize() is inaccurate")
}
vn := TierMinIO{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTierMinIO(b *testing.B) {
v := TierMinIO{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTierMinIO(b *testing.B) {
v := TierMinIO{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/tier-s3.go 0000664 0000000 0000000 00000010260 14774251704 0021214 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
//go:generate msgp -file $GOFILE
// TierS3 represents the remote tier configuration for AWS S3 compatible backend.
type TierS3 struct {
Endpoint string `json:",omitempty"`
AccessKey string `json:",omitempty"`
SecretKey string `json:",omitempty"`
Bucket string `json:",omitempty"`
Prefix string `json:",omitempty"`
Region string `json:",omitempty"`
StorageClass string `json:",omitempty"`
AWSRole bool `json:",omitempty"`
AWSRoleWebIdentityTokenFile string `json:",omitempty"`
AWSRoleARN string `json:",omitempty"`
AWSRoleSessionName string `json:",omitempty"`
AWSRoleDurationSeconds int `json:",omitempty"`
}
// S3Options supports NewTierS3 to take variadic options
type S3Options func(*TierS3) error
// S3Region helper to supply optional region to NewTierS3
func S3Region(region string) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.Region = region
return nil
}
}
// S3Prefix helper to supply optional object prefix to NewTierS3
func S3Prefix(prefix string) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.Prefix = prefix
return nil
}
}
// S3Endpoint helper to supply optional endpoint to NewTierS3
func S3Endpoint(endpoint string) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.Endpoint = endpoint
return nil
}
}
// S3StorageClass helper to supply optional storage class to NewTierS3
func S3StorageClass(storageClass string) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.StorageClass = storageClass
return nil
}
}
// S3AWSRole helper to use optional AWS Role to NewTierS3
func S3AWSRole() func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.AWSRole = true
return nil
}
}
// S3AWSRoleWebIdentityTokenFile helper to use optional AWS Role token file to NewTierS3
func S3AWSRoleWebIdentityTokenFile(tokenFile string) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.AWSRoleWebIdentityTokenFile = tokenFile
return nil
}
}
// S3AWSRoleARN helper to use optional AWS RoleARN to NewTierS3
func S3AWSRoleARN(roleARN string) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.AWSRoleARN = roleARN
return nil
}
}
// S3AWSRoleSessionName helper to use optional AWS RoleSessionName to NewTierS3
func S3AWSRoleSessionName(roleSessionName string) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.AWSRoleSessionName = roleSessionName
return nil
}
}
// S3AWSRoleDurationSeconds helper to use optional token duration to NewTierS3
func S3AWSRoleDurationSeconds(dsecs int) func(s3 *TierS3) error {
return func(s3 *TierS3) error {
s3.AWSRoleDurationSeconds = dsecs
return nil
}
}
// NewTierS3 returns a TierConfig of S3 type. Returns error if the given
// parameters are invalid like name is empty etc.
func NewTierS3(name, accessKey, secretKey, bucket string, options ...S3Options) (*TierConfig, error) {
if name == "" {
return nil, ErrTierNameEmpty
}
sc := &TierS3{
AccessKey: accessKey,
SecretKey: secretKey,
Bucket: bucket,
// Defaults
Endpoint: "https://s3.amazonaws.com",
Region: "",
StorageClass: "",
}
for _, option := range options {
err := option(sc)
if err != nil {
return nil, err
}
}
return &TierConfig{
Version: TierConfigVer,
Type: S3,
Name: name,
S3: sc,
}, nil
}
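// A minimal usage sketch (illustrative only, not part of the upstream source):
// configure an S3 tier that relies on an AWS web-identity role rather than
// static credentials, using the option helpers defined above. All values are
// placeholders; whether empty static credentials are accepted ultimately
// depends on the server-side deployment.
//
//	roleTier, err := NewTierS3("GLACIER-TIER", "", "", "tier-bucket",
//		S3Region("us-west-2"),
//		S3StorageClass("GLACIER"),
//		S3AWSRoleWebIdentityTokenFile("/var/run/secrets/serviceaccount/token"),
//		S3AWSRoleARN("arn:aws:iam::123456789012:role/minio-tiering"),
//		S3AWSRoleSessionName("minio-tiering"),
//		S3AWSRoleDurationSeconds(3600))
//	if err != nil {
//		log.Fatalln(err, "Failed to create s3 tier using role credentials")
//	}
//	fmt.Println(roleTier.Name)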
golang-github-minio-madmin-go-3.0.104/tier-s3_gen.go 0000664 0000000 0000000 00000025060 14774251704 0022051 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *TierS3) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "AccessKey":
z.AccessKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
case "SecretKey":
z.SecretKey, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
case "StorageClass":
z.StorageClass, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
case "AWSRole":
z.AWSRole, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "AWSRole")
return
}
case "AWSRoleWebIdentityTokenFile":
z.AWSRoleWebIdentityTokenFile, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AWSRoleWebIdentityTokenFile")
return
}
case "AWSRoleARN":
z.AWSRoleARN, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AWSRoleARN")
return
}
case "AWSRoleSessionName":
z.AWSRoleSessionName, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AWSRoleSessionName")
return
}
case "AWSRoleDurationSeconds":
z.AWSRoleDurationSeconds, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "AWSRoleDurationSeconds")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *TierS3) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 12
// write "Endpoint"
err = en.Append(0x8c, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "AccessKey"
err = en.Append(0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.AccessKey)
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
// write "SecretKey"
err = en.Append(0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79)
if err != nil {
return
}
err = en.WriteString(z.SecretKey)
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "Region"
err = en.Append(0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
if err != nil {
return
}
err = en.WriteString(z.Region)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
// write "StorageClass"
err = en.Append(0xac, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73)
if err != nil {
return
}
err = en.WriteString(z.StorageClass)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
// write "AWSRole"
err = en.Append(0xa7, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.AWSRole)
if err != nil {
err = msgp.WrapError(err, "AWSRole")
return
}
// write "AWSRoleWebIdentityTokenFile"
err = en.Append(0xbb, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x57, 0x65, 0x62, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65)
if err != nil {
return
}
err = en.WriteString(z.AWSRoleWebIdentityTokenFile)
if err != nil {
err = msgp.WrapError(err, "AWSRoleWebIdentityTokenFile")
return
}
// write "AWSRoleARN"
err = en.Append(0xaa, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x52, 0x4e)
if err != nil {
return
}
err = en.WriteString(z.AWSRoleARN)
if err != nil {
err = msgp.WrapError(err, "AWSRoleARN")
return
}
// write "AWSRoleSessionName"
err = en.Append(0xb2, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.AWSRoleSessionName)
if err != nil {
err = msgp.WrapError(err, "AWSRoleSessionName")
return
}
// write "AWSRoleDurationSeconds"
err = en.Append(0xb6, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.AWSRoleDurationSeconds)
if err != nil {
err = msgp.WrapError(err, "AWSRoleDurationSeconds")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *TierS3) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 12
// string "Endpoint"
o = append(o, 0x8c, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "AccessKey"
o = append(o, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.AccessKey)
// string "SecretKey"
o = append(o, 0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79)
o = msgp.AppendString(o, z.SecretKey)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "Region"
o = append(o, 0xa6, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e)
o = msgp.AppendString(o, z.Region)
// string "StorageClass"
o = append(o, 0xac, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73)
o = msgp.AppendString(o, z.StorageClass)
// string "AWSRole"
o = append(o, 0xa7, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65)
o = msgp.AppendBool(o, z.AWSRole)
// string "AWSRoleWebIdentityTokenFile"
o = append(o, 0xbb, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x57, 0x65, 0x62, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65)
o = msgp.AppendString(o, z.AWSRoleWebIdentityTokenFile)
// string "AWSRoleARN"
o = append(o, 0xaa, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x52, 0x4e)
o = msgp.AppendString(o, z.AWSRoleARN)
// string "AWSRoleSessionName"
o = append(o, 0xb2, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.AWSRoleSessionName)
// string "AWSRoleDurationSeconds"
o = append(o, 0xb6, 0x41, 0x57, 0x53, 0x52, 0x6f, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73)
o = msgp.AppendInt(o, z.AWSRoleDurationSeconds)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierS3) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "AccessKey":
z.AccessKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AccessKey")
return
}
case "SecretKey":
z.SecretKey, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SecretKey")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "Region":
z.Region, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Region")
return
}
case "StorageClass":
z.StorageClass, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StorageClass")
return
}
case "AWSRole":
z.AWSRole, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AWSRole")
return
}
case "AWSRoleWebIdentityTokenFile":
z.AWSRoleWebIdentityTokenFile, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AWSRoleWebIdentityTokenFile")
return
}
case "AWSRoleARN":
z.AWSRoleARN, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AWSRoleARN")
return
}
case "AWSRoleSessionName":
z.AWSRoleSessionName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AWSRoleSessionName")
return
}
case "AWSRoleDurationSeconds":
z.AWSRoleDurationSeconds, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AWSRoleDurationSeconds")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TierS3) Msgsize() (s int) {
s = 1 + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 10 + msgp.StringPrefixSize + len(z.AccessKey) + 10 + msgp.StringPrefixSize + len(z.SecretKey) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 7 + msgp.StringPrefixSize + len(z.Region) + 13 + msgp.StringPrefixSize + len(z.StorageClass) + 8 + msgp.BoolSize + 28 + msgp.StringPrefixSize + len(z.AWSRoleWebIdentityTokenFile) + 11 + msgp.StringPrefixSize + len(z.AWSRoleARN) + 19 + msgp.StringPrefixSize + len(z.AWSRoleSessionName) + 23 + msgp.IntSize
return
}
golang-github-minio-madmin-go-3.0.104/tier-s3_gen_test.go 0000664 0000000 0000000 00000004335 14774251704 0023112 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalTierS3(t *testing.T) {
v := TierS3{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTierS3(b *testing.B) {
v := TierS3{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTierS3(b *testing.B) {
v := TierS3{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTierS3(b *testing.B) {
v := TierS3{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTierS3(t *testing.T) {
v := TierS3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTierS3 Msgsize() is inaccurate")
}
vn := TierS3{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTierS3(b *testing.B) {
v := TierS3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTierS3(b *testing.B) {
v := TierS3{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/tier.go 0000664 0000000 0000000 00000015736 14774251704 0020706 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"
"path"
"strconv"
"time"
)
// tierAPI is API path prefix for tier related admin APIs
const tierAPI = "tier"
// AddTierIgnoreInUse adds a new remote tier, ignoring if it's being used by another MinIO deployment.
func (adm *AdminClient) AddTierIgnoreInUse(ctx context.Context, cfg *TierConfig) error {
return adm.addTier(ctx, cfg, true)
}
// addTier adds a new remote tier, optionally ignoring whether it is already in use.
func (adm *AdminClient) addTier(ctx context.Context, cfg *TierConfig, ignoreInUse bool) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
encData, err := EncryptData(adm.getSecretKey(), data)
if err != nil {
return err
}
queryVals := url.Values{}
queryVals.Set("force", strconv.FormatBool(ignoreInUse))
reqData := requestData{
relPath: path.Join(adminAPIPrefix, tierAPI),
content: encData,
queryValues: queryVals,
}
// Execute PUT on /minio/admin/v3/tier to add a remote tier
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// AddTier adds a new remote tier.
func (adm *AdminClient) AddTier(ctx context.Context, cfg *TierConfig) error {
return adm.addTier(ctx, cfg, false)
}
// ListTiers returns a list of remote tiers configured.
func (adm *AdminClient) ListTiers(ctx context.Context) ([]*TierConfig, error) {
reqData := requestData{
relPath: path.Join(adminAPIPrefix, tierAPI),
}
// Execute GET on /minio/admin/v3/tier to list remote tiers configured.
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var tiers []*TierConfig
b, err := io.ReadAll(resp.Body)
if err != nil {
return tiers, err
}
err = json.Unmarshal(b, &tiers)
if err != nil {
return tiers, err
}
return tiers, nil
}
// TierCreds is used to pass remote tier credentials in a tier-edit operation.
type TierCreds struct {
AccessKey string `json:"access,omitempty"`
SecretKey string `json:"secret,omitempty"`
AWSRole bool `json:"awsrole"`
AWSRoleWebIdentityTokenFile string `json:"awsroleWebIdentity,omitempty"`
AWSRoleARN string `json:"awsroleARN,omitempty"`
AzSP ServicePrincipalAuth `json:"azSP,omitempty"`
CredsJSON []byte `json:"creds,omitempty"`
}
// EditTier supports updating credentials for the remote tier identified by tierName.
func (adm *AdminClient) EditTier(ctx context.Context, tierName string, creds TierCreds) error {
data, err := json.Marshal(creds)
if err != nil {
return err
}
var encData []byte
encData, err = EncryptData(adm.getSecretKey(), data)
if err != nil {
return err
}
reqData := requestData{
relPath: path.Join(adminAPIPrefix, tierAPI, tierName),
content: encData,
}
// Execute POST on /minio/admin/v3/tier/tierName to edit a tier
// configured.
resp, err := adm.executeMethod(ctx, http.MethodPost, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// RemoveTier removes an empty tier identified by tierName
func (adm *AdminClient) RemoveTier(ctx context.Context, tierName string) error {
if tierName == "" {
return ErrTierNameEmpty
}
reqData := requestData{
relPath: path.Join(adminAPIPrefix, tierAPI, tierName),
}
// Execute DELETE on /minio/admin/v3/tier/tierName to remove an empty tier.
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// RemoveTierOpts - options for a remote tiering removal
type RemoveTierOpts struct {
Force bool
}
// RemoveTierV2 removes the tier identified by tierName. The tier is not
// required to be reachable or empty if the force flag is set to true.
func (adm *AdminClient) RemoveTierV2(ctx context.Context, tierName string, opts RemoveTierOpts) error {
if tierName == "" {
return ErrTierNameEmpty
}
queryVals := url.Values{}
queryVals.Set("force", strconv.FormatBool(opts.Force))
reqData := requestData{
relPath: path.Join(adminAPIPrefix, tierAPI, tierName),
queryValues: queryVals,
}
// Execute DELETE on /minio/admin/v3/tier/tierName to remove an empty tier.
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// VerifyTier verifies tierName's remote tier config
func (adm *AdminClient) VerifyTier(ctx context.Context, tierName string) error {
if tierName == "" {
return ErrTierNameEmpty
}
reqData := requestData{
relPath: path.Join(adminAPIPrefix, tierAPI, tierName),
}
// Execute GET on /minio/admin/v3/tier/tierName to verify tierName's config.
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// TierInfo contains tier name, type and statistics
type TierInfo struct {
Name string
Type string
Stats TierStats
DailyStats DailyTierStats
}
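// DailyTierStats contains a day's worth of TierStats bins (one per hour) and
// the time they were last updated.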
type DailyTierStats struct {
Bins [24]TierStats
UpdatedAt time.Time
}
// TierStats returns per-tier stats of all configured tiers (incl. internal
// hot-tier)
func (adm *AdminClient) TierStats(ctx context.Context) ([]TierInfo, error) {
reqData := requestData{
relPath: path.Join(adminAPIPrefix, "tier-stats"),
}
// Execute GET on /minio/admin/v3/tier-stats to list tier-stats.
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
var tierInfos []TierInfo
b, err := io.ReadAll(resp.Body)
if err != nil {
return tierInfos, err
}
err = json.Unmarshal(b, &tierInfos)
if err != nil {
return tierInfos, err
}
return tierInfos, nil
}
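// A minimal usage sketch (illustrative only, not part of the upstream source):
// a typical tier administration flow using the methods above. It assumes an
// already initialized *AdminClient named adm and a *TierConfig named cfg,
// neither of which is constructed here; tier names are placeholders.
//
//	ctx := context.Background()
//	if err := adm.AddTier(ctx, cfg); err != nil {
//		log.Fatalln(err)
//	}
//	tiers, err := adm.ListTiers(ctx)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	for _, t := range tiers {
//		if err := adm.VerifyTier(ctx, t.Name); err != nil {
//			log.Println("tier", t.Name, "failed verification:", err)
//		}
//	}
//	// Rotate credentials on an existing tier, then remove it forcefully.
//	err = adm.EditTier(ctx, "WARM-TIER", TierCreds{AccessKey: "NEWKEY", SecretKey: "NEWSECRET"})
//	if err != nil {
//		log.Fatalln(err)
//	}
//	if err := adm.RemoveTierV2(ctx, "WARM-TIER", RemoveTierOpts{Force: true}); err != nil {
//		log.Fatalln(err)
//	}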
golang-github-minio-madmin-go-3.0.104/tier_test.go 0000664 0000000 0000000 00000011652 14774251704 0021736 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"encoding/base64"
"fmt"
"log"
"reflect"
"testing"
)
func ExampleNewTierS3() {
simpleS3SC, err := NewTierS3("simple-s3", "accessKey", "secretKey", "testbucket")
if err != nil {
log.Fatalln(err, "Failed to create s3 backed tier")
}
fmt.Println(simpleS3SC)
fullyCustomS3SC, err := NewTierS3("custom-s3", "accessKey", "secretKey", "testbucket",
S3Endpoint("https://s3.amazonaws.com"), S3Prefix("testprefix"), S3Region("us-west-1"), S3StorageClass("S3_IA"))
if err != nil {
log.Fatalln(err, "Failed to create s3 tier")
}
fmt.Println(fullyCustomS3SC)
}
func ExampleNewTierAzure() {
simpleAzSC, err := NewTierAzure("simple-az", "accessKey", "secretKey", "testbucket")
if err != nil {
log.Fatalln(err, "Failed to create azure backed tier")
}
fmt.Println(simpleAzSC)
fullyCustomAzSC, err := NewTierAzure("custom-az", "accessKey", "secretKey", "testbucket", AzureEndpoint("http://blob.core.windows.net"), AzurePrefix("testprefix"))
if err != nil {
log.Fatalln(err, "Failed to create azure backed tier")
}
fmt.Println(fullyCustomAzSC)
}
func ExampleNewTierGCS() {
credsJSON := []byte("credentials json content goes here")
simpleGCSSC, err := NewTierGCS("simple-gcs", credsJSON, "testbucket")
if err != nil {
log.Fatalln(err, "Failed to create GCS backed tier")
}
fmt.Println(simpleGCSSC)
fullyCustomGCSSC, err := NewTierGCS("custom-gcs", credsJSON, "testbucket", GCSPrefix("testprefix"))
if err != nil {
log.Fatalln(err, "Failed to create GCS backed tier")
}
fmt.Println(fullyCustomGCSSC)
}
// TestS3Tier tests S3Options helpers
func TestS3Tier(t *testing.T) {
scName := "test-s3"
endpoint := "https://mys3.com"
accessKey, secretKey := "accessKey", "secretKey"
bucket, prefix := "testbucket", "testprefix"
region := "us-west-1"
storageClass := "S3_IA"
want := &TierConfig{
Version: TierConfigVer,
Type: S3,
Name: scName,
S3: &TierS3{
AccessKey: accessKey,
SecretKey: secretKey,
Bucket: bucket,
// custom values
Endpoint: endpoint,
Prefix: prefix,
Region: region,
StorageClass: storageClass,
},
}
options := []S3Options{
S3Endpoint(endpoint),
S3Prefix(prefix),
S3Region(region),
S3StorageClass(storageClass),
}
got, err := NewTierS3(scName, accessKey, secretKey, bucket, options...)
if err != nil {
t.Fatalf("Failed to create a custom s3 tier %s", err)
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got != want, got = %v want = %v", *got, *want)
}
}
// TestAzTier tests AzureOptions helpers
func TestAzTier(t *testing.T) {
scName := "test-az"
endpoint := "https://myazure.com"
accountName, accountKey := "accountName", "accountKey"
bucket, prefix := "testbucket", "testprefix"
region := "us-east-1"
want := &TierConfig{
Version: TierConfigVer,
Type: Azure,
Name: scName,
Azure: &TierAzure{
AccountName: accountName,
AccountKey: accountKey,
Bucket: bucket,
// custom values
Endpoint: endpoint,
Prefix: prefix,
Region: region,
},
}
options := []AzureOptions{
AzureEndpoint(endpoint),
AzurePrefix(prefix),
AzureRegion(region),
}
got, err := NewTierAzure(scName, accountName, accountKey, bucket, options...)
if err != nil {
t.Fatalf("Failed to create a custom azure tier %s", err)
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got != want, got = %v want = %v", *got, *want)
}
}
// TestGCSStorageClass tests GCSOptions helpers
func TestGCSStorageClass(t *testing.T) {
scName := "test-gcs"
credsJSON := []byte("test-creds-json")
encodedCreds := base64.URLEncoding.EncodeToString(credsJSON)
bucket, prefix := "testbucket", "testprefix"
region := "us-west-2"
want := &TierConfig{
Version: TierConfigVer,
Type: GCS,
Name: scName,
GCS: &TierGCS{
Bucket: bucket,
Creds: encodedCreds,
// custom values
Endpoint: "https://storage.googleapis.com/",
Prefix: prefix,
Region: region,
},
}
options := []GCSOptions{
GCSRegion(region),
GCSPrefix(prefix),
}
got, err := NewTierGCS(scName, credsJSON, bucket, options...)
if err != nil {
t.Fatalf("Failed to create a custom gcs tier %s", err)
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got != want, got = %v want = %v", *got, *want)
}
}
golang-github-minio-madmin-go-3.0.104/timings.go 0000664 0000000 0000000 00000007320 14774251704 0021403 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"math"
"sort"
"time"
)
// Timings captures all latency metrics
type Timings struct {
Avg time.Duration `json:"avg"` // Average duration per sample
P50 time.Duration `json:"p50"` // 50th %ile of all the sample durations
P75 time.Duration `json:"p75"` // 75th %ile of all the sample durations
P95 time.Duration `json:"p95"` // 95th %ile of all the sample durations
P99 time.Duration `json:"p99"` // 99th %ile of all the sample durations
P999 time.Duration `json:"p999"` // 99.9th %ile of all the sample durations
Long5p time.Duration `json:"l5p"` // Average duration of the longest 5%
Short5p time.Duration `json:"s5p"` // Average duration of the shortest 5%
Max time.Duration `json:"max"` // Max duration
Min time.Duration `json:"min"` // Min duration
StdDev time.Duration `json:"sdev"` // Standard deviation among all the sample durations
Range time.Duration `json:"range"` // Delta between Max and Min
}
// Measure - calculate all the latency measurements
func (ts TimeDurations) Measure() Timings {
if len(ts) == 0 {
return Timings{
Avg: 0,
P50: 0,
P75: 0,
P95: 0,
P99: 0,
P999: 0,
Long5p: 0,
Short5p: 0,
Min: 0,
Max: 0,
Range: 0,
StdDev: 0,
}
}
sort.Slice(ts, func(i, j int) bool {
return int64(ts[i]) < int64(ts[j])
})
return Timings{
Avg: ts.avg(),
P50: ts[ts.Len()/2],
P75: ts.p(0.75),
P95: ts.p(0.95),
P99: ts.p(0.99),
P999: ts.p(0.999),
Long5p: ts.long5p(),
Short5p: ts.short5p(),
Min: ts.min(),
Max: ts.max(),
Range: ts.srange(),
StdDev: ts.stdDev(),
}
}
// TimeDurations is time.Duration segments.
type TimeDurations []time.Duration
func (ts TimeDurations) Len() int { return len(ts) }
func (ts TimeDurations) avg() time.Duration {
var total time.Duration
for _, t := range ts {
total += t
}
return time.Duration(int(total) / ts.Len())
}
func (ts TimeDurations) p(p float64) time.Duration {
return ts[int(float64(ts.Len())*p+0.5)-1]
}
func (ts TimeDurations) stdDev() time.Duration {
m := ts.avg()
s := 0.00
for _, t := range ts {
s += math.Pow(float64(m-t), 2)
}
msq := s / float64(ts.Len())
return time.Duration(math.Sqrt(msq))
}
func (ts TimeDurations) long5p() time.Duration {
set := ts[int(float64(ts.Len())*0.95+0.5):]
if len(set) <= 1 {
return ts[ts.Len()-1]
}
var t time.Duration
var i int
for _, n := range set {
t += n
i++
}
return time.Duration(int(t) / i)
}
func (ts TimeDurations) short5p() time.Duration {
set := ts[:int(float64(ts.Len())*0.05+0.5)]
if len(set) <= 1 {
return ts[0]
}
var t time.Duration
var i int
for _, n := range set {
t += n
i++
}
return time.Duration(int(t) / i)
}
func (ts TimeDurations) min() time.Duration {
return ts[0]
}
func (ts TimeDurations) max() time.Duration {
return ts[ts.Len()-1]
}
func (ts TimeDurations) srange() time.Duration {
return ts.max() - ts.min()
}
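// A minimal usage sketch (illustrative only, not part of the upstream source):
// summarize a set of latency samples. Note that Measure sorts the receiver
// slice in place; pass a copy if the original ordering must be preserved.
//
//	samples := TimeDurations{
//		4 * time.Millisecond,
//		9 * time.Millisecond,
//		12 * time.Millisecond,
//		21 * time.Millisecond,
//		88 * time.Millisecond,
//	}
//	t := samples.Measure()
//	fmt.Println("avg:", t.Avg, "p50:", t.P50, "p99:", t.P99, "max:", t.Max)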
golang-github-minio-madmin-go-3.0.104/timings_test.go 0000664 0000000 0000000 00000004631 14774251704 0022444 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"sort"
"testing"
)
func TestTimings(t *testing.T) {
durations := TimeDurations{
4000000,
4000000,
9000000,
9000000,
12000000,
12000000,
14000000,
14000000,
17000000,
17000000,
21000000,
21000000,
36000000,
36000000,
37000000,
37000000,
42000000,
42000000,
54000000,
54000000,
67000000,
67000000,
77000000,
77000000,
88000000,
88000000,
89000000,
89000000,
93000000,
93000000,
}
sort.Slice(durations, func(i, j int) bool {
return int64(durations[i]) < int64(durations[j])
})
timings := durations.Measure()
if timings.Avg != 44000000 {
t.Errorf("Expected 44000000, got %d\n", timings.Avg)
}
if timings.P50 != 37000000 {
t.Errorf("Expected 37000000, got %d\n", timings.P50)
}
if timings.P75 != 77000000 {
t.Errorf("Expected 77000000, got %d\n", timings.P75)
}
if timings.P95 != 93000000 {
t.Errorf("Expected 93000000, got %d\n", timings.P95)
}
if timings.P99 != 93000000 {
t.Errorf("Expected 93000000, got %d\n", timings.P99)
}
if timings.P999 != 93000000 {
t.Errorf("Expected 93000000, got %d\n", timings.P999)
}
if timings.Long5p != 93000000 {
t.Errorf("Expected 93000000, got %d\n", timings.Long5p)
}
if timings.Short5p != 4000000 {
t.Errorf("Expected 4000000, got %d\n", timings.Short5p)
}
if timings.Max != 93000000 {
t.Errorf("Expected 93000000, got %d\n", timings.Max)
}
if timings.Min != 4000000 {
t.Errorf("Expected 4000000, got %d\n", timings.Min)
}
if timings.Range != 89000000 {
t.Errorf("Expected 89000000, got %d\n", timings.Range)
}
if timings.StdDev != 30772281 {
t.Errorf("Expected abc, got %d\n", timings.StdDev)
}
}
golang-github-minio-madmin-go-3.0.104/top-commands.go 0000664 0000000 0000000 00000010020 14774251704 0022321 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// LockEntry holds information about the client requesting the lock,
// the servers holding the lock, the source on the client machine,
// the ID, the type (read or write) and the timestamp.
type LockEntry struct {
Timestamp time.Time `json:"time"` // When the lock was first granted
Elapsed time.Duration `json:"elapsed"` // Duration for which lock has been held
Resource string `json:"resource"` // Resource contains info like bucket+object
Type string `json:"type"` // Type indicates if 'Write' or 'Read' lock
Source string `json:"source"` // Source at which lock was granted
ServerList []string `json:"serverlist"` // List of servers participating in the lock.
Owner string `json:"owner"` // Owner UUID indicates server owns the lock.
ID string `json:"id"` // UID to uniquely identify request of client.
// Represents quorum number of servers required to hold this lock, used to look for stale locks.
Quorum int `json:"quorum"`
}
// LockEntries - To sort the locks
type LockEntries []LockEntry
func (l LockEntries) Len() int {
return len(l)
}
func (l LockEntries) Less(i, j int) bool {
return l[i].Timestamp.Before(l[j].Timestamp)
}
func (l LockEntries) Swap(i, j int) {
l[i], l[j] = l[j], l[i]
}
// TopLockOpts top lock options
type TopLockOpts struct {
Count int
Stale bool
}
// ForceUnlock force unlocks input paths...
func (adm *AdminClient) ForceUnlock(ctx context.Context, paths ...string) error {
// Execute POST on /minio/admin/v3/force-unlock
queryVals := make(url.Values)
queryVals.Set("paths", strings.Join(paths, ","))
resp, err := adm.executeMethod(ctx,
http.MethodPost,
requestData{
relPath: adminAPIPrefix + "/force-unlock",
queryValues: queryVals,
},
)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// TopLocksWithOpts - returns the 'count' oldest locks currently active on the server.
// Additionally, setting `stale` to true also reports stale locks currently present on the server.
func (adm *AdminClient) TopLocksWithOpts(ctx context.Context, opts TopLockOpts) (LockEntries, error) {
// Execute GET on /minio/admin/v3/top/locks?count=10
// to get the 'count' number of oldest locks currently
// active on the server.
queryVals := make(url.Values)
queryVals.Set("count", strconv.Itoa(opts.Count))
queryVals.Set("stale", strconv.FormatBool(opts.Stale))
resp, err := adm.executeMethod(ctx,
http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/top/locks",
queryValues: queryVals,
},
)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
response, err := io.ReadAll(resp.Body)
if err != nil {
return LockEntries{}, err
}
var lockEntries LockEntries
err = json.Unmarshal(response, &lockEntries)
return lockEntries, err
}
// TopLocks - returns top '10' oldest locks currently active on the server.
func (adm *AdminClient) TopLocks(ctx context.Context) (LockEntries, error) {
return adm.TopLocksWithOpts(ctx, TopLockOpts{Count: 10})
}
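// A minimal usage sketch (illustrative only, not part of the upstream source):
// fetch the 25 oldest locks, including stale ones, assuming an already
// initialized *AdminClient named adm (not constructed here).
//
//	entries, err := adm.TopLocksWithOpts(context.Background(), TopLockOpts{Count: 25, Stale: true})
//	if err != nil {
//		log.Fatalln(err)
//	}
//	for _, e := range entries {
//		fmt.Println(e.Timestamp, e.Type, e.Resource, "held for", e.Elapsed)
//	}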
golang-github-minio-madmin-go-3.0.104/trace.go 0000664 0000000 0000000 00000014306 14774251704 0021031 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
package madmin
import (
"math/bits"
"net/http"
"strings"
"time"
)
//go:generate stringer -type=TraceType -trimprefix=Trace $GOFILE
// TraceType indicates the type of the tracing Info
type TraceType uint64
const (
// TraceOS tracing (Golang os package calls)
TraceOS TraceType = 1 << iota
// TraceStorage tracing (MinIO Storage Layer)
TraceStorage
// TraceS3 provides tracing of S3 API calls
TraceS3
// TraceInternal tracing internal (.minio.sys/...) HTTP calls
TraceInternal
// TraceScanner will trace scan operations.
TraceScanner
// TraceDecommission will trace decommission operations.
TraceDecommission
// TraceHealing will trace healing operations.
TraceHealing
// TraceBatchReplication will trace batch replication operations.
TraceBatchReplication
// TraceBatchKeyRotation will trace batch keyrotation operations.
TraceBatchKeyRotation
// TraceBatchExpire will trace batch expiration operations.
TraceBatchExpire
// TraceRebalance will trace rebalance operations
TraceRebalance
// TraceReplicationResync will trace replication resync operations.
TraceReplicationResync
// TraceBootstrap will trace events during MinIO cluster bootstrap
TraceBootstrap
// TraceFTP will trace events from MinIO FTP Server
TraceFTP
// TraceILM will trace events during MinIO ILM operations
TraceILM
// TraceKMS are traces for interactions with KMS.
TraceKMS
// TraceFormatting will trace formatting events
TraceFormatting
// TraceAdmin will trace admin calls
TraceAdmin
// TraceObject will trace object layer operations
TraceObject
// Add more here...
// TraceAll contains all valid trace modes.
// This *must* be the last entry.
TraceAll TraceType = (1 << iota) - 1
)
const (
// TraceBatch will trace all batch operations.
TraceBatch = TraceBatchReplication | TraceBatchKeyRotation | TraceBatchExpire // |TraceBatch
)
// FindTraceType will find a single trace type from a string,
// as returned by String(). Matching is not case sensitive.
// Will return 0 if not found.
func FindTraceType(s string) TraceType {
bitIdx := uint(0)
for {
idx := TraceType(1 << bitIdx)
if idx > TraceAll {
return 0
}
if strings.EqualFold(idx.String(), s) {
return idx
}
bitIdx++
}
}
// Contains returns whether all flags in other is present in t.
func (t TraceType) Contains(other TraceType) bool {
return t&other == other
}
// Overlaps returns whether any flags in t overlaps with other.
func (t TraceType) Overlaps(other TraceType) bool {
return t&other != 0
}
// SingleType returns whether t has a single type set.
func (t TraceType) SingleType() bool {
// Include
return bits.OnesCount64(uint64(t)) == 1
}
// Merge will merge other into t.
func (t *TraceType) Merge(other TraceType) {
*t = *t | other
}
// SetIf will add other if b is true.
func (t *TraceType) SetIf(b bool, other TraceType) {
if b {
*t = *t | other
}
}
// Mask returns the trace type as uint64.
func (t TraceType) Mask() uint64 {
return uint64(t)
}
// TraceInfo - represents a trace record, additionally
// also reports errors if any while listening on trace.
type TraceInfo struct {
TraceType TraceType `json:"type"`
NodeName string `json:"nodename"`
FuncName string `json:"funcname"`
Time time.Time `json:"time"`
Path string `json:"path"`
Duration time.Duration `json:"dur"`
Bytes int64 `json:"bytes,omitempty"`
Message string `json:"msg,omitempty"`
Error string `json:"error,omitempty"`
Custom map[string]string `json:"custom,omitempty"`
HTTP *TraceHTTPStats `json:"http,omitempty"`
HealResult *HealResultItem `json:"healResult,omitempty"`
}
// Mask returns the trace type as uint64.
func (t TraceInfo) Mask() uint64 {
return t.TraceType.Mask()
}
// traceInfoLegacy - represents a trace record, additionally
// also reports errors if any while listening on trace.
// For minio versions before July 2022.
type traceInfoLegacy struct {
TraceInfo
ReqInfo *TraceRequestInfo `json:"request"`
RespInfo *TraceResponseInfo `json:"response"`
CallStats *TraceCallStats `json:"stats"`
StorageStats *struct {
Path string `json:"path"`
Duration time.Duration `json:"duration"`
} `json:"storageStats"`
OSStats *struct {
Path string `json:"path"`
Duration time.Duration `json:"duration"`
} `json:"osStats"`
}
type TraceHTTPStats struct {
ReqInfo TraceRequestInfo `json:"request"`
RespInfo TraceResponseInfo `json:"response"`
CallStats TraceCallStats `json:"stats"`
}
// TraceCallStats records request stats
type TraceCallStats struct {
InputBytes int `json:"inputbytes"`
OutputBytes int `json:"outputbytes"`
// Deprecated: Use TraceInfo.Duration (June 2022)
Latency time.Duration `json:"latency"`
TimeToFirstByte time.Duration `json:"timetofirstbyte"`
}
// TraceRequestInfo represents trace of http request
type TraceRequestInfo struct {
Time time.Time `json:"time"`
Proto string `json:"proto"`
Method string `json:"method"`
Path string `json:"path,omitempty"`
RawQuery string `json:"rawquery,omitempty"`
Headers http.Header `json:"headers,omitempty"`
Body []byte `json:"body,omitempty"`
Client string `json:"client"`
}
// TraceResponseInfo represents trace of http response
type TraceResponseInfo struct {
Time time.Time `json:"time"`
Headers http.Header `json:"headers,omitempty"`
Body []byte `json:"body,omitempty"`
StatusCode int `json:"statuscode,omitempty"`
}
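// A minimal usage sketch (illustrative only, not part of the upstream source):
// build a trace mask from user-supplied names and flags with the helpers
// defined above; the names and the verbose flag are placeholders.
//
//	var mask TraceType
//	for _, name := range []string{"s3", "internal"} {
//		if t := FindTraceType(name); t != 0 {
//			mask.Merge(t)
//		}
//	}
//	verbose := true
//	mask.SetIf(verbose, TraceOS|TraceStorage)
//	fmt.Println(mask.Contains(TraceS3), mask.Overlaps(TraceScanner), mask.Mask())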
golang-github-minio-madmin-go-3.0.104/tracetype_string.go 0000664 0000000 0000000 00000003537 14774251704 0023325 0 ustar 00root root 0000000 0000000 // Code generated by "stringer -type=TraceType -trimprefix=Trace trace.go"; DO NOT EDIT.
package madmin
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[TraceOS-1]
_ = x[TraceStorage-2]
_ = x[TraceS3-4]
_ = x[TraceInternal-8]
_ = x[TraceScanner-16]
_ = x[TraceDecommission-32]
_ = x[TraceHealing-64]
_ = x[TraceBatchReplication-128]
_ = x[TraceBatchKeyRotation-256]
_ = x[TraceBatchExpire-512]
_ = x[TraceRebalance-1024]
_ = x[TraceReplicationResync-2048]
_ = x[TraceBootstrap-4096]
_ = x[TraceFTP-8192]
_ = x[TraceILM-16384]
_ = x[TraceKMS-32768]
_ = x[TraceFormatting-65536]
_ = x[TraceAdmin-131072]
_ = x[TraceObject-262144]
_ = x[TraceAll-524287]
}
const _TraceType_name = "OSStorageS3InternalScannerDecommissionHealingBatchReplicationBatchKeyRotationBatchExpireRebalanceReplicationResyncBootstrapFTPILMKMSFormattingAdminObjectAll"
var _TraceType_map = map[TraceType]string{
1: _TraceType_name[0:2],
2: _TraceType_name[2:9],
4: _TraceType_name[9:11],
8: _TraceType_name[11:19],
16: _TraceType_name[19:26],
32: _TraceType_name[26:38],
64: _TraceType_name[38:45],
128: _TraceType_name[45:61],
256: _TraceType_name[61:77],
512: _TraceType_name[77:88],
1024: _TraceType_name[88:97],
2048: _TraceType_name[97:114],
4096: _TraceType_name[114:123],
8192: _TraceType_name[123:126],
16384: _TraceType_name[126:129],
32768: _TraceType_name[129:132],
65536: _TraceType_name[132:142],
131072: _TraceType_name[142:147],
262144: _TraceType_name[147:153],
524287: _TraceType_name[153:156],
}
func (i TraceType) String() string {
if str, ok := _TraceType_map[i]; ok {
return str
}
return "TraceType(" + strconv.FormatInt(int64(i), 10) + ")"
}
golang-github-minio-madmin-go-3.0.104/transport.go 0000664 0000000 0000000 00000004073 14774251704 0021767 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"crypto/tls"
"net"
"net/http"
"time"
)
// DefaultTransport - this default transport is similar to
// http.DefaultTransport but with the additional parameter DisableCompression
// set to true to avoid decompressing content with 'gzip' encoding.
var DefaultTransport = func(secure bool) http.RoundTripper {
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 5 * time.Second,
KeepAlive: 15 * time.Second,
FallbackDelay: 100 * time.Millisecond,
}).DialContext,
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
ResponseHeaderTimeout: 60 * time.Second,
IdleConnTimeout: 60 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer:
// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
DisableCompression: true,
}
if secure {
tr.TLSClientConfig = &tls.Config{
// Can't use SSLv3 because of POODLE and BEAST
// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
// Can't use TLSv1.1 because of RC4 cipher usage
MinVersion: tls.VersionTLS12,
}
}
return tr
}
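// Example (editor's sketch): plugging DefaultTransport into a plain HTTP client.
// The host name below is a placeholder; the import path is assumed from this
// module's major version.
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//		"time"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		client := &http.Client{
//			// TLS 1.2+ and DisableCompression, as configured above.
//			Transport: madmin.DefaultTransport(true),
//			Timeout:   30 * time.Second,
//		}
//		resp, err := client.Get("https://minio.example.net/minio/health/live")
//		if err != nil {
//			fmt.Println("request failed:", err)
//			return
//		}
//		defer resp.Body.Close()
//		fmt.Println("status:", resp.Status)
//	}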
golang-github-minio-madmin-go-3.0.104/update-commands.go 0000664 0000000 0000000 00000007236 14774251704 0023020 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strconv"
)
// ServerPeerUpdateStatus - binary update result for a single server peer
type ServerPeerUpdateStatus struct {
Host string `json:"host"`
Err string `json:"err,omitempty"`
CurrentVersion string `json:"currentVersion"`
UpdatedVersion string `json:"updatedVersion"`
WaitingDrives map[string]DiskMetrics `json:"waitingDrives,omitempty"`
}
// ServerUpdateStatusV2 - overall server update status
type ServerUpdateStatusV2 struct {
DryRun bool `json:"dryRun"`
Results []ServerPeerUpdateStatus `json:"results,omitempty"`
}
// ServerUpdateOpts specifies an optional URL to download the binary from and
// whether to perform a dry run. The new API is idempotent: it can be run as
// many times as you want, and any server that is not yet upgraded eventually
// gets upgraded to the relevant version.
type ServerUpdateOpts struct {
UpdateURL string
DryRun bool
}
// ServerUpdateV2 - updates and restarts the MinIO cluster to the latest version.
// Optionally takes an input URL pointing to a custom update binary.
func (adm *AdminClient) ServerUpdateV2(ctx context.Context, opts ServerUpdateOpts) (us ServerUpdateStatusV2, err error) {
queryValues := url.Values{}
queryValues.Set("type", "2")
queryValues.Set("updateURL", opts.UpdateURL)
queryValues.Set("dry-run", strconv.FormatBool(opts.DryRun))
// Request API to Restart server
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/update",
queryValues: queryValues,
},
)
defer closeResponse(resp)
if err != nil {
return us, err
}
if resp.StatusCode != http.StatusOK {
return us, httpRespToErrorResponse(resp)
}
if err = json.NewDecoder(resp.Body).Decode(&us); err != nil {
return us, err
}
return us, nil
}
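// Example (editor's sketch): a dry-run cluster update using ServerUpdateV2.
// The endpoint and credentials are placeholders, and madmin.New is the
// package-level constructor declared elsewhere in this module.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		adm, err := madmin.New("minio.example.net:9000", "ACCESS-KEY", "SECRET-KEY", true)
//		if err != nil {
//			log.Fatal(err)
//		}
//		status, err := adm.ServerUpdateV2(context.Background(), madmin.ServerUpdateOpts{
//			DryRun: true, // report what would happen without restarting anything
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, r := range status.Results {
//			fmt.Printf("%s: %s -> %s\n", r.Host, r.CurrentVersion, r.UpdatedVersion)
//		}
//	}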
// ServerUpdateStatus - contains the response of the service update API
type ServerUpdateStatus struct {
// Deprecated: this struct is fully deprecated since Jan 2024.
CurrentVersion string `json:"currentVersion"`
UpdatedVersion string `json:"updatedVersion"`
}
// ServerUpdate - updates and restarts the MinIO cluster to the latest version.
// Optionally takes an input URL pointing to a custom update binary.
func (adm *AdminClient) ServerUpdate(ctx context.Context, updateURL string) (us ServerUpdateStatus, err error) {
queryValues := url.Values{}
queryValues.Set("updateURL", updateURL)
// Request API to Restart server
resp, err := adm.executeMethod(ctx,
http.MethodPost, requestData{
relPath: adminAPIPrefix + "/update",
queryValues: queryValues,
},
)
defer closeResponse(resp)
if err != nil {
return us, err
}
if resp.StatusCode != http.StatusOK {
return us, httpRespToErrorResponse(resp)
}
if err = json.NewDecoder(resp.Body).Decode(&us); err != nil {
return us, err
}
return us, nil
}
golang-github-minio-madmin-go-3.0.104/user-commands.go 0000664 0000000 0000000 00000056565 14774251704 0022525 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"context"
"encoding/json"
"errors"
"io"
"net/http"
"net/url"
"regexp"
"time"
"github.com/minio/minio-go/v7/pkg/tags"
)
// AccountAccess contains the read and write access permissions of an account.
type AccountAccess struct {
Read bool `json:"read"`
Write bool `json:"write"`
}
// BucketDetails provides information about features currently
// turned on per bucket.
type BucketDetails struct {
Versioning bool `json:"versioning"`
VersioningSuspended bool `json:"versioningSuspended"`
Locking bool `json:"locking"`
Replication bool `json:"replication"`
Tagging *tags.Tags `json:"tags"`
Quota *BucketQuota `json:"quota"`
}
// BucketAccessInfo represents the usage of a bucket and the relevant
// access type for an account
type BucketAccessInfo struct {
Name string `json:"name"`
Size uint64 `json:"size"`
Objects uint64 `json:"objects"`
ObjectSizesHistogram map[string]uint64 `json:"objectHistogram"`
ObjectVersionsHistogram map[string]uint64 `json:"objectsVersionsHistogram"`
Details *BucketDetails `json:"details"`
PrefixUsage map[string]uint64 `json:"prefixUsage"`
Created time.Time `json:"created"`
Access AccountAccess `json:"access"`
}
// AccountInfo represents the account usage info of an
// account across buckets.
type AccountInfo struct {
AccountName string
Server BackendInfo
Policy json.RawMessage // Use iam/policy.Parse to parse the result, to be done by the caller.
Buckets []BucketAccessInfo
}
// AccountOpts allows for configurable behavior with "prefix-usage"
type AccountOpts struct {
PrefixUsage bool
}
// AccountInfo returns the usage info for the authenticating account.
func (adm *AdminClient) AccountInfo(ctx context.Context, opts AccountOpts) (AccountInfo, error) {
q := make(url.Values)
if opts.PrefixUsage {
q.Set("prefix-usage", "true")
}
resp, err := adm.executeMethod(ctx, http.MethodGet,
requestData{
relPath: adminAPIPrefix + "/accountinfo",
queryValues: q,
},
)
defer closeResponse(resp)
if err != nil {
return AccountInfo{}, err
}
// Check response http status code
if resp.StatusCode != http.StatusOK {
return AccountInfo{}, httpRespToErrorResponse(resp)
}
// Unmarshal the server's json response
var accountInfo AccountInfo
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return AccountInfo{}, err
}
err = json.Unmarshal(respBytes, &accountInfo)
if err != nil {
return AccountInfo{}, err
}
return accountInfo, nil
}
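// Example (editor's sketch): fetching usage information for the authenticating
// account. Endpoint and credentials are placeholders; madmin.New is the
// package-level constructor declared elsewhere in this module.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		adm, err := madmin.New("minio.example.net:9000", "ACCESS-KEY", "SECRET-KEY", true)
//		if err != nil {
//			log.Fatal(err)
//		}
//		info, err := adm.AccountInfo(context.Background(), madmin.AccountOpts{PrefixUsage: true})
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println("account:", info.AccountName)
//		for _, b := range info.Buckets {
//			fmt.Printf("  %s: %d objects, %d bytes\n", b.Name, b.Objects, b.Size)
//		}
//	}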
// AccountStatus - account status.
type AccountStatus string
// Account status per user.
const (
AccountEnabled AccountStatus = "enabled"
AccountDisabled AccountStatus = "disabled"
)
// UserAuthType indicates the type of authentication for the user.
type UserAuthType string
// Valid values for UserAuthType.
const (
BuiltinUserAuthType UserAuthType = "builtin"
LDAPUserAuthType UserAuthType = "ldap"
)
// UserAuthInfo contains info about how the user is authenticated.
type UserAuthInfo struct {
Type UserAuthType `json:"type"`
// Specifies the external server that authenticated the user (empty for
// builtin IDP)
AuthServer string `json:"authServer,omitempty"`
// Specifies the user ID as present in the external auth server (e.g. in
// OIDC this could be the user's email). For builtin, this would be the same
// as the access key.
AuthServerUserID string `json:"authServerUserID,omitempty"`
}
// UserInfo carries information about long term users.
type UserInfo struct {
AuthInfo *UserAuthInfo `json:"userAuthInfo,omitempty"`
SecretKey string `json:"secretKey,omitempty"`
PolicyName string `json:"policyName,omitempty"`
Status AccountStatus `json:"status"`
MemberOf []string `json:"memberOf,omitempty"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
// RemoveUser - remove a user.
func (adm *AdminClient) RemoveUser(ctx context.Context, accessKey string) error {
queryValues := url.Values{}
queryValues.Set("accessKey", accessKey)
reqData := requestData{
relPath: adminAPIPrefix + "/remove-user",
queryValues: queryValues,
}
// Execute DELETE on /minio/admin/v3/remove-user to remove a user.
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// ListUsers - list all users.
func (adm *AdminClient) ListUsers(ctx context.Context) (map[string]UserInfo, error) {
reqData := requestData{
relPath: adminAPIPrefix + "/list-users",
}
// Execute GET on /minio/admin/v3/list-users
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return nil, err
}
users := make(map[string]UserInfo)
if err = json.Unmarshal(data, &users); err != nil {
return nil, err
}
return users, nil
}
// GetUserInfo - get info on a user
func (adm *AdminClient) GetUserInfo(ctx context.Context, name string) (u UserInfo, err error) {
queryValues := url.Values{}
queryValues.Set("accessKey", name)
reqData := requestData{
relPath: adminAPIPrefix + "/user-info",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/user-info
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return u, err
}
if resp.StatusCode != http.StatusOK {
return u, httpRespToErrorResponse(resp)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return u, err
}
if err = json.Unmarshal(b, &u); err != nil {
return u, err
}
return u, nil
}
// AddOrUpdateUserReq allows updating
// - user details such as the secret key
// - account status
// - optionally a comma-separated list of policies
//   to be applied to the user.
type AddOrUpdateUserReq struct {
SecretKey string `json:"secretKey,omitempty"`
Policy string `json:"policy,omitempty"`
Status AccountStatus `json:"status"`
}
// SetUserReq - update user secret key, account status or policies.
func (adm *AdminClient) SetUserReq(ctx context.Context, accessKey string, req AddOrUpdateUserReq) error {
data, err := json.Marshal(req)
if err != nil {
return err
}
econfigBytes, err := EncryptData(adm.getSecretKey(), data)
if err != nil {
return err
}
queryValues := url.Values{}
queryValues.Set("accessKey", accessKey)
reqData := requestData{
relPath: adminAPIPrefix + "/add-user",
queryValues: queryValues,
content: econfigBytes,
}
// Execute PUT on /minio/admin/v3/add-user to set a user.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// SetUser - update user secret key or account status.
func (adm *AdminClient) SetUser(ctx context.Context, accessKey, secretKey string, status AccountStatus) error {
return adm.SetUserReq(ctx, accessKey, AddOrUpdateUserReq{
SecretKey: secretKey,
Status: status,
})
}
// AddUser - adds a user.
func (adm *AdminClient) AddUser(ctx context.Context, accessKey, secretKey string) error {
return adm.SetUser(ctx, accessKey, secretKey, AccountEnabled)
}
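// Example (editor's sketch): creating a user and then attaching a policy and
// status via SetUserReq. The access key, secret key and policy name are
// placeholders; madmin.New is declared elsewhere in this module.
//
//	package main
//
//	import (
//		"context"
//		"log"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		adm, err := madmin.New("minio.example.net:9000", "ADMIN-KEY", "ADMIN-SECRET", true)
//		if err != nil {
//			log.Fatal(err)
//		}
//		ctx := context.Background()
//		// Create the user with an enabled status.
//		if err := adm.AddUser(ctx, "newuser", "newuser-secret"); err != nil {
//			log.Fatal(err)
//		}
//		// Update the same user: keep it enabled and attach an existing policy.
//		err = adm.SetUserReq(ctx, "newuser", madmin.AddOrUpdateUserReq{
//			Policy: "readwrite", // comma-separated list of existing policy names
//			Status: madmin.AccountEnabled,
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//	}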
// SetUserStatus - sets the status of a user.
func (adm *AdminClient) SetUserStatus(ctx context.Context, accessKey string, status AccountStatus) error {
queryValues := url.Values{}
queryValues.Set("accessKey", accessKey)
queryValues.Set("status", string(status))
reqData := requestData{
relPath: adminAPIPrefix + "/set-user-status",
queryValues: queryValues,
}
// Execute PUT on /minio/admin/v3/set-user-status to set status.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp)
}
return nil
}
// AddServiceAccountReq is the request options of the add service account admin call
type AddServiceAccountReq struct {
Policy json.RawMessage `json:"policy,omitempty"` // Parsed value from iam/policy.Parse()
TargetUser string `json:"targetUser,omitempty"`
AccessKey string `json:"accessKey,omitempty"`
SecretKey string `json:"secretKey,omitempty"`
// Name for this access key
Name string `json:"name,omitempty"`
// Description for this access key
Description string `json:"description,omitempty"`
// Time at which this access key expires
Expiration *time.Time `json:"expiration,omitempty"`
// Deprecated: use description instead
Comment string `json:"comment,omitempty"`
}
var serviceAcctValidNameRegex = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_-]*`)
func validateSAName(name string) error {
if name == "" {
return nil
}
if len(name) > 32 {
return errors.New("name must not be longer than 32 characters")
}
if !serviceAcctValidNameRegex.MatchString(name) {
return errors.New("name must contain only ASCII letters, digits, underscores and hyphens and must start with a letter")
}
return nil
}
func validateSADescription(desc string) error {
if desc == "" {
return nil
}
if len(desc) > 256 {
return errors.New("description must be at most 256 bytes long")
}
return nil
}
var timeSentinel = time.Unix(0, 0).UTC()
func validateSAExpiration(expiration *time.Time) error {
// Zero value is valid, it means no expiration.
if expiration == nil || expiration.UTC().IsZero() || expiration.UTC().Equal(timeSentinel) {
return nil
}
if expiration.Before(time.Now()) {
return errors.New("the expiration time should be in the future")
}
return nil
}
// Validate validates the request parameters.
func (r *AddServiceAccountReq) Validate() error {
if err := validateSAName(r.Name); err != nil {
return err
}
if err := validateSAExpiration(r.Expiration); err != nil {
return err
}
return validateSADescription(r.Description)
}
// AddServiceAccountResp is the response body of the add service account admin call
type AddServiceAccountResp struct {
Credentials Credentials `json:"credentials"`
}
// AddServiceAccount - creates a new service account belonging to the user sending
// the request while restricting the service account permission by the given policy document.
func (adm *AdminClient) AddServiceAccount(ctx context.Context, opts AddServiceAccountReq) (Credentials, error) {
if err := opts.Validate(); err != nil {
return Credentials{}, err
}
data, err := json.Marshal(opts)
if err != nil {
return Credentials{}, err
}
econfigBytes, err := EncryptData(adm.getSecretKey(), data)
if err != nil {
return Credentials{}, err
}
reqData := requestData{
relPath: adminAPIPrefix + "/add-service-account",
content: econfigBytes,
}
// Execute PUT on /minio/admin/v3/add-service-account to set a user.
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return Credentials{}, err
}
if resp.StatusCode != http.StatusOK {
return Credentials{}, httpRespToErrorResponse(resp)
}
data, err = DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return Credentials{}, err
}
var serviceAccountResp AddServiceAccountResp
if err = json.Unmarshal(data, &serviceAccountResp); err != nil {
return Credentials{}, err
}
return serviceAccountResp.Credentials, nil
}
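// Example (editor's sketch): creating a named service account with an
// expiration for an existing user. All names and credentials are placeholders;
// madmin.New and the Credentials type are declared elsewhere in this module.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//		"time"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		adm, err := madmin.New("minio.example.net:9000", "ADMIN-KEY", "ADMIN-SECRET", true)
//		if err != nil {
//			log.Fatal(err)
//		}
//		exp := time.Now().Add(90 * 24 * time.Hour) // expire in ~90 days
//		creds, err := adm.AddServiceAccount(context.Background(), madmin.AddServiceAccountReq{
//			TargetUser:  "newuser",
//			Name:        "ci-builder",                  // must satisfy validateSAName
//			Description: "key used by the CI pipeline", // at most 256 bytes
//			Expiration:  &exp,
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println("new access key:", creds.AccessKey)
//	}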
// AddServiceAccountLDAP - AddServiceAccount with extra features, restricted to LDAP users.
func (adm *AdminClient) AddServiceAccountLDAP(ctx context.Context, opts AddServiceAccountReq) (Credentials, error) {
if err := opts.Validate(); err != nil {
return Credentials{}, err
}
data, err := json.Marshal(opts)
if err != nil {
return Credentials{}, err
}
econfigBytes, err := EncryptData(adm.getSecretKey(), data)
if err != nil {
return Credentials{}, err
}
reqData := requestData{
relPath: adminAPIPrefix + "/idp/ldap/add-service-account",
content: econfigBytes,
}
resp, err := adm.executeMethod(ctx, http.MethodPut, reqData)
defer closeResponse(resp)
if err != nil {
return Credentials{}, err
}
if resp.StatusCode != http.StatusOK {
return Credentials{}, httpRespToErrorResponse(resp)
}
data, err = DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return Credentials{}, err
}
var serviceAccountResp AddServiceAccountResp
if err = json.Unmarshal(data, &serviceAccountResp); err != nil {
return Credentials{}, err
}
return serviceAccountResp.Credentials, nil
}
// UpdateServiceAccountReq is the request options of the edit service account admin call
type UpdateServiceAccountReq struct {
NewPolicy json.RawMessage `json:"newPolicy,omitempty"` // Parsed policy from iam/policy.Parse
NewSecretKey string `json:"newSecretKey,omitempty"`
NewStatus string `json:"newStatus,omitempty"`
NewName string `json:"newName,omitempty"`
NewDescription string `json:"newDescription,omitempty"`
NewExpiration *time.Time `json:"newExpiration,omitempty"`
}
func (u *UpdateServiceAccountReq) Validate() error {
if err := validateSAName(u.NewName); err != nil {
return err
}
if err := validateSAExpiration(u.NewExpiration); err != nil {
return err
}
return validateSADescription(u.NewDescription)
}
// UpdateServiceAccount - edit an existing service account
func (adm *AdminClient) UpdateServiceAccount(ctx context.Context, accessKey string, opts UpdateServiceAccountReq) error {
if err := opts.Validate(); err != nil {
return err
}
data, err := json.Marshal(opts)
if err != nil {
return err
}
econfigBytes, err := EncryptData(adm.getSecretKey(), data)
if err != nil {
return err
}
queryValues := url.Values{}
queryValues.Set("accessKey", accessKey)
reqData := requestData{
relPath: adminAPIPrefix + "/update-service-account",
content: econfigBytes,
queryValues: queryValues,
}
// Execute POST on /minio/admin/v3/update-service-account to edit a service account
resp, err := adm.executeMethod(ctx, http.MethodPost, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
type ServiceAccountInfo struct {
ParentUser string `json:"parentUser"`
AccountStatus string `json:"accountStatus"`
ImpliedPolicy bool `json:"impliedPolicy"`
AccessKey string `json:"accessKey"`
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
Expiration *time.Time `json:"expiration,omitempty"`
}
// ListServiceAccountsResp is the response body of the list service accounts call
type ListServiceAccountsResp struct {
Accounts []ServiceAccountInfo `json:"accounts"`
}
// ListServiceAccounts - list service accounts belonging to the specified user
func (adm *AdminClient) ListServiceAccounts(ctx context.Context, user string) (ListServiceAccountsResp, error) {
queryValues := url.Values{}
queryValues.Set("user", user)
reqData := requestData{
relPath: adminAPIPrefix + "/list-service-accounts",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/list-service-accounts
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return ListServiceAccountsResp{}, err
}
if resp.StatusCode != http.StatusOK {
return ListServiceAccountsResp{}, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return ListServiceAccountsResp{}, err
}
var listResp ListServiceAccountsResp
if err = json.Unmarshal(data, &listResp); err != nil {
return ListServiceAccountsResp{}, err
}
return listResp, nil
}
type ListAccessKeysResp struct {
ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"`
STSKeys []ServiceAccountInfo `json:"stsKeys"`
}
const (
AccessKeyListUsersOnly = "users-only"
AccessKeyListSTSOnly = "sts-only"
AccessKeyListSvcaccOnly = "svcacc-only"
AccessKeyListAll = "all"
)
// ListAccessKeysOpts - options for listing access keys
type ListAccessKeysOpts struct {
ListType string
All bool
}
// ListAccessKeysBulk - list access keys belonging to the given users or all users
func (adm *AdminClient) ListAccessKeysBulk(ctx context.Context, users []string, opts ListAccessKeysOpts) (map[string]ListAccessKeysResp, error) {
if len(users) > 0 && opts.All {
return nil, errors.New("either specify users or all, not both")
}
queryValues := url.Values{}
queryValues.Set("listType", opts.ListType)
queryValues["users"] = users
if opts.All {
queryValues.Set("all", "true")
}
reqData := requestData{
relPath: adminAPIPrefix + "/list-access-keys-bulk",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/list-access-keys-bulk
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return nil, err
}
listResp := make(map[string]ListAccessKeysResp)
if err = json.Unmarshal(data, &listResp); err != nil {
return nil, err
}
return listResp, nil
}
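// Example (editor's sketch): listing every access key (service accounts and
// STS keys) for all users. Endpoint and credentials are placeholders;
// madmin.New is declared elsewhere in this module.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		adm, err := madmin.New("minio.example.net:9000", "ADMIN-KEY", "ADMIN-SECRET", true)
//		if err != nil {
//			log.Fatal(err)
//		}
//		keys, err := adm.ListAccessKeysBulk(context.Background(), nil, madmin.ListAccessKeysOpts{
//			ListType: madmin.AccessKeyListAll,
//			All:      true, // mutually exclusive with a non-empty users slice
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		for user, resp := range keys {
//			fmt.Printf("%s: %d service accounts, %d STS keys\n",
//				user, len(resp.ServiceAccounts), len(resp.STSKeys))
//		}
//	}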
// InfoServiceAccountResp is the response body of the info service account call
type InfoServiceAccountResp struct {
ParentUser string `json:"parentUser"`
AccountStatus string `json:"accountStatus"`
ImpliedPolicy bool `json:"impliedPolicy"`
Policy string `json:"policy"`
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
Expiration *time.Time `json:"expiration,omitempty"`
}
// InfoServiceAccount - returns the info of service account belonging to the specified user
func (adm *AdminClient) InfoServiceAccount(ctx context.Context, accessKey string) (InfoServiceAccountResp, error) {
queryValues := url.Values{}
queryValues.Set("accessKey", accessKey)
reqData := requestData{
relPath: adminAPIPrefix + "/info-service-account",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/info-service-account
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return InfoServiceAccountResp{}, err
}
if resp.StatusCode != http.StatusOK {
return InfoServiceAccountResp{}, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return InfoServiceAccountResp{}, err
}
var infoResp InfoServiceAccountResp
if err = json.Unmarshal(data, &infoResp); err != nil {
return InfoServiceAccountResp{}, err
}
return infoResp, nil
}
// DeleteServiceAccount - delete a specified service account. The server will reject
// the request if the service account does not belong to the user initiating the request
func (adm *AdminClient) DeleteServiceAccount(ctx context.Context, serviceAccount string) error {
queryValues := url.Values{}
queryValues.Set("accessKey", serviceAccount)
reqData := requestData{
relPath: adminAPIPrefix + "/delete-service-account",
queryValues: queryValues,
}
// Execute DELETE on /minio/admin/v3/delete-service-account
resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// TemporaryAccountInfoResp is the response body of the info temporary call
type TemporaryAccountInfoResp InfoServiceAccountResp
// TemporaryAccountInfo - returns the info of a temporary account
func (adm *AdminClient) TemporaryAccountInfo(ctx context.Context, accessKey string) (TemporaryAccountInfoResp, error) {
queryValues := url.Values{}
queryValues.Set("accessKey", accessKey)
reqData := requestData{
relPath: adminAPIPrefix + "/temporary-account-info",
queryValues: queryValues,
}
// Execute GET on /minio/admin/v3/temporary-account-info
resp, err := adm.executeMethod(ctx, http.MethodGet, reqData)
defer closeResponse(resp)
if err != nil {
return TemporaryAccountInfoResp{}, err
}
if resp.StatusCode != http.StatusOK {
return TemporaryAccountInfoResp{}, httpRespToErrorResponse(resp)
}
data, err := DecryptData(adm.getSecretKey(), resp.Body)
if err != nil {
return TemporaryAccountInfoResp{}, err
}
var infoResp TemporaryAccountInfoResp
if err = json.Unmarshal(data, &infoResp); err != nil {
return TemporaryAccountInfoResp{}, err
}
return infoResp, nil
}
// User provider types
const (
BuiltinProvider = "builtin"
LDAPProvider = "ldap"
OpenIDProvider = "openid"
K8SProvider = "k8s"
CertificateProvider = "tls"
CustomTokenProvider = "custom"
)
// RevokeTokensReq is the request options of the revoke-tokens admin call.
// If User is empty, the requestor's own tokens are revoked.
// If the requestor is an STS credential, leaving TokenRevokeType empty revokes
// tokens of the requestor's own type.
type RevokeTokensReq struct {
User string `json:"user"`
TokenRevokeType string `json:"tokenRevokeType"`
FullRevoke bool `json:"fullRevoke"`
}
func (r *RevokeTokensReq) Validate() error {
if r.User != "" && r.TokenRevokeType == "" && !r.FullRevoke {
return errors.New("one of TokenRevokeType or FullRevoke must be set when User is set")
}
if r.TokenRevokeType != "" && r.FullRevoke {
return errors.New("only one of TokenRevokeType or FullRevoke must be set, not both")
}
return nil
}
func (adm *AdminClient) revokeTokens(ctx context.Context, opts RevokeTokensReq, provider string) error {
queryValues := url.Values{}
queryValues.Set("user", opts.User)
queryValues.Set("tokenRevokeType", opts.TokenRevokeType)
if opts.FullRevoke {
queryValues.Set("fullRevoke", "true")
}
reqData := requestData{
relPath: adminAPIPrefix + "/revoke-tokens/" + provider,
queryValues: queryValues,
}
// Execute POST on /minio/admin/v3/revoke-tokens/{provider}
resp, err := adm.executeMethod(ctx, http.MethodPost, reqData)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp)
}
return nil
}
// RevokeTokens - revokes tokens for the specified builtin user, or
// for an external (LDAP, OpenID, etc.) user when the request is sent with one
// of that user's STS credentials.
func (adm *AdminClient) RevokeTokens(ctx context.Context, opts RevokeTokensReq) error {
return adm.revokeTokens(ctx, opts, BuiltinProvider)
}
// RevokeTokensLDAP - revokes tokens for the specified LDAP user.
func (adm *AdminClient) RevokeTokensLDAP(ctx context.Context, opts RevokeTokensReq) error {
return adm.revokeTokens(ctx, opts, LDAPProvider)
}
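// Example (editor's sketch): revoking every token issued to a builtin user.
// The user name and credentials are placeholders; madmin.New is declared
// elsewhere in this module.
//
//	package main
//
//	import (
//		"context"
//		"log"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		adm, err := madmin.New("minio.example.net:9000", "ADMIN-KEY", "ADMIN-SECRET", true)
//		if err != nil {
//			log.Fatal(err)
//		}
//		err = adm.RevokeTokens(context.Background(), madmin.RevokeTokensReq{
//			User:       "newuser",
//			FullRevoke: true, // revoke all token types for this user
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//	}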
golang-github-minio-madmin-go-3.0.104/utils.go 0000664 0000000 0000000 00000011342 14774251704 0021070 0 ustar 00root root 0000000 0000000 //
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
//
package madmin
import (
"io"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
//msgp:clearomitted
//msgp:tag json
//go:generate msgp
// AdminAPIVersion - admin api version used in the request.
const (
AdminAPIVersion = "v3"
AdminAPIVersionV2 = "v2"
adminAPIPrefix = "/" + AdminAPIVersion
kmsAPIVersion = "v1"
kmsAPIPrefix = "/" + kmsAPIVersion
)
// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
host, _, err := net.SplitHostPort(endpoint)
if err != nil {
return nil, err
}
if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
}
// If secure is false, use 'http' scheme.
scheme := "https"
if !secure {
scheme = "http"
}
// Strip the obvious :443 and :80 from the endpoint
// to avoid the signature mismatch error.
if secure && strings.HasSuffix(endpoint, ":443") {
endpoint = strings.TrimSuffix(endpoint, ":443")
}
if !secure && strings.HasSuffix(endpoint, ":80") {
endpoint = strings.TrimSuffix(endpoint, ":80")
}
// Construct a secured endpoint URL.
endpointURLStr := scheme + "://" + endpoint
endpointURL, err := url.Parse(endpointURLStr)
if err != nil {
return nil, err
}
// Validate incoming endpoint URL.
if err := isValidEndpointURL(endpointURL.String()); err != nil {
return nil, err
}
return endpointURL, nil
}
// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL string) error {
if endpointURL == "" {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
url, err := url.Parse(endpointURL)
if err != nil {
return ErrInvalidArgument("Endpoint url cannot be parsed.")
}
if url.Path != "/" && url.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
return nil
}
// closeResponse closes a non-nil response along with its Body.
// It is a convenient wrapper that drains any remaining data from the response body.
//
// Subsequently this allows golang http RoundTripper
// to re-use the same connection for future requests.
func closeResponse(resp *http.Response) {
// Callers should close resp.Body when done reading from it.
// If resp.Body is not closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
if resp != nil && resp.Body != nil {
// Drain any remaining Body and then close the connection.
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
// TimedAction contains a number of actions and their accumulated duration in nanoseconds.
type TimedAction struct {
Count uint64 `json:"count"`
AccTime uint64 `json:"acc_time_ns"`
MinTime uint64 `json:"min_ns,omitempty"`
MaxTime uint64 `json:"max_ns,omitempty"`
Bytes uint64 `json:"bytes,omitempty"`
}
// Avg returns the average time spent on the action.
func (t TimedAction) Avg() time.Duration {
if t.Count == 0 {
return 0
}
return time.Duration(t.AccTime / t.Count)
}
// AvgBytes returns the average number of bytes per action.
func (t TimedAction) AvgBytes() uint64 {
if t.Count == 0 {
return 0
}
return t.Bytes / t.Count
}
// Merge other into t.
func (t *TimedAction) Merge(other TimedAction) {
t.Count += other.Count
t.AccTime += other.AccTime
t.Bytes += other.Bytes
if t.Count == 0 {
t.MinTime = other.MinTime
}
if other.Count > 0 {
t.MinTime = min(t.MinTime, other.MinTime)
}
t.MaxTime = max(t.MaxTime, other.MaxTime)
}
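// Example (editor's illustrative sketch): merging two TimedAction samples and
// reading the derived averages. Everything here is either the standard library
// or declared in this file; the import path is assumed from this module's
// major version.
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//
//		"github.com/minio/madmin-go/v3"
//	)
//
//	func main() {
//		a := madmin.TimedAction{Count: 2, AccTime: uint64(3 * time.Millisecond), Bytes: 2048}
//		b := madmin.TimedAction{Count: 1, AccTime: uint64(1 * time.Millisecond), Bytes: 512}
//		a.Merge(b)                // a now covers all three actions
//		fmt.Println(a.Avg())      // average duration per action
//		fmt.Println(a.AvgBytes()) // average bytes per action
//	}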
golang-github-minio-madmin-go-3.0.104/utils_gen.go 0000664 0000000 0000000 00000015021 14774251704 0021717 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *TimedAction) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "acc_time_ns":
z.AccTime, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "AccTime")
return
}
case "min_ns":
z.MinTime, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "MinTime")
return
}
zb0001Mask |= 0x1
case "max_ns":
z.MaxTime, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "MaxTime")
return
}
zb0001Mask |= 0x2
case "bytes":
z.Bytes, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "Bytes")
return
}
zb0001Mask |= 0x4
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.MinTime = 0
}
if (zb0001Mask & 0x2) == 0 {
z.MaxTime = 0
}
if (zb0001Mask & 0x4) == 0 {
z.Bytes = 0
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *TimedAction) EncodeMsg(en *msgp.Writer) (err error) {
// check for omitted fields
zb0001Len := uint32(5)
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
if z.MinTime == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.MaxTime == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Bytes == 0 {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
// skip if no fields are to be emitted
if zb0001Len != 0 {
// write "count"
err = en.Append(0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteUint64(z.Count)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
// write "acc_time_ns"
err = en.Append(0xab, 0x61, 0x63, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.AccTime)
if err != nil {
err = msgp.WrapError(err, "AccTime")
return
}
if (zb0001Mask & 0x4) == 0 { // if not omitted
// write "min_ns"
err = en.Append(0xa6, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.MinTime)
if err != nil {
err = msgp.WrapError(err, "MinTime")
return
}
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// write "max_ns"
err = en.Append(0xa6, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.MaxTime)
if err != nil {
err = msgp.WrapError(err, "MaxTime")
return
}
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// write "bytes"
err = en.Append(0xa5, 0x62, 0x79, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.Bytes)
if err != nil {
err = msgp.WrapError(err, "Bytes")
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *TimedAction) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// check for omitted fields
zb0001Len := uint32(5)
var zb0001Mask uint8 /* 5 bits */
_ = zb0001Mask
if z.MinTime == 0 {
zb0001Len--
zb0001Mask |= 0x4
}
if z.MaxTime == 0 {
zb0001Len--
zb0001Mask |= 0x8
}
if z.Bytes == 0 {
zb0001Len--
zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
// skip if no fields are to be emitted
if zb0001Len != 0 {
// string "count"
o = append(o, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74)
o = msgp.AppendUint64(o, z.Count)
// string "acc_time_ns"
o = append(o, 0xab, 0x61, 0x63, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73)
o = msgp.AppendUint64(o, z.AccTime)
if (zb0001Mask & 0x4) == 0 { // if not omitted
// string "min_ns"
o = append(o, 0xa6, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x73)
o = msgp.AppendUint64(o, z.MinTime)
}
if (zb0001Mask & 0x8) == 0 { // if not omitted
// string "max_ns"
o = append(o, 0xa6, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x73)
o = msgp.AppendUint64(o, z.MaxTime)
}
if (zb0001Mask & 0x10) == 0 { // if not omitted
// string "bytes"
o = append(o, 0xa5, 0x62, 0x79, 0x74, 0x65, 0x73)
o = msgp.AppendUint64(o, z.Bytes)
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *TimedAction) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0001Mask uint8 /* 3 bits */
_ = zb0001Mask
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "count":
z.Count, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Count")
return
}
case "acc_time_ns":
z.AccTime, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "AccTime")
return
}
case "min_ns":
z.MinTime, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "MinTime")
return
}
zb0001Mask |= 0x1
case "max_ns":
z.MaxTime, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "MaxTime")
return
}
zb0001Mask |= 0x2
case "bytes":
z.Bytes, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bytes")
return
}
zb0001Mask |= 0x4
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
// Clear omitted fields.
if zb0001Mask != 0x7 {
if (zb0001Mask & 0x1) == 0 {
z.MinTime = 0
}
if (zb0001Mask & 0x2) == 0 {
z.MaxTime = 0
}
if (zb0001Mask & 0x4) == 0 {
z.Bytes = 0
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TimedAction) Msgsize() (s int) {
s = 1 + 6 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.Uint64Size + 7 + msgp.Uint64Size + 6 + msgp.Uint64Size
return
}
golang-github-minio-madmin-go-3.0.104/utils_gen_test.go 0000664 0000000 0000000 00000004455 14774251704 0022767 0 ustar 00root root 0000000 0000000 package madmin
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalTimedAction(t *testing.T) {
v := TimedAction{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgTimedAction(b *testing.B) {
v := TimedAction{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgTimedAction(b *testing.B) {
v := TimedAction{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalTimedAction(b *testing.B) {
v := TimedAction{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeTimedAction(t *testing.T) {
v := TimedAction{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeTimedAction Msgsize() is inaccurate")
}
vn := TimedAction{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeTimedAction(b *testing.B) {
v := TimedAction{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeTimedAction(b *testing.B) {
v := TimedAction{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
golang-github-minio-madmin-go-3.0.104/xtime/ 0000775 0000000 0000000 00000000000 14774251704 0020526 5 ustar 00root root 0000000 0000000 golang-github-minio-madmin-go-3.0.104/xtime/time.go 0000664 0000000 0000000 00000007523 14774251704 0022022 0 ustar 00root root 0000000 0000000 // Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
package xtime
import (
"fmt"
"time"
"github.com/tinylib/msgp/msgp"
"gopkg.in/yaml.v3"
)
// Additional durations; a day is considered to be 24 hours
const (
Day time.Duration = time.Hour * 24
Week = Day * 7
)
var unitMap = map[string]int64{
"ns": int64(time.Nanosecond),
"us": int64(time.Microsecond),
"µs": int64(time.Microsecond), // U+00B5 = micro symbol
"μs": int64(time.Microsecond), // U+03BC = Greek letter mu
"ms": int64(time.Millisecond),
"s": int64(time.Second),
"m": int64(time.Minute),
"h": int64(time.Hour),
"d": int64(Day),
"w": int64(Week),
}
// ParseDuration parses a duration string.
// The following code is borrowed from time.ParseDuration
// https://cs.opensource.google/go/go/+/refs/tags/go1.22.5:src/time/format.go;l=1589
// This function extends time.ParseDuration by adding support for days and weeks.
// It must only be used when days or weeks are necessary inputs;
// in all other cases it is preferred that a user uses Go's time.ParseDuration.
func ParseDuration(s string) (time.Duration, error) {
dur, err := time.ParseDuration(s) // Parse via standard Go, if success return right away.
if err == nil {
return dur, nil
}
return parseDuration(s)
}
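// Example (editor's sketch): parsing a duration that uses the extended day and
// week units. The import path is assumed from this module's major version.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/minio/madmin-go/v3/xtime"
//	)
//
//	func main() {
//		d, err := xtime.ParseDuration("1w2d3h")
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(d) // 219h0m0s (7*24h + 2*24h + 3h)
//	}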
// Duration is a wrapper around time.Duration that supports YAML and JSON
type Duration time.Duration
// D will return as a time.Duration.
func (d Duration) D() time.Duration {
return time.Duration(d)
}
// UnmarshalYAML implements yaml.Unmarshaler
func (d *Duration) UnmarshalYAML(value *yaml.Node) error {
if value.Kind == yaml.ScalarNode {
dur, err := ParseDuration(value.Value)
if err != nil {
return err
}
*d = Duration(dur)
return nil
}
return fmt.Errorf("unable to unmarshal %s", value.Tag)
}
// UnmarshalJSON implements json.Unmarshaler
func (d *Duration) UnmarshalJSON(bs []byte) error {
if len(bs) <= 2 {
return nil
}
dur, err := ParseDuration(string(bs[1 : len(bs)-1]))
if err != nil {
return err
}
*d = Duration(dur)
return nil
}
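// Example (editor's sketch): decoding a YAML field into Duration, which
// accepts the extended day/week units. The import path is assumed from this
// module's major version.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/minio/madmin-go/v3/xtime"
//		"gopkg.in/yaml.v3"
//	)
//
//	type config struct {
//		Interval xtime.Duration `yaml:"interval"`
//	}
//
//	func main() {
//		var c config
//		if err := yaml.Unmarshal([]byte("interval: 1w1d"), &c); err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(c.Interval.D()) // 192h0m0s
//	}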
// MarshalMsg appends the marshaled form of the object to the provided
// byte slice, returning the extended slice and any errors encountered.
func (d Duration) MarshalMsg(bytes []byte) ([]byte, error) {
return msgp.AppendInt64(bytes, int64(d)), nil
}
// UnmarshalMsg unmarshals the object from binary,
// returning any leftover bytes and any errors encountered.
func (d *Duration) UnmarshalMsg(b []byte) ([]byte, error) {
i, rem, err := msgp.ReadInt64Bytes(b)
*d = Duration(i)
return rem, err
}
// EncodeMsg writes itself as MessagePack using a *msgp.Writer.
func (d Duration) EncodeMsg(w *msgp.Writer) error {
return w.WriteInt64(int64(d))
}
// DecodeMsg decodes itself as MessagePack using a *msgp.Reader.
func (d *Duration) DecodeMsg(reader *msgp.Reader) error {
i, err := reader.ReadInt64()
*d = Duration(i)
return err
}
// Msgsize returns the maximum serialized size in bytes.
func (d Duration) Msgsize() int {
return msgp.Int64Size
}
// MarshalYAML implements yaml.Marshaler - Converts duration to human-readable format (e.g., "2h", "30m")
func (d Duration) MarshalYAML() (interface{}, error) {
return time.Duration(d).String(), nil
}
golang-github-minio-madmin-go-3.0.104/xtime/time_contrib.go 0000664 0000000 0000000 00000007205 14774251704 0023537 0 ustar 00root root 0000000 0000000 // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the go.dev/LICENSE file.
package xtime
import (
"errors"
"strconv"
"time"
)
// function borrowed from https://cs.opensource.google/go/go/+/refs/tags/go1.22.5:src/time/format.go;l=1589
// supports days and weeks such as '1d1ms', '1w1ms'
func parseDuration(s string) (time.Duration, error) {
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
orig := s
var d int64
neg := false
// Consume [-+]?
if s != "" {
c := s[0]
if c == '-' || c == '+' {
neg = c == '-'
s = s[1:]
}
}
// Special case: if all that is left is "0", this is zero.
if s == "0" {
return 0, nil
}
if s == "" {
return 0, errors.New("invalid duration " + strconv.Quote(orig))
}
for s != "" {
var (
v, f int64 // integers before, after decimal point
scale float64 = 1 // value = v + f/scale
)
var err error
// The next character must be [0-9.]
if s[0] != '.' && ('0' > s[0] || s[0] > '9') {
return 0, errors.New("invalid duration " + strconv.Quote(orig))
}
// Consume [0-9]*
pl := len(s)
v, s, err = leadingInt(s)
if err != nil {
return 0, errors.New("invalid duration " + strconv.Quote(orig))
}
pre := pl != len(s) // whether we consumed anything before a period
// Consume (\.[0-9]*)?
post := false
if s != "" && s[0] == '.' {
s = s[1:]
pl := len(s)
f, scale, s = leadingFraction(s)
post = pl != len(s)
}
if !pre && !post {
// no digits (e.g. ".s" or "-.s")
return 0, errors.New("invalid duration " + strconv.Quote(orig))
}
// Consume unit.
i := 0
for ; i < len(s); i++ {
c := s[i]
if c == '.' || '0' <= c && c <= '9' {
break
}
}
if i == 0 {
return 0, errors.New("missing unit in duration " + strconv.Quote(orig))
}
u := s[:i]
s = s[i:]
unit, ok := unitMap[u]
if !ok {
return 0, errors.New("unknown unit " + strconv.Quote(u) + " in duration " + strconv.Quote(orig))
}
if v > (1<<63-1)/unit {
// overflow
return 0, errors.New("invalid duration " + strconv.Quote(orig))
}
v *= unit
if f > 0 {
// float64 is needed to be nanosecond accurate for fractions of hours.
// v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
v += int64(float64(f) * (float64(unit) / scale))
if v < 0 {
// overflow
return 0, errors.New("invalid duration " + strconv.Quote(orig))
}
}
d += v
if d < 0 {
// overflow
return 0, errors.New("invalid duration " + strconv.Quote(orig))
}
}
if neg {
d = -d
}
return time.Duration(d), nil
}
var errLeadingInt = errors.New("bad [0-9]*") // never printed
// leadingInt consumes the leading [0-9]* from s.
func leadingInt(s string) (x int64, rem string, err error) {
i := 0
for ; i < len(s); i++ {
c := s[i]
if c < '0' || c > '9' {
break
}
if x > (1<<63-1)/10 {
// overflow
return 0, "", errLeadingInt
}
x = x*10 + int64(c) - '0'
if x < 0 {
// overflow
return 0, "", errLeadingInt
}
}
return x, s[i:], nil
}
// leadingFraction consumes the leading [0-9]* from s.
// It is used only for fractions, so does not return an error on overflow,
// it just stops accumulating precision.
func leadingFraction(s string) (x int64, scale float64, rem string) {
i := 0
scale = 1
overflow := false
for ; i < len(s); i++ {
c := s[i]
if c < '0' || c > '9' {
break
}
if overflow {
continue
}
if x > (1<<63-1)/10 {
// It's possible for overflow to give a positive number, so take care.
overflow = true
continue
}
y := x*10 + int64(c) - '0'
if y < 0 {
overflow = true
continue
}
x = y
scale *= 10
}
return x, scale, s[i:]
}
golang-github-minio-madmin-go-3.0.104/xtime/time_contrib_test.go 0000664 0000000 0000000 00000010407 14774251704 0024574 0 ustar 00root root 0000000 0000000 // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the go.dev/LICENSE file.
package xtime
import (
"fmt"
"strings"
"testing"
"time"
)
var parseDurationTests = []struct {
in string
want time.Duration
}{
// simple
{"0", 0},
{"5s", 5 * time.Second},
{"30s", 30 * time.Second},
{"1478s", 1478 * time.Second},
// sign
{"-5s", -5 * time.Second},
{"+5s", 5 * time.Second},
{"-0", 0},
{"+0", 0},
// decimal
{"5.0s", 5 * time.Second},
{"5.6s", 5*time.Second + 600*time.Millisecond},
{"5.s", 5 * time.Second},
{".5s", 500 * time.Millisecond},
{"1.0s", 1 * time.Second},
{"1.00s", 1 * time.Second},
{"1.004s", 1*time.Second + 4*time.Millisecond},
{"1.0040s", 1*time.Second + 4*time.Millisecond},
{"100.00100s", 100*time.Second + 1*time.Millisecond},
// different units
{"10ns", 10 * time.Nanosecond},
{"11us", 11 * time.Microsecond},
{"12µs", 12 * time.Microsecond}, // U+00B5
{"12μs", 12 * time.Microsecond}, // U+03BC
{"13ms", 13 * time.Millisecond},
{"14s", 14 * time.Second},
{"15m", 15 * time.Minute},
{"16h", 16 * time.Hour},
// composite durations
{"3h30m", 3*time.Hour + 30*time.Minute},
{"10.5s4m", 4*time.Minute + 10*time.Second + 500*time.Millisecond},
{"-2m3.4s", -(2*time.Minute + 3*time.Second + 400*time.Millisecond)},
{"1h2m3s4ms5us6ns", 1*time.Hour + 2*time.Minute + 3*time.Second + 4*time.Millisecond + 5*time.Microsecond + 6*time.Nanosecond},
{"39h9m14.425s", 39*time.Hour + 9*time.Minute + 14*time.Second + 425*time.Millisecond},
// large value
{"52763797000ns", 52763797000 * time.Nanosecond},
// more than 9 digits after decimal point, see https://golang.org/issue/6617
{"0.3333333333333333333h", 20 * time.Minute},
// 9007199254740993 = 1<<53+1 cannot be stored precisely in a float64
{"9007199254740993ns", (1<<53 + 1) * time.Nanosecond},
// largest duration that can be represented by int64 in nanoseconds
{"9223372036854775807ns", (1<<63 - 1) * time.Nanosecond},
{"9223372036854775.807us", (1<<63 - 1) * time.Nanosecond},
{"9223372036s854ms775us807ns", (1<<63 - 1) * time.Nanosecond},
{"-9223372036854775808ns", -1 << 63 * time.Nanosecond},
{"-9223372036854775.808us", -1 << 63 * time.Nanosecond},
{"-9223372036s854ms775us808ns", -1 << 63 * time.Nanosecond},
// largest negative value
{"-9223372036854775808ns", -1 << 63 * time.Nanosecond},
// largest negative round trip value, see https://golang.org/issue/48629
{"-2562047h47m16.854775808s", -1 << 63 * time.Nanosecond},
// huge string; issue 15011.
{"0.100000000000000000000h", 6 * time.Minute},
// This value tests the first overflow check in leadingFraction.
{"0.830103483285477580700h", 49*time.Minute + 48*time.Second + 372539827*time.Nanosecond},
{"1w1d1h", 1*7*24*time.Hour + 1*24*time.Hour + 1*time.Hour},
{"0.1w0.1d0.1h", time.Hour*19 + time.Minute*18},
}
func TestParseDuration(t *testing.T) {
for _, tc := range parseDurationTests {
d, err := ParseDuration(tc.in)
if err != nil || d != tc.want {
t.Errorf("ParseDuration(%q) = %v, %v, want %v, nil", tc.in, d, err, tc.want)
}
}
}
var parseDurationErrorTests = []struct {
in string
expect string
}{
// invalid
{"", `""`},
{"3", `"3"`},
{"-", `"-"`},
{"s", `"s"`},
{".", `"."`},
{"-.", `"-."`},
{".s", `".s"`},
{"+.s", `"+.s"`},
{"\x85\x85", `"\x85\x85"`},
{"\xffff", `"\xffff"`},
{"hello \xffff world", `"hello \xffff world"`},
{"\uFFFD", `"�"`}, // utf8.RuneError
{"\uFFFD hello \uFFFD world", `"� hello � world"`}, // utf8.RuneError
// overflow
{"9223372036854775810ns", `"9223372036854775810ns"`},
{"9223372036854775808ns", `"9223372036854775808ns"`},
{"-9223372036854775809ns", `"-9223372036854775809ns"`},
{"9223372036854776us", `"9223372036854776us"`},
{"3000000h", `"3000000h"`},
{"9223372036854775.808us", `"9223372036854775.808us"`},
{"9223372036854ms775us808ns", `"9223372036854ms775us808ns"`},
}
func TestParseDurationErrors(t *testing.T) {
for _, tc := range parseDurationErrorTests {
_, err := ParseDuration(tc.in)
if err == nil {
t.Errorf("ParseDuration(%q) = _, nil, want _, non-nil", tc.in)
} else if !strings.Contains(err.Error(), tc.expect) {
fmt.Println(err)
t.Errorf("ParseDuration(%q) = _, %q, error does not contain %q", tc.in, err, tc.expect)
}
}
}
golang-github-minio-madmin-go-3.0.104/xtime/time_unmarshal_test.go 0000664 0000000 0000000 00000006627 14774251704 0025137 0 ustar 00root root 0000000 0000000 // Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
package xtime
import (
"bytes"
"encoding/json"
"testing"
"time"
"github.com/tinylib/msgp/msgp"
"gopkg.in/yaml.v3"
)
type testDuration struct {
A string `yaml:"a" json:"a"`
Dur Duration `yaml:"dur" json:"dur"`
DurationPointer *Duration `yaml:"durationPointer" json:"durationPointer"`
}
func TestDuration_Unmarshal(t *testing.T) {
jsonData := []byte(`{"a":"1s","dur":"1w1s","durationPointer":"7d1s"}`)
yamlData := []byte(`a: 1s
dur: 1w1s
durationPointer: 7d1s`)
yamlTest := testDuration{}
if err := yaml.Unmarshal(yamlData, &yamlTest); err != nil {
t.Fatal(err)
}
jsonTest := testDuration{}
if err := json.Unmarshal(jsonData, &jsonTest); err != nil {
t.Fatal(err)
}
jsonData = []byte(`{"a":"1s","dur":"1w1s"}`)
yamlData = []byte(`a: 1s
dur: 1w1s`)
if err := yaml.Unmarshal(yamlData, &yamlTest); err != nil {
t.Fatal(err)
}
if err := json.Unmarshal(jsonData, &jsonTest); err != nil {
t.Fatal(err)
}
}
func TestMarshalUnmarshalDuration(t *testing.T) {
v := Duration(time.Hour)
var vn Duration
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := vn.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
if vn != v {
t.Errorf("v=%#v; want=%#v", vn, v)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func TestEncodeDecodeDuration(t *testing.T) {
v := Duration(time.Hour)
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDuration Msgsize() is inaccurate")
}
var vn Duration
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
if vn != v {
t.Errorf("v=%#v; want=%#v", vn, v)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func TestDuration_Marshal(t *testing.T) {
type testDuration struct {
A Duration `json:"a" yaml:"a"`
Dur Duration `json:"dur" yaml:"dur"`
DurationPointer *Duration `json:"durationPointer,omitempty" yaml:"durationPointer,omitempty"`
}
d1 := Duration(time.Second)
d2 := Duration(0)
d3 := Duration(time.Hour*24*7 + time.Second)
testData := testDuration{
A: d1,
Dur: d2,
DurationPointer: &d3,
}
yamlData, err := yaml.Marshal(&testData)
if err != nil {
t.Fatalf("Failed to marshal YAML: %v", err)
}
expected := `a: 1s
dur: 0s
durationPointer: 168h0m1s
`
if string(yamlData) != expected {
t.Errorf("Expected:\n%s\nGot:\n%s", expected, string(yamlData))
}
}