pax_global_header00006660000000000000000000000064140043266530014515gustar00rootroot0000000000000052 comment=2427a79a004e759b853799a2e6abb58dc622c363 klog-2.5.0/000077500000000000000000000000001400432665300124555ustar00rootroot00000000000000klog-2.5.0/.github/000077500000000000000000000000001400432665300140155ustar00rootroot00000000000000klog-2.5.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001400432665300162005ustar00rootroot00000000000000klog-2.5.0/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000005221400432665300206710ustar00rootroot00000000000000--- name: Bug report about: Tell us about a problem you are experiencing --- /kind bug **What steps did you take and what happened:** [A clear and concise description of what the bug is.] **What did you expect to happen:** **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] klog-2.5.0/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000004641400432665300217310ustar00rootroot00000000000000--- name: Feature enhancement request about: Suggest an idea for this project --- /kind feature **Describe the solution you'd like** [A clear and concise description of what you want to happen.] **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] klog-2.5.0/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000027301400432665300176200ustar00rootroot00000000000000 **What this PR does / why we need it**: **Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*: Fixes # **Special notes for your reviewer**: _Please confirm that if this PR changes any image versions, then that's the sole change this PR makes._ **Release note**: ```release-note ```klog-2.5.0/.github/workflows/000077500000000000000000000000001400432665300160525ustar00rootroot00000000000000klog-2.5.0/.github/workflows/test.yml000066400000000000000000000031471400432665300175610ustar00rootroot00000000000000name: Test on: [push, pull_request] jobs: test: strategy: matrix: go-versions: [1.12.x, 1.13.x, 1.14.x] platform: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.platform }} steps: - name: Install Go uses: actions/setup-go@v1 with: go-version: ${{ matrix.go-version }} - name: Checkout code uses: actions/checkout@v2 - name: Test run: | go get -t -v ./... go test -v -race ./... 
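  # Note (descriptive, not part of the original workflow): the lint and apidiff
  # jobs below run on ubuntu-latest only, and apidiff is gated on github.base_ref,
  # so it effectively runs only for pull requests that have a base branch to diff against.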
lint: runs-on: ubuntu-latest steps: - name: Install Go uses: actions/setup-go@v1 - name: Checkout code uses: actions/checkout@v2 - name: Lint run: | docker run --rm -v `pwd`:/go/src/k8s.io/klog -w /go/src/k8s.io/klog \ golangci/golangci-lint:v1.23.8 golangci-lint run --disable-all -v \ -E govet -E misspell -E gofmt -E ineffassign -E golint apidiff: runs-on: ubuntu-latest if: github.base_ref steps: - name: Install Go uses: actions/setup-go@v1 with: go-version: 1.13.x - name: Add GOBIN to PATH run: echo "PATH=$(go env GOPATH)/bin:$PATH" >>$GITHUB_ENV - name: Install dependencies run: GO111MODULE=off go get golang.org/x/exp/cmd/apidiff - name: Checkout old code uses: actions/checkout@v2 with: ref: ${{ github.base_ref }} path: "old" - name: Checkout new code uses: actions/checkout@v2 with: path: "new" - name: APIDiff run: ./hack/verify-apidiff.sh -d ../old working-directory: "new" klog-2.5.0/.gitignore000066400000000000000000000003271400432665300144470ustar00rootroot00000000000000# OSX leaves these everywhere on SMB shares ._* # OSX trash .DS_Store # Eclipse files .classpath .project .settings/** # Files generated by JetBrains IDEs, e.g. IntelliJ IDEA .idea/ *.iml # Vscode files .vscode klog-2.5.0/CONTRIBUTING.md000066400000000000000000000030501400432665300147040ustar00rootroot00000000000000# Contributing Guidelines Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: _As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ ## Getting Started We have full documentation on how to get started contributing here: - [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests - [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) - [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers ## Mentorship - [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! ## Contact Information - [Slack](https://kubernetes.slack.com/messages/sig-architecture) - [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) klog-2.5.0/LICENSE000066400000000000000000000240411400432665300134630ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. klog-2.5.0/OWNERS000066400000000000000000000004261400432665300134170ustar00rootroot00000000000000# See the OWNERS docs at https://go.k8s.io/owners reviewers: - jayunit100 - hoegaarden - andyxning - neolit123 - pohly - yagonobre - vincepri - detiber approvers: - dims - thockin - justinsb - tallclair - piosz - brancz - DirectXMan12 - lavalamp klog-2.5.0/README.md000066400000000000000000000106221400432665300137350ustar00rootroot00000000000000klog ==== klog is a permanent fork of https://github.com/golang/glog. ## Why was klog created? The decision to create klog was one that wasn't made lightly, but it was necessary due to some drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: > The code in this repo [...] 
is not itself under development This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. Historical context is available here: * https://github.com/kubernetes/kubernetes/issues/61006 * https://github.com/kubernetes/kubernetes/issues/70264 * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ ---- How to use klog =============== - Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"` - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. ### Coexisting with klog/v2 See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2. ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. ## Community, discussion, contribution, and support Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). You can reach the maintainers of this project at: - [Slack](https://kubernetes.slack.com/messages/klog) - [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) ### Code of conduct Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). ---- glog ==== Leveled execution logs for Go. This is an efficient pure Go implementation of leveled logs in the manner of the open source C++ package https://github.com/google/glog By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the -vmodule flag, the package also provides fine-grained control over logging at the file level. The comment from glog.go introduces the ideas: Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
Basic examples: glog.Info("Prepare to repel boarders") glog.Fatalf("Initialization failed: %s", err) See the documentation for the V function for an explanation of these examples: if glog.V(2) { glog.Info("Starting transaction...") } glog.V(2).Infoln("Processed", nItems, "elements") The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored. Send bug reports to golang-nuts@googlegroups.com. klog-2.5.0/RELEASE.md000066400000000000000000000007741400432665300140670ustar00rootroot00000000000000# Release Process The `klog` is released on an as-needed basis. The process is as follows: 1. An issue is proposing a new release with a changelog since the last release 1. All [OWNERS](OWNERS) must LGTM this release 1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` 1. The release issue is closed 1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` klog-2.5.0/SECURITY.md000066400000000000000000000020551400432665300142500ustar00rootroot00000000000000# Security Policy ## Security Announcements Join the [kubernetes-security-announce] group for security and vulnerability announcements. You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss]. ## Reporting a Vulnerability Instructions for reporting a vulnerability can be found on the [Kubernetes Security and Disclosure Information] page. ## Supported Versions Information about supported Kubernetes versions can be found on the [Kubernetes version and version skew support policy] page on the Kubernetes website. [kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce [kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 [Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions [Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability klog-2.5.0/SECURITY_CONTACTS000066400000000000000000000011111400432665300151370ustar00rootroot00000000000000# Defined below are the security contacts for this repo. # # They are the contact point for the Product Security Committee to reach out # to for triaging and handling of incoming issues. # # The below names agree to abide by the # [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) # and will be removed and replaced if they violate that agreement. 
# # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ dims thockin justinsb tallclair piosz brancz DirectXMan12 lavalamp klog-2.5.0/code-of-conduct.md000066400000000000000000000002241400432665300157460ustar00rootroot00000000000000# Kubernetes Community Code of Conduct Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) klog-2.5.0/examples/000077500000000000000000000000001400432665300142735ustar00rootroot00000000000000klog-2.5.0/examples/coexist_glog/000077500000000000000000000000001400432665300167615ustar00rootroot00000000000000klog-2.5.0/examples/coexist_glog/coexist_glog.go000066400000000000000000000010171400432665300217750ustar00rootroot00000000000000package main import ( "flag" "github.com/golang/glog" "k8s.io/klog/v2" ) func main() { flag.Set("alsologtostderr", "true") flag.Parse() klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) klog.InitFlags(klogFlags) // Sync the glog and klog flags. flag.CommandLine.VisitAll(func(f1 *flag.Flag) { f2 := klogFlags.Lookup(f1.Name) if f2 != nil { value := f1.Value.String() f2.Value.Set(value) } }) glog.Info("hello from glog!") klog.Info("nice to meet you, I'm klog") glog.Flush() klog.Flush() } klog-2.5.0/examples/coexist_klog_v1_and_v2/000077500000000000000000000000001400432665300206245ustar00rootroot00000000000000klog-2.5.0/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go000066400000000000000000000053171400432665300255120ustar00rootroot00000000000000package main import ( "flag" klogv1 "k8s.io/klog" klogv2 "k8s.io/klog/v2" ) // OutputCallDepth is the stack depth where we can find the origin of this call const OutputCallDepth = 6 // DefaultPrefixLength is the length of the log prefix that we have to strip out const DefaultPrefixLength = 53 // klogWriter is used in SetOutputBySeverity call below to redirect // any calls to klogv1 to end up in klogv2 type klogWriter struct{} func (kw klogWriter) Write(p []byte) (n int, err error) { if len(p) < DefaultPrefixLength { klogv2.InfoDepth(OutputCallDepth, string(p)) return len(p), nil } if p[0] == 'I' { klogv2.InfoDepth(OutputCallDepth, string(p[DefaultPrefixLength:])) } else if p[0] == 'W' { klogv2.WarningDepth(OutputCallDepth, string(p[DefaultPrefixLength:])) } else if p[0] == 'E' { klogv2.ErrorDepth(OutputCallDepth, string(p[DefaultPrefixLength:])) } else if p[0] == 'F' { klogv2.FatalDepth(OutputCallDepth, string(p[DefaultPrefixLength:])) } else { klogv2.InfoDepth(OutputCallDepth, string(p[DefaultPrefixLength:])) } return len(p), nil } func main() { // initialize klog/v2, can also bind to a local flagset if desired klogv2.InitFlags(nil) // In this example, we want to show you that all the lines logged // end up in the myfile.log. You do NOT need them in your application // as all these flags are set up from the command line typically flag.Set("logtostderr", "false") // By default klog logs to stderr, switch that off flag.Set("alsologtostderr", "false") // false is default, but this is informative flag.Set("stderrthreshold", "FATAL") // stderrthreshold defaults to ERROR, we don't want anything in stderr flag.Set("log_file", "myfile.log") // log to a file // parse klog/v2 flags flag.Parse() // make sure we flush before exiting defer klogv2.Flush() // BEGIN : hack to redirect klogv1 calls to klog v2 // Tell klog NOT to log into STDERR. 
Otherwise, we risk // certain kinds of API errors getting logged into a directory not // available in a `FROM scratch` Docker container, causing us to abort var klogv1Flags flag.FlagSet klogv1.InitFlags(&klogv1Flags) klogv1Flags.Set("logtostderr", "false") // By default klog v1 logs to stderr, switch that off klogv1Flags.Set("stderrthreshold", "FATAL") // stderrthreshold defaults to ERROR, use this if you // don't want anything in your stderr klogv1.SetOutputBySeverity("INFO", klogWriter{}) // tell klog v1 to use the writer // END : hack to redirect klogv1 calls to klog v2 // Now you can mix klogv1 and v2 in the same code base klogv2.Info("hello from klog (v2)!") klogv1.Info("hello from klog (v1)!") klogv1.Warning("beware from klog (v1)!") klogv1.Error("error from klog (v1)!") klogv2.Info("nice to meet you (v2)") } klog-2.5.0/examples/coexist_klog_v1_and_v2/go.mod000066400000000000000000000001641400432665300217330ustar00rootroot00000000000000module k8s.io/klog/examples/coexist_klog_v1_and_v2 go 1.13 require ( k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.0.0 ) klog-2.5.0/examples/go.mod000066400000000000000000000002231400432665300153760ustar00rootroot00000000000000module example go 1.13 require ( github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b k8s.io/klog/v2 v2.0.0-20200324194303-db919253a3bc ) klog-2.5.0/examples/go.sum000066400000000000000000000011131400432665300154220ustar00rootroot00000000000000github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= k8s.io/klog/v2 v2.0.0-20200324194303-db919253a3bc h1:E/enZ+SqXD3ChluFNvXqlLcUkqMQQDpiyGruRq5pjvY= k8s.io/klog/v2 v2.0.0-20200324194303-db919253a3bc/go.mod h1:q4PVo0BneA7GsUJvFqoEvOCVmYJP0c5Y4VxrAYpJrIk= klog-2.5.0/examples/klogr/000077500000000000000000000000001400432665300154115ustar00rootroot00000000000000klog-2.5.0/examples/klogr/main.go000066400000000000000000000010551400432665300166650ustar00rootroot00000000000000package main import ( "flag" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" ) type myError struct { str string } func (e myError) Error() string { return e.str } func main() { klog.InitFlags(nil) flag.Set("v", "3") flag.Parse() log := klogr.New().WithName("MyName").WithValues("user", "you") log.Info("hello", "val1", 1, "val2", map[string]int{"k": 1}) log.V(3).Info("nice to meet you") log.Error(nil, "uh oh", "trouble", true, "reasons", []float64{0.1, 0.11, 3.14}) log.Error(myError{"an error occurred"}, "goodbye", "code", -1) klog.Flush() } klog-2.5.0/examples/log_file/000077500000000000000000000000001400432665300160535ustar00rootroot00000000000000klog-2.5.0/examples/log_file/usage_log_file.go000066400000000000000000000005021400432665300213430ustar00rootroot00000000000000package main import ( "flag" "k8s.io/klog/v2" ) func main() { klog.InitFlags(nil) // By default klog writes to stderr. Setting logtostderr to false makes klog // write to a log file. 
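	// Optional hint (not part of the original example): the log_file_max_size
	// flag registered by klog.InitFlags caps that file's size in megabytes,
	// with 0 meaning unlimited.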
flag.Set("logtostderr", "false") flag.Set("log_file", "myfile.log") flag.Parse() klog.Info("nice to meet you") klog.Flush() } klog-2.5.0/examples/set_output/000077500000000000000000000000001400432665300165065ustar00rootroot00000000000000klog-2.5.0/examples/set_output/usage_set_output.go000066400000000000000000000005051400432665300224340ustar00rootroot00000000000000package main import ( "bytes" "flag" "fmt" "k8s.io/klog/v2" ) func main() { klog.InitFlags(nil) flag.Set("logtostderr", "false") flag.Set("alsologtostderr", "false") flag.Parse() buf := new(bytes.Buffer) klog.SetOutput(buf) klog.Info("nice to meet you") klog.Flush() fmt.Printf("LOGGED: %s", buf.String()) } klog-2.5.0/go.mod000066400000000000000000000001071400432665300135610ustar00rootroot00000000000000module k8s.io/klog/v2 go 1.13 require github.com/go-logr/logr v0.4.0 klog-2.5.0/go.sum000066400000000000000000000002451400432665300136110ustar00rootroot00000000000000github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= klog-2.5.0/hack/000077500000000000000000000000001400432665300133635ustar00rootroot00000000000000klog-2.5.0/hack/verify-apidiff.sh000077500000000000000000000062071400432665300166330ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright 2020 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail function usage { local script="$(basename $0)" echo >&2 "Usage: ${script} [-r | -d ] This script should be run at the root of a module. -r Compare the exported API of the local working copy with the exported API of the local repo at the specified branch or tag. -d Compare the exported API of the local working copy with the exported API of the specified directory, which should point to the root of a different version of the same module. Examples: ${script} -r master ${script} -r v1.10.0 ${script} -r release-1.10 ${script} -d /path/to/historical/version " exit 1 } ref="" dir="" while getopts r:d: o do case "$o" in r) ref="$OPTARG";; d) dir="$OPTARG";; [?]) usage;; esac done # If REF and DIR are empty, print usage and error if [[ -z "${ref}" && -z "${dir}" ]]; then usage; fi # If REF and DIR are both set, print usage and error if [[ -n "${ref}" && -n "${dir}" ]]; then usage; fi if ! which apidiff > /dev/null; then echo "Installing golang.org/x/exp/cmd/apidiff..." pushd "${TMPDIR:-/tmp}" > /dev/null GO111MODULE=off go get golang.org/x/exp/cmd/apidiff popd > /dev/null fi output=$(mktemp -d -t "apidiff.output.XXXX") cleanup_output () { rm -fr "${output}"; } trap cleanup_output EXIT # If ref is set, clone . to temp dir at $ref, and set $dir to the temp dir clone="" base="${dir}" if [[ -n "${ref}" ]]; then base="${ref}" clone=$(mktemp -d -t "apidiff.clone.XXXX") cleanup_clone_and_output () { rm -fr "${clone}"; cleanup_output; } trap cleanup_clone_and_output EXIT git clone . 
-q --no-tags -b "${ref}" "${clone}" dir="${clone}" fi pushd "${dir}" >/dev/null echo "Inspecting API of ${base}..." go list ./... > packages.txt for pkg in $(cat packages.txt); do mkdir -p "${output}/${pkg}" apidiff -w "${output}/${pkg}/apidiff.output" "${pkg}" done popd >/dev/null retval=0 echo "Comparing with ${base}..." for pkg in $(go list ./...); do # New packages are ok if [ ! -f "${output}/${pkg}/apidiff.output" ]; then continue fi # Check for incompatible changes to previous packages incompatible=$(apidiff -incompatible "${output}/${pkg}/apidiff.output" "${pkg}") if [[ -n "${incompatible}" ]]; then echo >&2 "FAIL: ${pkg} contains incompatible changes: ${incompatible} " retval=1 fi done # Check for removed packages removed=$(comm -23 "${dir}/packages.txt" <(go list ./...)) if [[ -n "${removed}" ]]; then echo >&2 "FAIL: removed packages: ${removed} " retval=1 fi exit $retval klog-2.5.0/integration_tests/000077500000000000000000000000001400432665300162225ustar00rootroot00000000000000klog-2.5.0/integration_tests/internal/000077500000000000000000000000001400432665300200365ustar00rootroot00000000000000klog-2.5.0/integration_tests/internal/main.go000066400000000000000000000020051400432665300213060ustar00rootroot00000000000000/* This file is intended to be used as a standin for a klog'ed executable. It is called by the integration test via `go run` and with different klog flags to assert on klog behaviour, especially where klog logs its output when different combinations of the klog flags are at play. This file is not intended to be used outside of the integration tests and is not supposed to be a (good) example on how to use klog. */ package main import ( "flag" "fmt" "os" "k8s.io/klog/v2" ) func main() { infoLogLine := getEnvOrDie("KLOG_INFO_LOG") warningLogLine := getEnvOrDie("KLOG_WARNING_LOG") errorLogLine := getEnvOrDie("KLOG_ERROR_LOG") fatalLogLine := getEnvOrDie("KLOG_FATAL_LOG") klog.InitFlags(nil) flag.Parse() klog.Info(infoLogLine) klog.Warning(warningLogLine) klog.Error(errorLogLine) klog.Flush() klog.Fatal(fatalLogLine) } func getEnvOrDie(name string) string { val, ok := os.LookupEnv(name) if !ok { fmt.Fprintf(os.Stderr, name+" could not be found in environment") os.Exit(1) } return val } klog-2.5.0/integration_tests/klog_test.go000066400000000000000000000262571400432665300205600ustar00rootroot00000000000000package integration_tests_test import ( "bytes" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "runtime" "strings" "testing" ) const ( infoLog = "this is a info log line" warningLog = "this is a warning log line" errorLog = "this is a error log line" fatalLog = "this is a fatal log line" ) // res is a type alias to a slice of pointers to regular expressions. 
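// It keeps the expectation tables below (allLogREs and the *InDirREs maps) compact.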
type res = []*regexp.Regexp var ( infoLogRE = regexp.MustCompile(regexp.QuoteMeta(infoLog)) warningLogRE = regexp.MustCompile(regexp.QuoteMeta(warningLog)) errorLogRE = regexp.MustCompile(regexp.QuoteMeta(errorLog)) fatalLogRE = regexp.MustCompile(regexp.QuoteMeta(fatalLog)) stackTraceRE = regexp.MustCompile(`\ngoroutine \d+ \[[^]]+\]:\n`) allLogREs = res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE, stackTraceRE} defaultExpectedInDirREs = map[int]res{ 0: {stackTraceRE, fatalLogRE, errorLogRE, warningLogRE, infoLogRE}, 1: {stackTraceRE, fatalLogRE, errorLogRE, warningLogRE}, 2: {stackTraceRE, fatalLogRE, errorLogRE}, 3: {stackTraceRE, fatalLogRE}, } expectedOneOutputInDirREs = map[int]res{ 0: {infoLogRE}, 1: {warningLogRE}, 2: {errorLogRE}, 3: {fatalLogRE}, } defaultNotExpectedInDirREs = map[int]res{ 0: {}, 1: {infoLogRE}, 2: {infoLogRE, warningLogRE}, 3: {infoLogRE, warningLogRE, errorLogRE}, } ) func TestDestinationsWithDifferentFlags(t *testing.T) { tests := map[string]struct { // logfile states if the flag -log_file should be set logfile bool // logdir states if the flag -log_dir should be set logdir bool // flags is for additional flags to pass to the klog'ed executable flags []string // expectedLogFile states if we generally expect the log file to exist. // If this is not set, we expect the file not to exist and will error if it // does. expectedLogFile bool // expectedLogDir states if we generally expect the log files in the log // dir to exist. // If this is not set, we expect the log files in the log dir not to exist and // will error if they do. expectedLogDir bool // expectedOnStderr is a list of REs we expect to find on stderr expectedOnStderr res // notExpectedOnStderr is a list of REs that we must not find on stderr notExpectedOnStderr res // expectedInFile is a list of REs we expect to find in the log file expectedInFile res // notExpectedInFile is a list of REs we must not find in the log file notExpectedInFile res // expectedInDir is a list of REs we expect to find in the log files in the // log dir, specified by log severity (0 = warning, 1 = info, ...) expectedInDir map[int]res // notExpectedInDir is a list of REs we must not find in the log files in // the log dir, specified by log severity (0 = warning, 1 = info, ...) notExpectedInDir map[int]res }{ "default flags": { // Everything, including the trace on fatal, goes to stderr expectedOnStderr: allLogREs, }, "everything disabled": { // Nothing, including the trace on fatal, is showing anywhere flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, notExpectedOnStderr: allLogREs, }, "everything disabled but low stderrthreshold": { // Everything above -stderrthreshold, including the trace on fatal, will // be logged to stderr, even if we set -logtostderr to false. 
flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1"}, expectedOnStderr: res{warningLogRE, errorLogRE, stackTraceRE}, notExpectedOnStderr: res{infoLogRE}, }, "with logtostderr only": { // Everything, including the trace on fatal, goes to stderr flags: []string{"-logtostderr=true", "-alsologtostderr=false", "-stderrthreshold=1000"}, expectedOnStderr: allLogREs, }, "with log file only": { // Everything, including the trace on fatal, goes to the single log file logfile: true, flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, expectedLogFile: true, notExpectedOnStderr: allLogREs, expectedInFile: allLogREs, }, "with log dir only": { // Everything, including the trace on fatal, goes to the log files in the log dir logdir: true, flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, expectedLogDir: true, notExpectedOnStderr: allLogREs, expectedInDir: defaultExpectedInDirREs, notExpectedInDir: defaultNotExpectedInDirREs, }, "with log dir only and one_output": { // Everything, including the trace on fatal, goes to the log files in the log dir logdir: true, flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000", "-one_output=true"}, expectedLogDir: true, notExpectedOnStderr: allLogREs, expectedInDir: expectedOneOutputInDirREs, notExpectedInDir: defaultNotExpectedInDirREs, }, "with log dir and logtostderr": { // Everything, including the trace on fatal, goes to stderr. The -log_dir is // ignored, nothing goes to the log files in the log dir. logdir: true, flags: []string{"-logtostderr=true", "-alsologtostderr=false", "-stderrthreshold=1000"}, expectedOnStderr: allLogREs, }, "with log file and log dir": { // Everything, including the trace on fatal, goes to the single log file. // The -log_dir is ignored, nothing goes to the log file in the log dir. logdir: true, logfile: true, flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, expectedLogFile: true, notExpectedOnStderr: allLogREs, expectedInFile: allLogREs, }, "with log file and alsologtostderr": { // Everything, including the trace on fatal, goes to the single log file // AND to stderr. flags: []string{"-alsologtostderr=true", "-logtostderr=false", "-stderrthreshold=1000"}, logfile: true, expectedLogFile: true, expectedOnStderr: allLogREs, expectedInFile: allLogREs, }, "with log dir and alsologtostderr": { // Everything, including the trace on fatal, goes to the log file in the // log dir AND to stderr. logdir: true, flags: []string{"-alsologtostderr=true", "-logtostderr=false", "-stderrthreshold=1000"}, expectedLogDir: true, expectedOnStderr: allLogREs, expectedInDir: defaultExpectedInDirREs, notExpectedInDir: defaultNotExpectedInDirREs, }, "with log dir, alsologtostderr and one_output": { // Everything, including the trace on fatal, goes to the log file in the // log dir AND to stderr. 
logdir: true, flags: []string{"-alsologtostderr=true", "-logtostderr=false", "-stderrthreshold=1000", "-one_output=true"}, expectedLogDir: true, expectedOnStderr: allLogREs, expectedInDir: expectedOneOutputInDirREs, notExpectedInDir: defaultNotExpectedInDirREs, }, } binaryFileExtention := "" if runtime.GOOS == "windows" { binaryFileExtention = ".exe" } for tcName, tc := range tests { tc := tc t.Run(tcName, func(t *testing.T) { t.Parallel() withTmpDir(t, func(logdir string) { // :: Setup flags := tc.flags stderr := &bytes.Buffer{} logfile := filepath.Join(logdir, "the_single_log_file") // /some/tmp/dir/the_single_log_file if tc.logfile { flags = append(flags, "-log_file="+logfile) } if tc.logdir { flags = append(flags, "-log_dir="+logdir) } // :: Execute klogRun(t, flags, stderr) // :: Assert // check stderr checkForLogs(t, tc.expectedOnStderr, tc.notExpectedOnStderr, stderr.String(), "stderr") // check log_file if tc.expectedLogFile { content := getFileContent(t, logfile) checkForLogs(t, tc.expectedInFile, tc.notExpectedInFile, content, "logfile") } else { assertFileIsAbsent(t, logfile) } // check files in log_dir for level, levelName := range logFileLevels { binaryName := "main" + binaryFileExtention logfile, err := getLogFilePath(logdir, binaryName, levelName) if tc.expectedLogDir { if err != nil { t.Errorf("Unable to find log file: %v", err) } content := getFileContent(t, logfile) checkForLogs(t, tc.expectedInDir[level], tc.notExpectedInDir[level], content, "logfile["+logfile+"]") } else { if err == nil { t.Errorf("Unexpectedly found log file %s", logfile) } } } }) }) } } const klogExampleGoFile = "./internal/main.go" // klogRun spawns a simple executable that uses klog, to later inspect its // stderr and potentially created log files func klogRun(t *testing.T, flags []string, stderr io.Writer) { callFlags := []string{"run", klogExampleGoFile} callFlags = append(callFlags, flags...) cmd := exec.Command("go", callFlags...) 
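	// Wire up the child process: its stderr is captured for the assertions, and the
	// expected log lines are passed in via the KLOG_*_LOG environment variables,
	// which internal/main.go reads back with getEnvOrDie before logging them.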
cmd.Stderr = stderr cmd.Env = append(os.Environ(), "KLOG_INFO_LOG="+infoLog, "KLOG_WARNING_LOG="+warningLog, "KLOG_ERROR_LOG="+errorLog, "KLOG_FATAL_LOG="+fatalLog, ) err := cmd.Run() if _, ok := err.(*exec.ExitError); !ok { t.Fatalf("Run failed: %v", err) } } var logFileLevels = map[int]string{ 0: "INFO", 1: "WARNING", 2: "ERROR", 3: "FATAL", } func getFileContent(t *testing.T, filePath string) string { content, err := ioutil.ReadFile(filePath) if err != nil { t.Errorf("Could not read file '%s': %v", filePath, err) } return string(content) } func assertFileIsAbsent(t *testing.T, filePath string) { if _, err := os.Stat(filePath); !os.IsNotExist(err) { t.Errorf("Expected file '%s' not to exist", filePath) } } func checkForLogs(t *testing.T, expected, disallowed res, content, name string) { for _, re := range expected { checkExpected(t, true, name, content, re) } for _, re := range disallowed { checkExpected(t, false, name, content, re) } } func checkExpected(t *testing.T, expected bool, where string, haystack string, needle *regexp.Regexp) { found := needle.MatchString(haystack) if expected && !found { t.Errorf("Expected to find '%s' in %s", needle, where) } if !expected && found { t.Errorf("Expected not to find '%s' in %s", needle, where) } } func withTmpDir(t *testing.T, f func(string)) { tmpDir, err := ioutil.TempDir("", "klog_e2e_") if err != nil { t.Fatalf("Could not create temp directory: %v", err) } defer func() { if err := os.RemoveAll(tmpDir); err != nil { t.Fatalf("Could not remove temp directory '%s': %v", tmpDir, err) } }() f(tmpDir) } // getLogFileFromDir returns the path of either the symbolic link to the logfile, or the the logfile itself. This must // be done as the creation of a symlink is not guaranteed on any platform. On Windows, only users with administration // privileges can create a symlink. func getLogFilePath(dir, binaryName, levelName string) (string, error) { symlink := filepath.Join(dir, binaryName+"."+levelName) if _, err := os.Stat(symlink); err == nil { return symlink, nil } files, err := ioutil.ReadDir(dir) if err != nil { return "", fmt.Errorf("could not read directory %s: %v", dir, err) } var foundFile string for _, file := range files { if strings.HasPrefix(file.Name(), binaryName) && strings.Contains(file.Name(), levelName) { if foundFile != "" { return "", fmt.Errorf("found multiple matching files") } foundFile = file.Name() } } if foundFile != "" { return filepath.Join(dir, foundFile), nil } return "", fmt.Errorf("file missing from directory") } klog-2.5.0/klog.go000066400000000000000000001430661400432665300137520ustar00rootroot00000000000000// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ // // Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. // It provides functions Info, Warning, Error, Fatal, plus formatting variants such as // Infof. 
It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. // // Basic examples: // // klog.Info("Prepare to repel boarders") // // klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // // if klog.V(2) { // klog.Info("Starting transaction...") // } // // klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. // // By default, all log statements write to standard error. // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // // -logtostderr=true // Logs are written to standard error instead of to files. // -alsologtostderr=false // Logs are written to standard error as well as to files. // -stderrthreshold=ERROR // Log events at or above this severity are logged to standard // error as well as to files. // -log_dir="" // Log files will be written to this directory instead of the // default temporary directory. // // Other flags provide aids to debugging. // // -log_backtrace_at="" // When set to a file and line number holding a logging statement, // such as // -log_backtrace_at=gopherflakes.go:234 // a stack trace will be written to the Info log whenever execution // hits that statement. (Unlike with -vmodule, the ".go" must be // present.) // -v=0 // Enable V-leveled logging at the specified level. // -vmodule="" // The syntax of the argument is a comma-separated list of pattern=N, // where pattern is a literal file name (minus the ".go" suffix) or // "glob" pattern and N is a V level. For instance, // -vmodule=gopher*=3 // sets the V level to 3 in all Go files whose names begin "gopher". // package klog import ( "bufio" "bytes" "errors" "flag" "fmt" "io" stdLog "log" "math" "os" "path/filepath" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/go-logr/logr" ) // severity identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. The -stderrthreshold flag is of type severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. type severity int32 // sync/atomic int32 // These constants identify the log levels in order of increasing severity. // A message written to a high-severity log file is also written to each // lower-severity log file. const ( infoLog severity = iota warningLog errorLog fatalLog numSeverity = 4 ) const severityChar = "IWEF" var severityName = []string{ infoLog: "INFO", warningLog: "WARNING", errorLog: "ERROR", fatalLog: "FATAL", } // get returns the value of the severity. func (s *severity) get() severity { return severity(atomic.LoadInt32((*int32)(s))) } // set sets the value of the severity. func (s *severity) set(val severity) { atomic.StoreInt32((*int32)(s), int32(val)) } // String is part of the flag.Value interface. func (s *severity) String() string { return strconv.FormatInt(int64(*s), 10) } // Get is part of the flag.Getter interface. func (s *severity) Get() interface{} { return *s } // Set is part of the flag.Value interface. func (s *severity) Set(value string) error { var threshold severity // Is it a known name? 
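	// Accept either a symbolic name from severityName ("INFO", "WARNING",
	// "ERROR", "FATAL") or the corresponding numeric value.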
if v, ok := severityByName(value); ok { threshold = v } else { v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } threshold = severity(v) } logging.stderrThreshold.set(threshold) return nil } func severityByName(s string) (severity, bool) { s = strings.ToUpper(s) for i, name := range severityName { if name == s { return severity(i), true } } return 0, false } // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { lines int64 bytes int64 } // Lines returns the number of lines written. func (s *OutputStats) Lines() int64 { return atomic.LoadInt64(&s.lines) } // Bytes returns the number of bytes written. func (s *OutputStats) Bytes() int64 { return atomic.LoadInt64(&s.bytes) } // Stats tracks the number of lines of output and number of bytes // per severity level. Values must be read with atomic.LoadInt64. var Stats struct { Info, Warning, Error OutputStats } var severityStats = [numSeverity]*OutputStats{ infoLog: &Stats.Info, warningLog: &Stats.Warning, errorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is // the type of the v flag, which can be set programmatically. // It's a distinct type because we want to discriminate it from logType. // Variables of type level are only changed under logging.mu. // The -v flag is read only with atomic ops, so the state of the logging // module is consistent. // Level is treated as a sync/atomic int32. // Level specifies a level of verbosity for V logs. *Level implements // flag.Value; the -v flag is of type Level and should be modified // only through the flag.Value interface. type Level int32 // get returns the value of the Level. func (l *Level) get() Level { return Level(atomic.LoadInt32((*int32)(l))) } // set sets the value of the Level. func (l *Level) set(val Level) { atomic.StoreInt32((*int32)(l), int32(val)) } // String is part of the flag.Value interface. func (l *Level) String() string { return strconv.FormatInt(int64(*l), 10) } // Get is part of the flag.Getter interface. func (l *Level) Get() interface{} { return *l } // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } logging.mu.Lock() defer logging.mu.Unlock() logging.setVState(Level(v), logging.vmodule.filter, false) return nil } // moduleSpec represents the setting of the -vmodule flag. type moduleSpec struct { filter []modulePat } // modulePat contains a filter for the -vmodule flag. // It holds a verbosity level and a file pattern to match. type modulePat struct { pattern string literal bool // The pattern is a literal string level Level } // match reports whether the file matches the pattern. It uses a string // comparison if the pattern contains no metacharacters. func (m *modulePat) match(file string) bool { if m.literal { return file == m.pattern } match, _ := filepath.Match(m.pattern, file) return match } func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() var b bytes.Buffer for i, f := range m.filter { if i > 0 { b.WriteRune(',') } fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) } return b.String() } // Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the // struct is not exported. 
func (m *moduleSpec) Get() interface{} { return nil } var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { // Empty strings such as from a trailing comma can be ignored. continue } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { return errVmoduleSyntax } pattern := patLev[0] v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { return errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. } // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } logging.mu.Lock() defer logging.mu.Unlock() logging.setVState(logging.verbosity, filter, true) return nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters // that require filepath.Match to be called to match the pattern. func isLiteral(pattern string) bool { return !strings.ContainsAny(pattern, `\*?[]`) } // traceLocation represents the setting of the -log_backtrace_at flag. type traceLocation struct { file string line int } // isSet reports whether the trace location has been specified. // logging.mu is held. func (t *traceLocation) isSet() bool { return t.line > 0 } // match reports whether the specified file and line matches the trace location. // The argument file name is the full path, not the basename specified in the flag. // logging.mu is held. func (t *traceLocation) match(file string, line int) bool { if t.line != line { return false } if i := strings.LastIndex(file, "/"); i >= 0 { file = file[i+1:] } return t.file == file } func (t *traceLocation) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() return fmt.Sprintf("%s:%d", t.file, t.line) } // Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the // struct is not exported func (t *traceLocation) Get() interface{} { return nil } var errTraceSyntax = errors.New("syntax error: expect file.go:234") // Syntax: -log_backtrace_at=gopherflakes.go:234 // Note that unlike vmodule the file extension is included here. func (t *traceLocation) Set(value string) error { if value == "" { // Unset. logging.mu.Lock() defer logging.mu.Unlock() t.line = 0 t.file = "" return nil } fields := strings.Split(value, ":") if len(fields) != 2 { return errTraceSyntax } file, line := fields[0], fields[1] if !strings.Contains(file, ".") { return errTraceSyntax } v, err := strconv.Atoi(line) if err != nil { return errTraceSyntax } if v <= 0 { return errors.New("negative or zero value for level") } logging.mu.Lock() defer logging.mu.Unlock() t.line = v t.file = file return nil } // flushSyncWriter is the interface satisfied by logging destinations. type flushSyncWriter interface { Flush() error Sync() error io.Writer } // init sets up the defaults and runs flushDaemon. func init() { logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. 
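	// The remaining fields are given their documented defaults here, and the
	// flushDaemon goroutine started below periodically flushes buffered output.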
logging.setVState(0, nil, false) logging.logDir = "" logging.logFile = "" logging.logFileMaxSizeMB = 1800 logging.toStderr = true logging.alsoToStderr = false logging.skipHeaders = false logging.addDirHeader = false logging.skipLogHeaders = false logging.oneOutput = false go logging.flushDaemon() } // InitFlags is for explicitly initializing the flags. func InitFlags(flagset *flag.FlagSet) { if flagset == nil { flagset = flag.CommandLine } flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, "Defines the maximum size a log file can grow to. Unit is megabytes. "+ "If the value is 0, the maximum file size is unlimited.") flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") } // Flush flushes all pending log I/O. func Flush() { logging.lockAndFlushAll() } // loggingT collects all the global state of the logging setup. type loggingT struct { // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. toStderr bool // The -logtostderr flag. alsoToStderr bool // The -alsologtostderr flag. // Level flag. Handled atomically. stderrThreshold severity // The -stderrthreshold flag. // freeList is a list of byte buffers, maintained under freeListMu. freeList *buffer // freeListMu maintains the free list. It is separate from the main mutex // so buffers can be grabbed and printed to without holding the main lock, // for better parallelization. freeListMu sync.Mutex // mu protects the remaining elements of this structure and is // used to synchronize logging. mu sync.Mutex // file holds writer for each of the log types. file [numSeverity]flushSyncWriter // pcs is used in V to avoid an allocation when computing the caller's PC. pcs [1]uintptr // vmap is a cache of the V Level for each V() call site, identified by PC. // It is wiped whenever the vmodule flag changes state. vmap map[uintptr]Level // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. 
It may be read safely // using sync.LoadInt32, but is only modified under mu. filterLength int32 // traceLocation is the state of the -log_backtrace_at flag. traceLocation traceLocation // These flags are modified only under lock, although verbosity may be fetched // safely using atomic.LoadInt32. vmodule moduleSpec // The state of the -vmodule flag. verbosity Level // V logging level, the value of the -v flag/ // If non-empty, overrides the choice of directory in which to write logs. // See createLogDirs for the full list of possible destinations. logDir string // If non-empty, specifies the path of the file to write logs. mutually exclusive // with the log_dir option. logFile string // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. logFileMaxSizeMB uint64 // If true, do not add the prefix headers, useful when used with SetOutput skipHeaders bool // If true, do not add the headers to log files skipLogHeaders bool // If true, add the file directory to the header addDirHeader bool // If set, all output will be redirected unconditionally to the provided logr.Logger logr logr.Logger // If true, messages will not be propagated to lower severity log levels oneOutput bool // If set, all output will be filtered through the filter. filter LogFilter } // buffer holds a byte Buffer for reuse. The zero value is ready for use. type buffer struct { bytes.Buffer tmp [64]byte // temporary byte array for creating headers. next *buffer } var logging loggingT // setVState sets a consistent state for V logging. // l.mu is held. func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { // Turn verbosity off so V will not fire while we are in transition. l.verbosity.set(0) // Ditto for filter length. atomic.StoreInt32(&l.filterLength, 0) // Set the new filters and wipe the pc->Level map if the filter has changed. if setFilter { l.vmodule.filter = filter l.vmap = make(map[uintptr]Level) } // Things are consistent now, so enable filtering and verbosity. // They are enabled in order opposite to that in V. atomic.StoreInt32(&l.filterLength, int32(len(filter))) l.verbosity.set(verbosity) } // getBuffer returns a new, ready-to-use buffer. func (l *loggingT) getBuffer() *buffer { l.freeListMu.Lock() b := l.freeList if b != nil { l.freeList = b.next } l.freeListMu.Unlock() if b == nil { b = new(buffer) } else { b.next = nil b.Reset() } return b } // putBuffer returns a buffer to the free list. func (l *loggingT) putBuffer(b *buffer) { if b.Len() >= 256 { // Let big buffers die a natural death. return } l.freeListMu.Lock() b.next = l.freeList l.freeList = b l.freeListMu.Unlock() } var timeNow = time.Now // Stubbed out for testing. /* header formats a log header as defined by the C++ implementation. It returns a buffer containing the formatted header and the user's file and line number. The depth specifies how many stack frames above lives the source line to be identified in the log message. Log lines have this form: Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... 
where the fields are defined as follows: L A single character, representing the log level (eg 'I' for INFO) mm The month (zero padded; ie May is '05') dd The day (zero padded) hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds threadid The space-padded thread ID as returned by GetTID() file The file name line The line number msg The user-supplied message */ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { _, file, line, ok := runtime.Caller(3 + depth) if !ok { file = "???" line = 1 } else { if slash := strings.LastIndex(file, "/"); slash >= 0 { path := file file = path[slash+1:] if l.addDirHeader { if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { file = path[dirsep+1:] } } } } return l.formatHeader(s, file, line), file, line } // formatHeader formats a log header using the provided file name and line number. func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { now := timeNow() if line < 0 { line = 0 // not a real line number, but acceptable to someDigits } if s > fatalLog { s = infoLog // for safety. } buf := l.getBuffer() if l.skipHeaders { return buf } // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] buf.tmp[0] = severityChar[s] buf.twoDigits(1, int(month)) buf.twoDigits(3, day) buf.tmp[5] = ' ' buf.twoDigits(6, hour) buf.tmp[8] = ':' buf.twoDigits(9, minute) buf.tmp[11] = ':' buf.twoDigits(12, second) buf.tmp[14] = '.' buf.nDigits(6, 15, now.Nanosecond()/1000, '0') buf.tmp[21] = ' ' buf.nDigits(7, 22, pid, ' ') // TODO: should be TID buf.tmp[29] = ' ' buf.Write(buf.tmp[:30]) buf.WriteString(file) buf.tmp[0] = ':' n := buf.someDigits(1, line) buf.tmp[n+1] = ']' buf.tmp[n+2] = ' ' buf.Write(buf.tmp[:n+3]) return buf } // Some custom tiny helper functions to print the log header efficiently. const digits = "0123456789" // twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. func (buf *buffer) twoDigits(i, d int) { buf.tmp[i+1] = digits[d%10] d /= 10 buf.tmp[i] = digits[d%10] } // nDigits formats an n-digit integer at buf.tmp[i], // padding with pad on the left. // It assumes d >= 0. func (buf *buffer) nDigits(n, i, d int, pad byte) { j := n - 1 for ; j >= 0 && d > 0; j-- { buf.tmp[i+j] = digits[d%10] d /= 10 } for ; j >= 0; j-- { buf.tmp[i+j] = pad } } // someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. func (buf *buffer) someDigits(i, d int) int { // Print into the top, then copy down. We know there's space for at least // a 10-digit number. j := len(buf.tmp) for { j-- buf.tmp[j] = digits[d%10] d /= 10 if d == 0 { break } } return copy(buf.tmp[i:], buf.tmp[j:]) } func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logr != nil { l.putBuffer(buf) buf = l.getBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprintln(buf, args...) l.output(s, logr, buf, file, line, false) } func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { l.printDepth(s, logr, filter, 1, args...) 
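// The fixed depth of 1 skips this wrapper frame, so header() attributes the
// record to the caller of the exported Info/Warning/Error/Fatal helpers
// rather than to print itself.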
} func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logr != nil { l.putBuffer(buf) buf = l.getBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, logr, buf, file, line, false) } func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logr != nil { l.putBuffer(buf) buf = l.getBuffer() } if filter != nil { format, args = filter.FilterF(format, args) } fmt.Fprintf(buf, format, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, logr, buf, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logr != nil { l.putBuffer(buf) buf = l.getBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, logr, buf, file, line, alsoToStderr) } // if loggr is specified, will call loggr.Error, otherwise output with logging module. func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } if loggr != nil { loggr.Error(err, msg, keysAndValues...) return } l.printS(err, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } if loggr != nil { loggr.Info(msg, keysAndValues...) return } l.printS(nil, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. // if err arguments is specified, will output to errorLog severity func (l *loggingT) printS(err error, depth int, msg string, keysAndValues ...interface{}) { b := &bytes.Buffer{} b.WriteString(fmt.Sprintf("%q", msg)) if err != nil { b.WriteByte(' ') b.WriteString(fmt.Sprintf("err=%q", err.Error())) } kvListFormat(b, keysAndValues...) 
var s severity if err == nil { s = infoLog } else { s = errorLog } l.printDepth(s, logging.logr, nil, depth+1, b) } const missingValue = "(MISSING)" func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { for i := 0; i < len(keysAndValues); i += 2 { var v interface{} k := keysAndValues[i] if i+1 < len(keysAndValues) { v = keysAndValues[i+1] } else { v = missingValue } b.WriteByte(' ') switch v.(type) { case string, error: b.WriteString(fmt.Sprintf("%s=%q", k, v)) default: if _, ok := v.(fmt.Stringer); ok { b.WriteString(fmt.Sprintf("%s=%q", k, v)) } else { b.WriteString(fmt.Sprintf("%s=%+v", k, v)) } } } } // redirectBuffer is used to set an alternate destination for the logs type redirectBuffer struct { w io.Writer } func (rb *redirectBuffer) Sync() error { return nil } func (rb *redirectBuffer) Flush() error { return nil } func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { return rb.w.Write(bytes) } // SetLogger will set the backing logr implementation for klog. // If set, all log lines will be suppressed from the regular Output, and // redirected to the logr implementation. // All log lines include the 'severity', 'file' and 'line' values attached as // structured logging values. // Use as: // ... // klog.SetLogger(zapr.NewLogger(zapLog)) func SetLogger(logr logr.Logger) { logging.logr = logr } // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() for s := fatalLog; s >= infoLog; s-- { rb := &redirectBuffer{ w: w, } logging.file[s] = rb } } // SetOutputBySeverity sets the output destination for specific severity func SetOutputBySeverity(name string, w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } rb := &redirectBuffer{ w: w, } logging.file[sev] = rb } // LogToStderr sets whether to log exclusively to stderr, bypassing outputs func LogToStderr(stderr bool) { logging.mu.Lock() defer logging.mu.Unlock() logging.toStderr = stderr } // output writes the data to the log files and releases the buffer. func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, line int, alsoToStderr bool) { l.mu.Lock() if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { buf.Write(stacks(false)) } } data := buf.Bytes() if log != nil { // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == errorLog { l.logr.Error(nil, string(data)) } else { log.Info(string(data)) } } else if l.toStderr { os.Stderr.Write(data) } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } if logging.logFile != "" { // Since we are using a single log file, all of the items in l.file array // will point to the same file, so just use one of them to write data. if l.file[infoLog] == nil { if err := l.createFiles(infoLog); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. l.exit(err) } } l.file[infoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. 
l.exit(err) } } if l.oneOutput { l.file[s].Write(data) } else { switch s { case fatalLog: l.file[fatalLog].Write(data) fallthrough case errorLog: l.file[errorLog].Write(data) fallthrough case warningLog: l.file[warningLog].Write(data) fallthrough case infoLog: l.file[infoLog].Write(data) } } } } if s == fatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() timeoutFlush(10 * time.Second) os.Exit(1) } // Dump all goroutine stacks before exiting. trace := stacks(true) // Write the stack trace for all goroutines to the stderr. if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { os.Stderr.Write(trace) } // Write the stack trace for all goroutines to the files. logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := fatalLog; log >= infoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. f.Write(trace) } } l.mu.Unlock() timeoutFlush(10 * time.Second) os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } l.putBuffer(buf) l.mu.Unlock() if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) } } // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked // by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) go func() { Flush() // calls logging.lockAndFlushAll() done <- true }() select { case <-done: case <-time.After(timeout): fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } // stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. func stacks(all bool) []byte { // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. n := 10000 if all { n = 100000 } var trace []byte for i := 0; i < 5; i++ { trace = make([]byte, n) nbytes := runtime.Stack(trace, all) if nbytes < len(trace) { return trace[:nbytes] } n *= 2 } return trace } // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that // would make its use clumsier. var logExitFunc func(error) // exit is called if there is trouble creating or writing log files. // It flushes the logs and exits the program; there's no point in hanging around. // l.mu is held. func (l *loggingT) exit(err error) { fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) // If logExitFunc is set, we do that instead of exiting. if logExitFunc != nil { logExitFunc(err) return } l.flushAll() os.Exit(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the // file's Sync method and providing a wrapper for the Write method that provides log // file rotation. There are conflicting methods, so the file cannot be embedded. // l.mu is held for all its methods. type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File sev severity nbytes uint64 // The number of bytes written to this file maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. 
} func (sb *syncBuffer) Sync() error { return sb.file.Sync() } // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. func CalculateMaxSize() uint64 { if logging.logFile != "" { if logging.logFileMaxSizeMB == 0 { // If logFileMaxSizeMB is zero, we don't have limitations on the log size. return math.MaxUint64 } // Flag logFileMaxSizeMB is in MB for user convenience. return logging.logFileMaxSizeMB * 1024 * 1024 } // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. return MaxSize } func (sb *syncBuffer) Write(p []byte) (n int, err error) { if sb.nbytes+uint64(len(p)) >= sb.maxbytes { if err := sb.rotateFile(time.Now(), false); err != nil { sb.logger.exit(err) } } n, err = sb.Writer.Write(p) sb.nbytes += uint64(n) if err != nil { sb.logger.exit(err) } return } // rotateFile closes the syncBuffer's file and starts a new one. // The startup argument indicates whether this is the initial startup of klog. // If startup is true, existing files are opened for appending instead of truncated. func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { if sb.file != nil { sb.Flush() sb.file.Close() } var err error sb.file, _, err = create(severityName[sb.sev], now, startup) if err != nil { return err } if startup { fileInfo, err := sb.file.Stat() if err != nil { return fmt.Errorf("file stat could not get fileinfo: %v", err) } // init file size sb.nbytes = uint64(fileInfo.Size()) } else { sb.nbytes = 0 } sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) if sb.logger.skipLogHeaders { return nil } // Write header. var buf bytes.Buffer fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) fmt.Fprintf(&buf, "Running on machine: %s\n", host) fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") n, err := sb.file.Write(buf.Bytes()) sb.nbytes += uint64(n) return err } // bufferSize sizes the buffer associated with each log file. It's large // so that log records can accumulate without the logging thread blocking // on disk I/O. The flushDaemon will block instead. const bufferSize = 256 * 1024 // createFiles creates all the log files for severity from sev down to infoLog. // l.mu is held. func (l *loggingT) createFiles(sev severity) error { now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. for s := sev; s >= infoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, maxbytes: CalculateMaxSize(), } if err := sb.rotateFile(now, true); err != nil { return err } l.file[s] = sb } return nil } const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. func (l *loggingT) flushDaemon() { for range time.NewTicker(flushInterval).C { l.lockAndFlushAll() } } // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() l.flushAll() l.mu.Unlock() } // flushAll flushes all the logs and attempts to "sync" their data to disk. // l.mu is held. func (l *loggingT) flushAll() { // Flush from fatal down, in case there's trouble flushing. 
for s := fatalLog; s >= infoLog; s-- { file := l.file[s] if file != nil { file.Flush() // ignore error file.Sync() // ignore error } } } // CopyStandardLogTo arranges for messages written to the Go "log" package's // default logs to also appear in the Google logs for the named and lower // severities. Subsequent changes to the standard log's default output location // or format may break this behavior. // // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) } // Set a log format that captures the user's file and line: // d.go:23: message stdLog.SetFlags(stdLog.Lshortfile) stdLog.SetOutput(logBridge(sev)) } // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. type logBridge severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). func (lb logBridge) Write(b []byte) (n int, err error) { var ( file = "???" line = 1 text string ) // Split "d.go:23: message" into "d.go", "23", and "message". if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { text = fmt.Sprintf("bad log format: %s", b) } else { file = string(parts[0]) text = string(parts[2][1:]) // skip leading space line, err = strconv.Atoi(string(parts[1])) if err != nil { text = fmt.Sprintf("bad line number: %s", b) line = 1 } } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) return len(b), nil } // setV computes and remembers the V level for a given PC // when vmodule is enabled. // File pattern matching takes the basename of the file, stripped // of its .go suffix, and uses filepath.Match, which is a little more // general than the *? matching used in C++. // l.mu is held. func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. if strings.HasSuffix(file, ".go") { file = file[:len(file)-3] } if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } for _, filter := range l.vmodule.filter { if filter.match(file) { l.vmap[pc] = filter.level return filter.level } } l.vmap[pc] = 0 return 0 } // Verbose is a boolean type that implements Infof (like Printf) etc. // See the documentation of V for more information. type Verbose struct { enabled bool logr logr.Logger filter LogFilter } func newVerbose(level Level, b bool) Verbose { if logging.logr == nil { return Verbose{b, nil, logging.filter} } return Verbose{b, logging.logr.V(int(level)), logging.filter} } // V reports whether verbosity at the call site is at least the requested level. // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either // if glog.V(2).Enabled() { klog.Info("log this") } // or // klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // // Whether an individual call to V generates a log record depends on the setting of // the -v and -vmodule flags; both are off by default. 
The V call will log if its level // is less than or equal to the value of the -v flag, or alternatively if its level is // less than or equal to the value of the -vmodule pattern matching the source file // containing the call. func V(level Level) Verbose { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. // Here is a cheap but safe test to see if V logging is enabled globally. if logging.verbosity.get() >= level { return newVerbose(level, true) } // It's off globally but vmodule may still be set. // Here is another cheap but safe test to see if vmodule is enabled. if atomic.LoadInt32(&logging.filterLength) > 0 { // Now we need a proper lock to use the logging structure. The pcs field // is shared so we must lock before accessing it. This is fairly expensive, // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() if runtime.Callers(2, logging.pcs[:]) == 0 { return newVerbose(level, false) } v, ok := logging.vmap[logging.pcs[0]] if !ok { v = logging.setV(logging.pcs[0]) } return newVerbose(level, v >= level) } return newVerbose(level, false) } // Enabled will return true if this log level is enabled, guarded by the value // of v. // See the documentation of V for usage. func (v Verbose) Enabled() bool { return v.enabled } // Info is equivalent to the global Info function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { logging.print(infoLog, v.logr, v.filter, args...) } } // Infoln is equivalent to the global Infoln function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { logging.println(infoLog, v.logr, v.filter, args...) } } // Infof is equivalent to the global Infof function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { logging.printf(infoLog, v.logr, v.filter, format, args...) } } // InfoS is equivalent to the global InfoS function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) } } // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { logging.errorS(err, v.logr, v.filter, 0, msg, args...) } } // ErrorS is equivalent to the global Error function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { logging.print(infoLog, logging.logr, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). 
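// For example, a hypothetical logging shim that wraps klog can keep the
// reported file:line pointing at its own caller by skipping one extra frame:
//
//	func logf(args ...interface{}) { klog.InfoDepth(1, args...) }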
func InfoDepth(depth int, args ...interface{}) { logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, logging.logr, logging.filter, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { logging.printf(infoLog, logging.logr, logging.filter, format, args...) } // InfoS structured logs to the INFO log. // The msg argument used to add constant description to the log line. // The key/value pairs would be join by "=" ; a newline is always appended. // // Basic examples: // >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready") // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { logging.print(warningLog, logging.logr, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, logging.logr, logging.filter, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { logging.printf(warningLog, logging.logr, logging.filter, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { logging.print(errorLog, logging.logr, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { logging.println(errorLog, logging.logr, logging.filter, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { logging.printf(errorLog, logging.logr, logging.filter, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. // the err argument used as "err" field of log line. // The msg argument used to add constant description to the log line. // The key/value pairs would be join by "=" ; a newline is always appended. 
// // Basic examples: // >> klog.ErrorS(err, "Failed to update pod status") // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { logging.print(fatalLog, logging.logr, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, logging.logr, logging.filter, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { logging.printf(fatalLog, logging.logr, logging.filter, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. // It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32 // Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.print(fatalLog, logging.logr, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.println(fatalLog, logging.logr, logging.filter, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) logging.printf(fatalLog, logging.logr, logging.filter, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, // e.g. for sanitization of arguments and prevent accidental leaking of secrets. 
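// A minimal sketch of a filter (hypothetical, for illustration only) that
// redacts the value of any "password" key passed to the structured calls and
// leaves the other entry points untouched:
//
//	type redactPassword struct{}
//
//	func (redactPassword) Filter(args []interface{}) []interface{} { return args }
//
//	func (redactPassword) FilterF(format string, args []interface{}) (string, []interface{}) {
//		return format, args
//	}
//
//	func (redactPassword) FilterS(msg string, kv []interface{}) (string, []interface{}) {
//		for i := 0; i+1 < len(kv); i += 2 {
//			if k, ok := kv[i].(string); ok && k == "password" {
//				kv[i+1] = "REDACTED"
//			}
//		}
//		return msg, kv
//	}
//
// It would be installed with SetLogFilter(redactPassword{}).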
type LogFilter interface { Filter(args []interface{}) []interface{} FilterF(format string, args []interface{}) (string, []interface{}) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) } func SetLogFilter(filter LogFilter) { logging.mu.Lock() defer logging.mu.Unlock() logging.filter = filter } // ObjectRef references a kubernetes object type ObjectRef struct { Name string `json:"name"` Namespace string `json:"namespace,omitempty"` } func (ref ObjectRef) String() string { if ref.Namespace != "" { return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) } return ref.Name } // KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface // this interface may expand in the future, but will always be a subset of the // kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface type KMetadata interface { GetName() string GetNamespace() string } // KObj returns ObjectRef from ObjectMeta func KObj(obj KMetadata) ObjectRef { return ObjectRef{ Name: obj.GetName(), Namespace: obj.GetNamespace(), } } // KRef returns ObjectRef from name and namespace func KRef(namespace, name string) ObjectRef { return ObjectRef{ Name: name, Namespace: namespace, } } klog-2.5.0/klog_file.go000066400000000000000000000106271400432665300147450ustar00rootroot00000000000000// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ // // Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // File I/O for logs. package klog import ( "errors" "fmt" "os" "os/user" "path/filepath" "runtime" "strings" "sync" "time" ) // MaxSize is the maximum size of a log file in bytes. var MaxSize uint64 = 1024 * 1024 * 1800 // logDirs lists the candidate directories for new log files. var logDirs []string func createLogDirs() { if logging.logDir != "" { logDirs = append(logDirs, logging.logDir) } logDirs = append(logDirs, os.TempDir()) } var ( pid = os.Getpid() program = filepath.Base(os.Args[0]) host = "unknownhost" userName = "unknownuser" userNameOnce sync.Once ) func init() { if h, err := os.Hostname(); err == nil { host = shortHostname(h) } } func getUserName() string { userNameOnce.Do(func() { // On Windows, the Go 'user' package requires netapi32.dll. // This affects Windows Nano Server: // https://github.com/golang/go/issues/21867 // Fallback to using environment variables. if runtime.GOOS == "windows" { u := os.Getenv("USERNAME") if len(u) == 0 { return } // Sanitize the USERNAME since it may contain filepath separators. u = strings.Replace(u, `\`, "_", -1) // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' d := os.Getenv("USERDOMAIN") if len(d) != 0 { userName = d + "_" + u } else { userName = u } } else { current, err := user.Current() if err == nil { userName = current.Username } } }) return userName } // shortHostname returns its argument, truncating at the first period. // For instance, given "www.google.com" it returns "www". 
func shortHostname(hostname string) string { if i := strings.Index(hostname, "."); i >= 0 { return hostname[:i] } return hostname } // logName returns a new log file name containing tag, with start time t, and // the name for the symlink for tag. func logName(tag string, t time.Time) (name, link string) { name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", program, host, getUserName(), tag, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), pid) return name, program + "." + tag } var onceLogDirs sync.Once // create creates a new log file and returns the file and its filename, which // contains tag ("INFO", "FATAL", etc.) and t. If the file is created // successfully, create also attempts to update the symlink for that tag, ignoring // errors. // The startup argument indicates whether this is the initial startup of klog. // If startup is true, existing files are opened for appending instead of truncated. func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { if logging.logFile != "" { f, err := openOrCreate(logging.logFile, startup) if err == nil { return f, logging.logFile, nil } return nil, "", fmt.Errorf("log: unable to create log: %v", err) } onceLogDirs.Do(createLogDirs) if len(logDirs) == 0 { return nil, "", errors.New("log: no log dirs") } name, link := logName(tag, t) var lastErr error for _, dir := range logDirs { fname := filepath.Join(dir, name) f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) os.Remove(symlink) // ignore err os.Symlink(name, symlink) // ignore err return f, fname, nil } lastErr = err } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } // The startup argument indicates whether this is the initial startup of klog. // If startup is true, existing files are opened for appending instead of truncated. func openOrCreate(name string, startup bool) (*os.File, error) { if startup { f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) return f, err } f, err := os.Create(name) return f, err } klog-2.5.0/klog_test.go000066400000000000000000001111231400432665300147760ustar00rootroot00000000000000// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ // // Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package klog import ( "bytes" "errors" "flag" "fmt" "io/ioutil" stdLog "log" "os" "path/filepath" "reflect" "regexp" "runtime" "strconv" "strings" "sync" "testing" "time" "github.com/go-logr/logr" ) // TODO: This test package should be refactored so that tests cannot // interfere with each-other. // Test that shortHostname works as advertised. 
func TestShortHostname(t *testing.T) { for hostname, expect := range map[string]string{ "": "", "host": "host", "host.google.com": "host", } { if got := shortHostname(hostname); expect != got { t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) } } } // flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter. type flushBuffer struct { bytes.Buffer } func (f *flushBuffer) Flush() error { return nil } func (f *flushBuffer) Sync() error { return nil } // swap sets the log writers and returns the old array. func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { l.mu.Lock() defer l.mu.Unlock() old = l.file for i, w := range writers { logging.file[i] = w } return } // newBuffers sets the log writers to all new byte buffers and returns the old array. func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) } // contents returns the specified log value as a string. func contents(s severity) string { return logging.file[s].(*flushBuffer).String() } // contains reports whether the string is contained in the log. func contains(s severity, str string, t *testing.T) bool { return strings.Contains(contents(s), str) } // setFlags configures the logging flags how the test expects them. func setFlags() { logging.toStderr = false logging.addDirHeader = false } // Test that Info works as advertised. func TestInfo(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) Info("test") if !contains(infoLog, "I", t) { t.Errorf("Info has wrong character: %q", contents(infoLog)) } if !contains(infoLog, "test", t) { t.Error("Info failed") } } func TestInfoDepth(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) f := func() { InfoDepth(1, "depth-test1") } // The next three lines must stay together _, _, wantLine, _ := runtime.Caller(0) InfoDepth(0, "depth-test0") f() msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n") if len(msgs) != 2 { t.Fatalf("Got %d lines, expected 2", len(msgs)) } for i, m := range msgs { if !strings.HasPrefix(m, "I") { t.Errorf("InfoDepth[%d] has wrong character: %q", i, m) } w := fmt.Sprintf("depth-test%d", i) if !strings.Contains(m, w) { t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m) } // pull out the line number (between : and ]) msg := m[strings.LastIndex(m, ":")+1:] x := strings.Index(msg, "]") if x < 0 { t.Errorf("InfoDepth[%d]: missing ']': %q", i, m) continue } line, err := strconv.Atoi(msg[:x]) if err != nil { t.Errorf("InfoDepth[%d]: bad line number: %q", i, m) continue } wantLine++ if wantLine != line { t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine) } } } func init() { CopyStandardLogTo("INFO") } // Test that CopyStandardLogTo panics on bad input. func TestCopyStandardLogToPanic(t *testing.T) { defer func() { if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") { t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s) } }() CopyStandardLogTo("LOG") } // Test that using the standard log package logs to INFO. func TestStandardLog(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) stdLog.Print("test") if !contains(infoLog, "I", t) { t.Errorf("Info has wrong character: %q", contents(infoLog)) } if !contains(infoLog, "test", t) { t.Error("Info failed") } } // Test that the header has the correct format. 
func TestHeader(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) defer func(previous func() time.Time) { timeNow = previous }(timeNow) timeNow = func() time.Time { return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) } pid = 1234 Info("test") var line int format := "I0102 15:04:05.067890 1234 klog_test.go:%d] test\n" n, err := fmt.Sscanf(contents(infoLog), format, &line) if n != 1 || err != nil { t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) } // Scanf treats multiple spaces as equivalent to a single space, // so check for correct space-padding also. want := fmt.Sprintf(format, line) if contents(infoLog) != want { t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) } } func TestHeaderWithDir(t *testing.T) { setFlags() logging.addDirHeader = true defer logging.swap(logging.newBuffers()) defer func(previous func() time.Time) { timeNow = previous }(timeNow) timeNow = func() time.Time { return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) } pid = 1234 Info("test") re := regexp.MustCompile(`I0102 15:04:05.067890 1234 (klog|v2)/klog_test.go:(\d+)] test\n`) if !re.MatchString(contents(infoLog)) { t.Errorf("log format error: line does not match regex:\n\t%q\n", contents(infoLog)) } } // Test that an Error log goes to Warning and Info. // Even in the Info log, the source character will be E, so the data should // all be identical. func TestError(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) Error("test") if !contains(errorLog, "E", t) { t.Errorf("Error has wrong character: %q", contents(errorLog)) } if !contains(errorLog, "test", t) { t.Error("Error failed") } str := contents(errorLog) if !contains(warningLog, str, t) { t.Error("Warning failed") } if !contains(infoLog, str, t) { t.Error("Info failed") } } // Test that an Error log does not goes to Warning and Info. // Even in the Info log, the source character will be E, so the data should // all be identical. func TestErrorWithOneOutput(t *testing.T) { setFlags() logging.oneOutput = true buf := logging.newBuffers() defer func() { logging.swap(buf) logging.oneOutput = false }() Error("test") if !contains(errorLog, "E", t) { t.Errorf("Error has wrong character: %q", contents(errorLog)) } if !contains(errorLog, "test", t) { t.Error("Error failed") } str := contents(errorLog) if contains(warningLog, str, t) { t.Error("Warning failed") } if contains(infoLog, str, t) { t.Error("Info failed") } } // Test that a Warning log goes to Info. // Even in the Info log, the source character will be W, so the data should // all be identical. func TestWarning(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) Warning("test") if !contains(warningLog, "W", t) { t.Errorf("Warning has wrong character: %q", contents(warningLog)) } if !contains(warningLog, "test", t) { t.Error("Warning failed") } str := contents(warningLog) if !contains(infoLog, str, t) { t.Error("Info failed") } } // Test that a Warning log does not goes to Info. // Even in the Info log, the source character will be W, so the data should // all be identical. 
func TestWarningWithOneOutput(t *testing.T) { setFlags() logging.oneOutput = true buf := logging.newBuffers() defer func() { logging.swap(buf) logging.oneOutput = false }() Warning("test") if !contains(warningLog, "W", t) { t.Errorf("Warning has wrong character: %q", contents(warningLog)) } if !contains(warningLog, "test", t) { t.Error("Warning failed") } str := contents(warningLog) if contains(infoLog, str, t) { t.Error("Info failed") } } // Test that a V log goes to Info. func TestV(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) logging.verbosity.Set("2") defer logging.verbosity.Set("0") V(2).Info("test") if !contains(infoLog, "I", t) { t.Errorf("Info has wrong character: %q", contents(infoLog)) } if !contains(infoLog, "test", t) { t.Error("Info failed") } } // Test that a vmodule enables a log in this file. func TestVmoduleOn(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) logging.vmodule.Set("klog_test=2") defer logging.vmodule.Set("") if !V(1).Enabled() { t.Error("V not enabled for 1") } if !V(2).Enabled() { t.Error("V not enabled for 2") } if V(3).Enabled() { t.Error("V enabled for 3") } V(2).Info("test") if !contains(infoLog, "I", t) { t.Errorf("Info has wrong character: %q", contents(infoLog)) } if !contains(infoLog, "test", t) { t.Error("Info failed") } } // Test that a vmodule of another file does not enable a log in this file. func TestVmoduleOff(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) logging.vmodule.Set("notthisfile=2") defer logging.vmodule.Set("") for i := 1; i <= 3; i++ { if V(Level(i)).Enabled() { t.Errorf("V enabled for %d", i) } } V(2).Info("test") if contents(infoLog) != "" { t.Error("V logged incorrectly") } } func TestSetOutputDataRace(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) var wg sync.WaitGroup for i := 1; i <= 50; i++ { go func() { logging.flushDaemon() }() } for i := 1; i <= 50; i++ { wg.Add(1) go func() { defer wg.Done() SetOutput(ioutil.Discard) }() } for i := 1; i <= 50; i++ { go func() { logging.flushDaemon() }() } for i := 1; i <= 50; i++ { wg.Add(1) go func() { defer wg.Done() SetOutputBySeverity("INFO", ioutil.Discard) }() } for i := 1; i <= 50; i++ { go func() { logging.flushDaemon() }() } wg.Wait() } func TestLogToOutput(t *testing.T) { logging.toStderr = true defer logging.swap(logging.newBuffers()) buf := new(bytes.Buffer) SetOutput(buf) LogToStderr(false) Info("Does logging to an output work?") str := buf.String() if !strings.Contains(str, "Does logging to an output work?") { t.Fatalf("Expected %q to contain \"Does logging to an output work?\"", str) } } // vGlobs are patterns that match/don't match this file at V=2. var vGlobs = map[string]bool{ // Easy to test the numeric match here. "klog_test=1": false, // If -vmodule sets V to 1, V(2) will fail. "klog_test=2": true, "klog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed. // These all use 2 and check the patterns. All are true. "*=2": true, "?l*=2": true, "????_*=2": true, "??[mno]?_*t=2": true, // These all use 2 and check the patterns. All are false. "*x=2": false, "m*=2": false, "??_*=2": false, "?[abc]?_*t=2": false, } // Test that vmodule globbing works as advertised. 
func testVmoduleGlob(pat string, match bool, t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) defer logging.vmodule.Set("") logging.vmodule.Set(pat) if V(2).Enabled() != match { t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) } } // Test that a vmodule globbing works as advertised. func TestVmoduleGlob(t *testing.T) { for glob, match := range vGlobs { testVmoduleGlob(glob, match, t) } } func TestRollover(t *testing.T) { setFlags() var err error defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) logExitFunc = func(e error) { err = e } defer func(previous uint64) { MaxSize = previous }(MaxSize) MaxSize = 512 Info("x") // Be sure we have a file. info, ok := logging.file[infoLog].(*syncBuffer) if !ok { t.Fatal("info wasn't created") } if err != nil { t.Fatalf("info has initial error: %v", err) } fname0 := info.file.Name() Info(strings.Repeat("x", int(MaxSize))) // force a rollover if err != nil { t.Fatalf("info has error after big write: %v", err) } // Make sure the next log file gets a file name with a different // time stamp. // // TODO: determine whether we need to support subsecond log // rotation. C++ does not appear to handle this case (nor does it // handle Daylight Savings Time properly). time.Sleep(1 * time.Second) Info("x") // create a new file if err != nil { t.Fatalf("error after rotation: %v", err) } fname1 := info.file.Name() if fname0 == fname1 { t.Errorf("info.f.Name did not change: %v", fname0) } if info.nbytes >= info.maxbytes { t.Errorf("file size was not reset: %d", info.nbytes) } } func TestOpenAppendOnStart(t *testing.T) { const ( x string = "xxxxxxxxxx" y string = "yyyyyyyyyy" ) setFlags() var err error defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) logExitFunc = func(e error) { err = e } f, err := ioutil.TempFile("", "test_klog_OpenAppendOnStart") if err != nil { t.Fatalf("unexpected error: %v", err) } defer os.Remove(f.Name()) logging.logFile = f.Name() // Erase files created by prior tests, for i := range logging.file { logging.file[i] = nil } // Logging creates the file Info(x) _, ok := logging.file[infoLog].(*syncBuffer) if !ok { t.Fatal("info wasn't created") } // ensure we wrote what we expected logging.flushAll() b, err := ioutil.ReadFile(logging.logFile) if err != nil { t.Fatalf("unexpected error: %v", err) } if !strings.Contains(string(b), x) { t.Fatalf("got %s, missing expected Info log: %s", string(b), x) } // Set the file to nil so it gets "created" (opened) again on the next write. for i := range logging.file { logging.file[i] = nil } // Logging again should open the file again with O_APPEND instead of O_TRUNC Info(y) // ensure we wrote what we expected logging.lockAndFlushAll() b, err = ioutil.ReadFile(logging.logFile) if err != nil { t.Fatalf("unexpected error: %v", err) } if !strings.Contains(string(b), y) { t.Fatalf("got %s, missing expected Info log: %s", string(b), y) } // The initial log message should be preserved across create calls. logging.lockAndFlushAll() b, err = ioutil.ReadFile(logging.logFile) if err != nil { t.Fatalf("unexpected error: %v", err) } if !strings.Contains(string(b), x) { t.Fatalf("got %s, missing expected Info log: %s", string(b), x) } } func TestLogBacktraceAt(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) // The peculiar style of this code simplifies line counting and maintenance of the // tracing block below. 
var infoLine string setTraceLocation := func(file string, line int, ok bool, delta int) { if !ok { t.Fatal("could not get file:line") } _, file = filepath.Split(file) infoLine = fmt.Sprintf("%s:%d", file, line+delta) err := logging.traceLocation.Set(infoLine) if err != nil { t.Fatal("error setting log_backtrace_at: ", err) } } { // Start of tracing block. These lines know about each other's relative position. _, file, line, ok := runtime.Caller(0) setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. Info("we want a stack trace here") } numAppearances := strings.Count(contents(infoLog), infoLine) if numAppearances < 2 { // Need 2 appearances, one in the log header and one in the trace: // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here // ... // k8s.io/klog/klog_test.go:280 (0x41ba91) // ... // We could be more precise but that would require knowing the details // of the traceback format, which may not be dependable. t.Fatal("got no trace back; log is ", contents(infoLog)) } } func BenchmarkHeader(b *testing.B) { for i := 0; i < b.N; i++ { buf, _, _ := logging.header(infoLog, 0) logging.putBuffer(buf) } } func BenchmarkHeaderWithDir(b *testing.B) { logging.addDirHeader = true for i := 0; i < b.N; i++ { buf, _, _ := logging.header(infoLog, 0) logging.putBuffer(buf) } } func BenchmarkLogs(b *testing.B) { setFlags() defer logging.swap(logging.newBuffers()) testFile, err := ioutil.TempFile("", "test.log") if err != nil { b.Error("unable to create temporary file") } defer os.Remove(testFile.Name()) logging.verbosity.Set("0") logging.toStderr = false logging.alsoToStderr = false logging.stderrThreshold = fatalLog logging.logFile = testFile.Name() logging.swap([numSeverity]flushSyncWriter{nil, nil, nil, nil}) for i := 0; i < b.N; i++ { Error("error") Warning("warning") Info("info") } logging.flushAll() } // Test the logic on checking log size limitation. 
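// Roughly: CalculateMaxSize is expected to return logFileMaxSizeMB*1024*1024
// when a dedicated log file is configured, and the package-level MaxSize
// (1800 MiB by default) otherwise; the table below checks both branches.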
func TestFileSizeCheck(t *testing.T) { setFlags() testData := map[string]struct { testLogFile string testLogFileMaxSizeMB uint64 testCurrentSize uint64 expectedResult bool }{ "logFile not specified, exceeds max size": { testLogFile: "", testLogFileMaxSizeMB: 1, testCurrentSize: 1024 * 1024 * 2000, //exceeds the maxSize expectedResult: true, }, "logFile not specified, not exceeds max size": { testLogFile: "", testLogFileMaxSizeMB: 1, testCurrentSize: 1024 * 1024 * 1000, //smaller than the maxSize expectedResult: false, }, "logFile specified, exceeds max size": { testLogFile: "/tmp/test.log", testLogFileMaxSizeMB: 500, // 500MB testCurrentSize: 1024 * 1024 * 1000, //exceeds the logFileMaxSizeMB expectedResult: true, }, "logFile specified, not exceeds max size": { testLogFile: "/tmp/test.log", testLogFileMaxSizeMB: 500, // 500MB testCurrentSize: 1024 * 1024 * 300, //smaller than the logFileMaxSizeMB expectedResult: false, }, } for name, test := range testData { logging.logFile = test.testLogFile logging.logFileMaxSizeMB = test.testLogFileMaxSizeMB actualResult := test.testCurrentSize >= CalculateMaxSize() if test.expectedResult != actualResult { t.Fatalf("Error on test case '%v': Was expecting result equals %v, got %v", name, test.expectedResult, actualResult) } } } func TestInitFlags(t *testing.T) { fs1 := flag.NewFlagSet("test1", flag.PanicOnError) InitFlags(fs1) fs1.Set("log_dir", "/test1") fs1.Set("log_file_max_size", "1") fs2 := flag.NewFlagSet("test2", flag.PanicOnError) InitFlags(fs2) if logging.logDir != "/test1" { t.Fatalf("Expected log_dir to be %q, got %q", "/test1", logging.logDir) } fs2.Set("log_file_max_size", "2048") if logging.logFileMaxSizeMB != 2048 { t.Fatal("Expected log_file_max_size to be 2048") } } func TestInfoObjectRef(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) tests := []struct { name string ref ObjectRef want string }{ { name: "with ns", ref: ObjectRef{ Name: "test-name", Namespace: "test-ns", }, want: "test-ns/test-name", }, { name: "without ns", ref: ObjectRef{ Name: "test-name", Namespace: "", }, want: "test-name", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { Info(tt.ref) if !contains(infoLog, tt.want, t) { t.Errorf("expected %v, got %v", tt.want, contents(infoLog)) } }) } } type mockKmeta struct { name, ns string } func (m mockKmeta) GetName() string { return m.name } func (m mockKmeta) GetNamespace() string { return m.ns } func TestKObj(t *testing.T) { tests := []struct { name string obj KMetadata want ObjectRef }{ { name: "with ns", obj: mockKmeta{"test-name", "test-ns"}, want: ObjectRef{ Name: "test-name", Namespace: "test-ns", }, }, { name: "without ns", obj: mockKmeta{"test-name", ""}, want: ObjectRef{ Name: "test-name", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if KObj(tt.obj) != tt.want { t.Errorf("expected %v, got %v", tt.want, KObj(tt.obj)) } }) } } func TestKRef(t *testing.T) { tests := []struct { testname string name string namespace string want ObjectRef }{ { testname: "with ns", name: "test-name", namespace: "test-ns", want: ObjectRef{ Name: "test-name", Namespace: "test-ns", }, }, { testname: "without ns", name: "test-name", want: ObjectRef{ Name: "test-name", }, }, } for _, tt := range tests { t.Run(tt.testname, func(t *testing.T) { if KRef(tt.namespace, tt.name) != tt.want { t.Errorf("expected %v, got %v", tt.want, KRef(tt.namespace, tt.name)) } }) } } // Test that InfoS and InfoSDepth work as advertised. 
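// For orientation: InfoS("test", "pod", "kubedns") is expected to render
// along the lines of
//
//	I0102 15:04:05.067890 1234 klog_test.go:NN] "test" pod="kubedns"
//
// where NN is the caller's line number; the table below pins down the exact
// format string and lets Sscanf recover that line number.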
func TestInfoS(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) timeNow = func() time.Time { return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) } pid = 1234 var testDataInfo = []struct { msg string format string keysValues []interface{} }{ { msg: "test", format: "I0102 15:04:05.067890 1234 klog_test.go:%d] \"test\" pod=\"kubedns\"\n", keysValues: []interface{}{"pod", "kubedns"}, }, { msg: "test", format: "I0102 15:04:05.067890 1234 klog_test.go:%d] \"test\" replicaNum=20\n", keysValues: []interface{}{"replicaNum", 20}, }, { msg: "test", format: "I0102 15:04:05.067890 1234 klog_test.go:%d] \"test\" err=\"test error\"\n", keysValues: []interface{}{"err", errors.New("test error")}, }, { msg: "test", format: "I0102 15:04:05.067890 1234 klog_test.go:%d] \"test\" err=\"test error\"\n", keysValues: []interface{}{"err", errors.New("test error")}, }, } functions := []func(msg string, keyAndValues ...interface{}){ InfoS, myInfoS, } for _, f := range functions { for _, data := range testDataInfo { logging.file[infoLog] = &flushBuffer{} f(data.msg, data.keysValues...) var line int n, err := fmt.Sscanf(contents(infoLog), data.format, &line) if n != 1 || err != nil { t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) } want := fmt.Sprintf(data.format, line) if contents(infoLog) != want { t.Errorf("InfoS has wrong format: \n got:\t%s\nwant:\t%s", contents(infoLog), want) } } } } // Test that Verbose.InfoS works as advertised. func TestVInfoS(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) timeNow = func() time.Time { return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) } pid = 1234 var testDataInfo = []struct { msg string format string keysValues []interface{} }{ { msg: "test", format: "I0102 15:04:05.067890 1234 klog_test.go:%d] \"test\" pod=\"kubedns\"\n", keysValues: []interface{}{"pod", "kubedns"}, }, { msg: "test", format: "I0102 15:04:05.067890 1234 klog_test.go:%d] \"test\" replicaNum=20\n", keysValues: []interface{}{"replicaNum", 20}, }, { msg: "test", format: "I0102 15:04:05.067890 1234 klog_test.go:%d] \"test\" err=\"test error\"\n", keysValues: []interface{}{"err", errors.New("test error")}, }, } logging.verbosity.Set("2") defer logging.verbosity.Set("0") for l := Level(0); l < Level(4); l++ { for _, data := range testDataInfo { logging.file[infoLog] = &flushBuffer{} V(l).InfoS(data.msg, data.keysValues...) var want string var line int if l <= 2 { n, err := fmt.Sscanf(contents(infoLog), data.format, &line) if n != 1 || err != nil { t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) } want = fmt.Sprintf(data.format, line) } else { want = "" } if contents(infoLog) != want { t.Errorf("V(%d).InfoS has unexpected output: \n got:\t%s\nwant:\t%s", l, contents(infoLog), want) } } } } // Test that ErrorS and ErrorSDepth work as advertised. 
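// For orientation: ErrorS(err, "Failed to update pod status", "pod", "kubedns")
// is expected to produce an E-prefixed line with the error recorded under the
// err key, roughly
//
//	E0102 15:04:05.067890 1234 klog_test.go:NN] "Failed to update pod status" err="update status failed" pod="kubedns"
//
// which matches the format string asserted below.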
func TestErrorS(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) timeNow = func() time.Time { return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) } logging.logFile = "" pid = 1234 functions := []func(err error, msg string, keyAndValues ...interface{}){ ErrorS, myErrorS, } for _, f := range functions { logging.file[errorLog] = &flushBuffer{} f(fmt.Errorf("update status failed"), "Failed to update pod status", "pod", "kubedns") var line int format := "E0102 15:04:05.067890 1234 klog_test.go:%d] \"Failed to update pod status\" err=\"update status failed\" pod=\"kubedns\"\n" n, err := fmt.Sscanf(contents(errorLog), format, &line) if n != 1 || err != nil { t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(errorLog)) } want := fmt.Sprintf(format, line) if contents(errorLog) != want { t.Errorf("ErrorS has wrong format: \n got:\t%s\nwant:\t%s", contents(errorLog), want) } } } // Test that kvListFormat works as advertised. func TestKvListFormat(t *testing.T) { var testKVList = []struct { keysValues []interface{} want string }{ { keysValues: []interface{}{"pod", "kubedns"}, want: " pod=\"kubedns\"", }, { keysValues: []interface{}{"pod", "kubedns", "update", true}, want: " pod=\"kubedns\" update=true", }, { keysValues: []interface{}{"pod", "kubedns", "spec", struct { X int Y string N time.Time }{X: 76, Y: "strval", N: time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.UTC)}}, want: " pod=\"kubedns\" spec={X:76 Y:strval N:2006-01-02 15:04:05.06789 +0000 UTC}", }, { keysValues: []interface{}{"pod", "kubedns", "values", []int{8, 6, 7, 5, 3, 0, 9}}, want: " pod=\"kubedns\" values=[8 6 7 5 3 0 9]", }, { keysValues: []interface{}{"pod", "kubedns", "values", []string{"deployment", "svc", "configmap"}}, want: " pod=\"kubedns\" values=[deployment svc configmap]", }, { keysValues: []interface{}{"pod", "kubedns", "maps", map[string]int{"three": 4}}, want: " pod=\"kubedns\" maps=map[three:4]", }, { keysValues: []interface{}{"pod", KRef("kube-system", "kubedns"), "status", "ready"}, want: " pod=\"kube-system/kubedns\" status=\"ready\"", }, { keysValues: []interface{}{"pod", KRef("", "kubedns"), "status", "ready"}, want: " pod=\"kubedns\" status=\"ready\"", }, { keysValues: []interface{}{"pod", KObj(mockKmeta{"test-name", "test-ns"}), "status", "ready"}, want: " pod=\"test-ns/test-name\" status=\"ready\"", }, { keysValues: []interface{}{"pod", KObj(mockKmeta{"test-name", ""}), "status", "ready"}, want: " pod=\"test-name\" status=\"ready\"", }, } for _, d := range testKVList { b := &bytes.Buffer{} kvListFormat(b, d.keysValues...) 
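		// b now holds the serialized key/value pairs (each entry prefixed with
		// a single space), ready to compare against d.want.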
if b.String() != d.want { t.Errorf("kvlist format error:\n got:\n\t%s\nwant:\t%s", b.String(), d.want) } } } func createTestValueOfLoggingT() *loggingT { l := new(loggingT) l.toStderr = true l.alsoToStderr = false l.stderrThreshold = errorLog l.verbosity = Level(0) l.skipHeaders = false l.skipLogHeaders = false l.addDirHeader = false return l } func createTestValueOfModulePat(p string, li bool, le Level) modulePat { m := modulePat{} m.pattern = p m.literal = li m.level = le return m } func compareModuleSpec(a, b moduleSpec) bool { if len(a.filter) != len(b.filter) { return false } for i := 0; i < len(a.filter); i++ { if a.filter[i] != b.filter[i] { return false } } return true } func TestSetVState(t *testing.T) { //Target loggingT value want := createTestValueOfLoggingT() want.verbosity = Level(3) want.vmodule.filter = []modulePat{ createTestValueOfModulePat("recordio", true, Level(2)), createTestValueOfModulePat("file", true, Level(1)), createTestValueOfModulePat("gfs*", false, Level(3)), createTestValueOfModulePat("gopher*", false, Level(3)), } want.filterLength = 4 //loggingT value to which test is run target := createTestValueOfLoggingT() tf := []modulePat{ createTestValueOfModulePat("recordio", true, Level(2)), createTestValueOfModulePat("file", true, Level(1)), createTestValueOfModulePat("gfs*", false, Level(3)), createTestValueOfModulePat("gopher*", false, Level(3)), } target.setVState(Level(3), tf, true) if want.verbosity != target.verbosity || !compareModuleSpec(want.vmodule, target.vmodule) || want.filterLength != target.filterLength { t.Errorf("setVState method doesn't configure loggingT values' verbosity, vmodule or filterLength:\nwant:\n\tverbosity:\t%v\n\tvmodule:\t%v\n\tfilterLength:\t%v\ngot:\n\tverbosity:\t%v\n\tvmodule:\t%v\n\tfilterLength:\t%v", want.verbosity, want.vmodule, want.filterLength, target.verbosity, target.vmodule, target.filterLength) } } type sampleLogFilter struct{} func (f *sampleLogFilter) Filter(args []interface{}) []interface{} { for i, arg := range args { v, ok := arg.(string) if ok && strings.Contains(v, "filter me") { args[i] = "[FILTERED]" } } return args } func (f *sampleLogFilter) FilterF(format string, args []interface{}) (string, []interface{}) { return strings.Replace(format, "filter me", "[FILTERED]", 1), f.Filter(args) } func (f *sampleLogFilter) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) { return strings.Replace(msg, "filter me", "[FILTERED]", 1), f.Filter(keysAndValues) } func TestLogFilter(t *testing.T) { setFlags() defer logging.swap(logging.newBuffers()) SetLogFilter(&sampleLogFilter{}) defer SetLogFilter(nil) funcs := []struct { name string logFunc func(args ...interface{}) severity severity }{{ name: "Info", logFunc: Info, severity: infoLog, }, { name: "InfoDepth", logFunc: func(args ...interface{}) { InfoDepth(1, args...) }, severity: infoLog, }, { name: "Infoln", logFunc: Infoln, severity: infoLog, }, { name: "Infof", logFunc: func(args ...interface{}) { Infof(args[0].(string), args[1:]...) }, severity: infoLog, }, { name: "InfoS", logFunc: func(args ...interface{}) { InfoS(args[0].(string), args[1:]...) }, severity: infoLog, }, { name: "Warning", logFunc: Warning, severity: warningLog, }, { name: "WarningDepth", logFunc: func(args ...interface{}) { WarningDepth(1, args...) }, severity: warningLog, }, { name: "Warningln", logFunc: Warningln, severity: warningLog, }, { name: "Warningf", logFunc: func(args ...interface{}) { Warningf(args[0].(string), args[1:]...) 
}, severity: warningLog, }, { name: "Error", logFunc: Error, severity: errorLog, }, { name: "ErrorDepth", logFunc: func(args ...interface{}) { ErrorDepth(1, args...) }, severity: errorLog, }, { name: "Errorln", logFunc: Errorln, severity: errorLog, }, { name: "Errorf", logFunc: func(args ...interface{}) { Errorf(args[0].(string), args[1:]...) }, severity: errorLog, }, { name: "ErrorS", logFunc: func(args ...interface{}) { ErrorS(errors.New("testerror"), args[0].(string), args[1:]...) }, severity: errorLog, }, { name: "V().Info", logFunc: func(args ...interface{}) { V(0).Info(args...) }, severity: infoLog, }, { name: "V().Infoln", logFunc: func(args ...interface{}) { V(0).Infoln(args...) }, severity: infoLog, }, { name: "V().Infof", logFunc: func(args ...interface{}) { V(0).Infof(args[0].(string), args[1:]...) }, severity: infoLog, }, { name: "V().InfoS", logFunc: func(args ...interface{}) { V(0).InfoS(args[0].(string), args[1:]...) }, severity: infoLog, }, { name: "V().Error", logFunc: func(args ...interface{}) { V(0).Error(errors.New("test error"), args[0].(string), args[1:]...) }, severity: errorLog, }, { name: "V().ErrorS", logFunc: func(args ...interface{}) { V(0).ErrorS(errors.New("test error"), args[0].(string), args[1:]...) }, severity: errorLog, }} testcases := []struct { name string args []interface{} expectFiltered bool }{{ args: []interface{}{"%s:%s", "foo", "bar"}, expectFiltered: false, }, { args: []interface{}{"%s:%s", "foo", "filter me"}, expectFiltered: true, }, { args: []interface{}{"filter me %s:%s", "foo", "bar"}, expectFiltered: true, }} for _, f := range funcs { for _, tc := range testcases { logging.newBuffers() f.logFunc(tc.args...) got := contains(f.severity, "[FILTERED]", t) if got != tc.expectFiltered { t.Errorf("%s filter application failed, got %v, want %v", f.name, got, tc.expectFiltered) } } } } func TestInfoSWithLogr(t *testing.T) { logger := new(testLogr) testDataInfo := []struct { msg string keysValues []interface{} expected testLogrEntry }{{ msg: "foo", keysValues: []interface{}{}, expected: testLogrEntry{ severity: infoLog, msg: "foo", keysAndValues: []interface{}{}, }, }, { msg: "bar", keysValues: []interface{}{"a", 1}, expected: testLogrEntry{ severity: infoLog, msg: "bar", keysAndValues: []interface{}{"a", 1}, }, }} for _, data := range testDataInfo { t.Run(data.msg, func(t *testing.T) { SetLogger(logger) defer SetLogger(nil) defer logger.reset() InfoS(data.msg, data.keysValues...) 
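			// With a logger installed via SetLogger, InfoS is expected to be
			// routed to that logr.Logger (captured in logger.entries) rather
			// than written to klog's own output buffers.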
if !reflect.DeepEqual(logger.entries, []testLogrEntry{data.expected}) { t.Errorf("expected: %+v; but got: %+v", []testLogrEntry{data.expected}, logger.entries) } }) } } func TestErrorSWithLogr(t *testing.T) { logger := new(testLogr) testError := errors.New("testError") testDataInfo := []struct { err error msg string keysValues []interface{} expected testLogrEntry }{{ err: testError, msg: "foo1", keysValues: []interface{}{}, expected: testLogrEntry{ severity: errorLog, msg: "foo1", keysAndValues: []interface{}{}, err: testError, }, }, { err: testError, msg: "bar1", keysValues: []interface{}{"a", 1}, expected: testLogrEntry{ severity: errorLog, msg: "bar1", keysAndValues: []interface{}{"a", 1}, err: testError, }, }, { err: nil, msg: "foo2", keysValues: []interface{}{}, expected: testLogrEntry{ severity: errorLog, msg: "foo2", keysAndValues: []interface{}{}, err: nil, }, }, { err: nil, msg: "bar2", keysValues: []interface{}{"a", 1}, expected: testLogrEntry{ severity: errorLog, msg: "bar2", keysAndValues: []interface{}{"a", 1}, err: nil, }, }} for _, data := range testDataInfo { t.Run(data.msg, func(t *testing.T) { SetLogger(logger) defer SetLogger(nil) defer logger.reset() ErrorS(data.err, data.msg, data.keysValues...) if !reflect.DeepEqual(logger.entries, []testLogrEntry{data.expected}) { t.Errorf("expected: %+v; but got: %+v", []testLogrEntry{data.expected}, logger.entries) } }) } } type testLogr struct { entries []testLogrEntry mutex sync.Mutex } type testLogrEntry struct { severity severity msg string keysAndValues []interface{} err error } func (l *testLogr) reset() { l.mutex.Lock() defer l.mutex.Unlock() l.entries = []testLogrEntry{} } func (l *testLogr) Info(msg string, keysAndValues ...interface{}) { l.mutex.Lock() defer l.mutex.Unlock() l.entries = append(l.entries, testLogrEntry{ severity: infoLog, msg: msg, keysAndValues: keysAndValues, }) } func (l *testLogr) Error(err error, msg string, keysAndValues ...interface{}) { l.mutex.Lock() defer l.mutex.Unlock() l.entries = append(l.entries, testLogrEntry{ severity: errorLog, msg: msg, keysAndValues: keysAndValues, err: err, }) } func (l *testLogr) Enabled() bool { panic("not implemented") } func (l *testLogr) V(int) logr.Logger { panic("not implemented") } func (l *testLogr) WithName(string) logr.Logger { panic("not implemented") } func (l *testLogr) WithValues(...interface{}) logr.Logger { panic("not implemented") } // existedFlag contains all existed flag, without KlogPrefix var existedFlag = map[string]struct{}{ "log_dir": {}, "add_dir_header": {}, "alsologtostderr": {}, "log_backtrace_at": {}, "log_file": {}, "log_file_max_size": {}, "logtostderr": {}, "one_output": {}, "skip_headers": {}, "skip_log_headers": {}, "stderrthreshold": {}, "v": {}, "vmodule": {}, } // KlogPrefix define new flag prefix const KlogPrefix string = "klog" // TestKlogFlagPrefix check every klog flag's prefix, exclude flag in existedFlag func TestKlogFlagPrefix(t *testing.T) { fs := &flag.FlagSet{} InitFlags(fs) fs.VisitAll(func(f *flag.Flag) { if _, found := existedFlag[f.Name]; !found { if !strings.HasPrefix(f.Name, KlogPrefix) { t.Errorf("flag %s not have klog prefix: %s", f.Name, KlogPrefix) } } }) } klog-2.5.0/klog_wrappers_test.go000066400000000000000000000020141400432665300167170ustar00rootroot00000000000000// Copyright 2020 The Kubernetes Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package klog

// These helper functions must be in a separate source file because the
// tests in klog_test.go compare the logged source code file name against
// "klog_test.go". "klog_wrappers_test.go" must *not* be logged.

func myInfoS(msg string, keyAndValues ...interface{}) {
	InfoSDepth(1, msg, keyAndValues...)
}

func myErrorS(err error, msg string, keyAndValues ...interface{}) {
	ErrorSDepth(1, err, msg, keyAndValues...)
}

klog-2.5.0/klogr/000077500000000000000000000000001400432665300135735ustar00rootroot00000000000000
klog-2.5.0/klogr/README.md000066400000000000000000000015511400432665300150540ustar00rootroot00000000000000
# Minimal Go logging using klog

This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Kubernetes' [klog](https://github.com/kubernetes/klog). This
provides a relatively minimalist API to logging in Go, backed by a well-proven
implementation.

Because klogr was implemented before klog itself added support for structured
logging, the default in klogr is to serialize key/value pairs with JSON and
log the result as text messages via klog. This does not work well when klog
itself forwards output to a structured logger.

Therefore the recommended approach is to let klogr pass all log messages
through to klog and deal with structured logging there. Just beware that the
output of klog without a structured logger is meant to be human-readable, in
contrast to the JSON-based traditional format.

This is a BETA grade implementation.

klog-2.5.0/klogr/calldepth-test/000077500000000000000000000000001400432665300165105ustar00rootroot00000000000000
klog-2.5.0/klogr/calldepth-test/call_depth_helper_test.go000066400000000000000000000005701400432665300235360ustar00rootroot00000000000000
package calldepth

import (
	"github.com/go-logr/logr"
)

// Putting these functions into a separate file makes it possible to validate that
// their source code file is *not* logged because of WithCallDepth(1).

func myInfo(l logr.Logger, msg string) {
	logr.WithCallDepth(l, 1).Info(msg)
}

func myInfo2(l logr.Logger, msg string) {
	myInfo(logr.WithCallDepth(l, 1), msg)
}

klog-2.5.0/klogr/calldepth-test/call_depth_main_test.go000066400000000000000000000024341400432665300232040ustar00rootroot00000000000000
// Package calldepth does black-box testing.
//
// Another intentional effect is that "go test" compiles
// this into a separate binary which we need because
// we have to configure klog differently than TestOutput.
package calldepth

import (
	"bytes"
	"flag"
	"strings"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/klogr"
)

func TestCallDepth(t *testing.T) {
	klog.InitFlags(nil)
	flag.CommandLine.Set("v", "10")
	flag.CommandLine.Set("skip_headers", "false")
	flag.CommandLine.Set("logtostderr", "false")
	flag.CommandLine.Set("alsologtostderr", "false")
	flag.CommandLine.Set("stderrthreshold", "10")
	flag.Parse()

	t.Run("call-depth", func(t *testing.T) {
		logr := klogr.New()

		// hijack the klog output
		tmpWriteBuffer := bytes.NewBuffer(nil)
		klog.SetOutput(tmpWriteBuffer)

		validate := func(t *testing.T) {
			output := tmpWriteBuffer.String()
			if !strings.Contains(output, "call_depth_main_test.go:") {
				t.Fatalf("output should have contained call_depth_main_test.go, got instead: %s", output)
			}
		}
		t.Run("direct", func(t *testing.T) {
			logr.Info("hello world")
			validate(t)
		})
		t.Run("indirect", func(t *testing.T) {
			myInfo(logr, "hello world")
			validate(t)
		})
		t.Run("nested", func(t *testing.T) {
			myInfo2(logr, "hello world")
			validate(t)
		})
	})
}

klog-2.5.0/klogr/klogr.go000066400000000000000000000165731400432665300152520ustar00rootroot00000000000000
// Package klogr implements github.com/go-logr/logr.Logger in terms of
// k8s.io/klog.
package klogr

import (
	"bytes"
	"encoding/json"
	"fmt"
	"runtime"
	"sort"
	"strings"

	"github.com/go-logr/logr"

	"k8s.io/klog/v2"
)

// Option is a functional option that reconfigures the logger created with New.
type Option func(*klogger)

// Format defines how log output is produced.
type Format string

const (
	// FormatSerialize tells klogr to turn key/value pairs into text itself
	// before invoking klog.
	FormatSerialize Format = "Serialize"

	// FormatKlog tells klogr to pass all text messages and key/value pairs
	// directly to klog. Klog itself then serializes in a human-readable
	// format and optionally passes on to a structured logging backend.
	FormatKlog Format = "Klog"
)

// WithFormat selects the output format.
func WithFormat(format Format) Option {
	return func(l *klogger) {
		l.format = format
	}
}

// New returns a logr.Logger which serializes output itself
// and writes it via klog.
func New() logr.Logger {
	return NewWithOptions(WithFormat(FormatSerialize))
}

// NewWithOptions returns a logr.Logger which serializes as determined
// by the WithFormat option and writes via klog. The default is
// FormatKlog.
func NewWithOptions(options ...Option) logr.Logger {
	l := klogger{
		level:  0,
		prefix: "",
		values: nil,
		format: FormatKlog,
	}
	for _, option := range options {
		option(&l)
	}
	return l
}

type klogger struct {
	level     int
	callDepth int
	prefix    string
	values    []interface{}
	format    Format
}

func (l klogger) clone() klogger {
	return klogger{
		level:  l.level,
		prefix: l.prefix,
		values: copySlice(l.values),
		format: l.format,
	}
}

func copySlice(in []interface{}) []interface{} {
	out := make([]interface{}, len(in))
	copy(out, in)
	return out
}

// Magic string for intermediate frames that we should ignore.
const autogeneratedFrameName = "<autogenerated>"

// Discover how many frames we need to climb to find the caller. This approach
// was suggested by Ian Lance Taylor of the Go team, so it *should* be safe
// enough (famous last words).
//
// It is needed because binding the specific klogger functions to the
// logr interface creates one additional call frame that neither we nor
// our caller know about.
func framesToCaller() int {
	// 1 is the immediate caller. 3 should be too many.
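	// The extra frame being skipped is the thunk the compiler generates when
	// klogger's methods are called through the logr.Logger interface; such
	// frames report their file as autogeneratedFrameName, so the loop below
	// climbs until it sees a real source file (details are an implementation
	// assumption, not a guarantee).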
for i := 1; i < 3; i++ { _, file, _, _ := runtime.Caller(i + 1) // +1 for this function's frame if file != autogeneratedFrameName { return i } } return 1 // something went wrong, this is safe } // trimDuplicates will deduplicates elements provided in multiple KV tuple // slices, whilst maintaining the distinction between where the items are // contained. func trimDuplicates(kvLists ...[]interface{}) [][]interface{} { // maintain a map of all seen keys seenKeys := map[interface{}]struct{}{} // build the same number of output slices as inputs outs := make([][]interface{}, len(kvLists)) // iterate over the input slices backwards, as 'later' kv specifications // of the same key will take precedence over earlier ones for i := len(kvLists) - 1; i >= 0; i-- { // initialise this output slice outs[i] = []interface{}{} // obtain a reference to the kvList we are processing kvList := kvLists[i] // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for // slices that have an even number of elements. // We add (len(kvList) % 2) here to handle the case where there is an // odd number of elements in a kvList. // If there is an odd number, then the last element in the slice will // have the value 'null'. for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { k := kvList[i2] // if we have already seen this key, do not include it again if _, ok := seenKeys[k]; ok { continue } // make a note that we've observed a new key seenKeys[k] = struct{}{} // attempt to obtain the value of the key var v interface{} // i2+1 should only ever be out of bounds if we handling the first // iteration over a slice with an odd number of elements if i2+1 < len(kvList) { v = kvList[i2+1] } // add this KV tuple to the *start* of the output list to maintain // the original order as we are iterating over the slice backwards outs[i] = append([]interface{}{k, v}, outs[i]...) } } return outs } func flatten(kvList ...interface{}) string { keys := make([]string, 0, len(kvList)) vals := make(map[string]interface{}, len(kvList)) for i := 0; i < len(kvList); i += 2 { k, ok := kvList[i].(string) if !ok { panic(fmt.Sprintf("key is not a string: %s", pretty(kvList[i]))) } var v interface{} if i+1 < len(kvList) { v = kvList[i+1] } keys = append(keys, k) vals[k] = v } sort.Strings(keys) buf := bytes.Buffer{} for i, k := range keys { v := vals[k] if i > 0 { buf.WriteRune(' ') } buf.WriteString(pretty(k)) buf.WriteString("=") buf.WriteString(pretty(v)) } return buf.String() } func pretty(value interface{}) string { if err, ok := value.(error); ok { if _, ok := value.(json.Marshaler); !ok { value = err.Error() } } buffer := &bytes.Buffer{} encoder := json.NewEncoder(buffer) encoder.SetEscapeHTML(false) encoder.Encode(value) return strings.TrimSpace(string(buffer.Bytes())) } func (l klogger) Info(msg string, kvList ...interface{}) { if l.Enabled() { switch l.format { case FormatSerialize: msgStr := flatten("msg", msg) trimmed := trimDuplicates(l.values, kvList) fixedStr := flatten(trimmed[0]...) userStr := flatten(trimmed[1]...) klog.InfoDepth(framesToCaller()+l.callDepth, l.prefix, " ", msgStr, " ", fixedStr, " ", userStr) case FormatKlog: trimmed := trimDuplicates(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } klog.InfoSDepth(framesToCaller()+l.callDepth, msg, append(trimmed[0], trimmed[1]...)...) 
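			// (In the FormatKlog path klog does the serialization itself, so the
			// key/value pairs are forwarded unmodified after de-duplication.)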
} } } func (l klogger) Enabled() bool { return bool(klog.V(klog.Level(l.level)).Enabled()) } func (l klogger) Error(err error, msg string, kvList ...interface{}) { msgStr := flatten("msg", msg) var loggableErr interface{} if err != nil { loggableErr = err.Error() } switch l.format { case FormatSerialize: errStr := flatten("error", loggableErr) trimmed := trimDuplicates(l.values, kvList) fixedStr := flatten(trimmed[0]...) userStr := flatten(trimmed[1]...) klog.ErrorDepth(framesToCaller()+l.callDepth, l.prefix, " ", msgStr, " ", errStr, " ", fixedStr, " ", userStr) case FormatKlog: trimmed := trimDuplicates(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } klog.ErrorSDepth(framesToCaller()+l.callDepth, err, msg, append(trimmed[0], trimmed[1]...)...) } } func (l klogger) V(level int) logr.Logger { new := l.clone() new.level = level return new } // WithName returns a new logr.Logger with the specified name appended. klogr // uses '/' characters to separate name elements. Callers should not pass '/' // in the provided name string, but this library does not actually enforce that. func (l klogger) WithName(name string) logr.Logger { new := l.clone() if len(l.prefix) > 0 { new.prefix = l.prefix + "/" } new.prefix += name return new } func (l klogger) WithValues(kvList ...interface{}) logr.Logger { new := l.clone() new.values = append(new.values, kvList...) return new } func (l klogger) WithCallDepth(depth int) logr.Logger { new := l.clone() new.callDepth += depth return new } var _ logr.Logger = klogger{} var _ logr.CallDepthLogger = klogger{} klog-2.5.0/klogr/klogr_test.go000066400000000000000000000153531400432665300163060ustar00rootroot00000000000000package klogr import ( "bytes" "encoding/json" "errors" "flag" "strings" "testing" "k8s.io/klog/v2" "github.com/go-logr/logr" ) const ( formatDefault = "Default" formatNew = "New" ) func testOutput(t *testing.T, format string) { new := func() logr.Logger { switch format { case formatNew: return New() case formatDefault: return NewWithOptions() default: return NewWithOptions(WithFormat(Format(format))) } } tests := map[string]struct { klogr logr.Logger text string keysAndValues []interface{} err error expectedOutput string expectedKlogOutput string }{ "should log with values passed to keysAndValues": { klogr: new().V(0), text: "test", keysAndValues: []interface{}{"akey", "avalue"}, expectedOutput: ` "msg"="test" "akey"="avalue" `, expectedKlogOutput: `"test" akey="avalue" `, }, "should log with name and values passed to keysAndValues": { klogr: new().V(0).WithName("me"), text: "test", keysAndValues: []interface{}{"akey", "avalue"}, expectedOutput: `me "msg"="test" "akey"="avalue" `, expectedKlogOutput: `"me: test" akey="avalue" `, }, "should log with multiple names and values passed to keysAndValues": { klogr: new().V(0).WithName("hello").WithName("world"), text: "test", keysAndValues: []interface{}{"akey", "avalue"}, expectedOutput: `hello/world "msg"="test" "akey"="avalue" `, expectedKlogOutput: `"hello/world: test" akey="avalue" `, }, "should not print duplicate keys with the same value": { klogr: new().V(0), text: "test", keysAndValues: []interface{}{"akey", "avalue", "akey", "avalue"}, expectedOutput: ` "msg"="test" "akey"="avalue" `, expectedKlogOutput: `"test" akey="avalue" `, }, "should only print the last duplicate key when the values are passed to Info": { klogr: new().V(0), text: "test", keysAndValues: []interface{}{"akey", "avalue", "akey", "avalue2"}, expectedOutput: ` "msg"="test" "akey"="avalue2" `, expectedKlogOutput: 
`"test" akey="avalue2" `, }, "should only print the duplicate key that is passed to Info if one was passed to the logger": { klogr: new().WithValues("akey", "avalue"), text: "test", keysAndValues: []interface{}{"akey", "avalue"}, expectedOutput: ` "msg"="test" "akey"="avalue" `, expectedKlogOutput: `"test" akey="avalue" `, }, "should sort within logger and parameter key/value pairs in the default format and dump the logger pairs first": { klogr: new().WithValues("akey9", "avalue9", "akey8", "avalue8", "akey1", "avalue1"), text: "test", keysAndValues: []interface{}{"akey5", "avalue5", "akey4", "avalue4"}, expectedOutput: ` "msg"="test" "akey1"="avalue1" "akey8"="avalue8" "akey9"="avalue9" "akey4"="avalue4" "akey5"="avalue5" `, expectedKlogOutput: `"test" akey9="avalue9" akey8="avalue8" akey1="avalue1" akey5="avalue5" akey4="avalue4" `, }, "should only print the key passed to Info when one is already set on the logger": { klogr: new().WithValues("akey", "avalue"), text: "test", keysAndValues: []interface{}{"akey", "avalue2"}, expectedOutput: ` "msg"="test" "akey"="avalue2" `, expectedKlogOutput: `"test" akey="avalue2" `, }, "should correctly handle odd-numbers of KVs": { text: "test", keysAndValues: []interface{}{"akey", "avalue", "akey2"}, expectedOutput: ` "msg"="test" "akey"="avalue" "akey2"=null `, expectedKlogOutput: `"test" akey="avalue" akey2= `, }, "should correctly html characters": { text: "test", keysAndValues: []interface{}{"akey", "<&>"}, expectedOutput: ` "msg"="test" "akey"="<&>" `, expectedKlogOutput: `"test" akey="<&>" `, }, "should correctly handle odd-numbers of KVs in both log values and Info args": { klogr: new().WithValues("basekey1", "basevar1", "basekey2"), text: "test", keysAndValues: []interface{}{"akey", "avalue", "akey2"}, expectedOutput: ` "msg"="test" "basekey1"="basevar1" "basekey2"=null "akey"="avalue" "akey2"=null `, expectedKlogOutput: `"test" basekey1="basevar1" basekey2= akey="avalue" akey2= `, }, "should correctly print regular error types": { klogr: new().V(0), text: "test", keysAndValues: []interface{}{"err", errors.New("whoops")}, expectedOutput: ` "msg"="test" "err"="whoops" `, expectedKlogOutput: `"test" err="whoops" `, }, "should use MarshalJSON in the default format if an error type implements it": { klogr: new().V(0), text: "test", keysAndValues: []interface{}{"err", &customErrorJSON{"whoops"}}, expectedOutput: ` "msg"="test" "err"="WHOOPS" `, expectedKlogOutput: `"test" err="whoops" `, }, "should correctly print regular error types when using logr.Error": { klogr: new().V(0), text: "test", err: errors.New("whoops"), // The message is printed to three different log files (info, warning, error), so we see it three times in our output buffer. expectedOutput: ` "msg"="test" "error"="whoops" "msg"="test" "error"="whoops" "msg"="test" "error"="whoops" `, expectedKlogOutput: `"test" err="whoops" "test" err="whoops" "test" err="whoops" `, }, } for n, test := range tests { t.Run(n, func(t *testing.T) { klogr := test.klogr if klogr == nil { klogr = new() } // hijack the klog output tmpWriteBuffer := bytes.NewBuffer(nil) klog.SetOutput(tmpWriteBuffer) if test.err != nil { klogr.Error(test.err, test.text, test.keysAndValues...) } else { klogr.Info(test.text, test.keysAndValues...) 
} // call Flush to ensure the text isn't still buffered klog.Flush() actual := tmpWriteBuffer.String() expectedOutput := test.expectedOutput if format == string(FormatKlog) || format == formatDefault { expectedOutput = test.expectedKlogOutput } if actual != expectedOutput { t.Errorf("expected %q did not match actual %q", expectedOutput, actual) } }) } } func TestOutput(t *testing.T) { klog.InitFlags(nil) flag.CommandLine.Set("v", "10") flag.CommandLine.Set("skip_headers", "true") flag.CommandLine.Set("logtostderr", "false") flag.CommandLine.Set("alsologtostderr", "false") flag.CommandLine.Set("stderrthreshold", "10") flag.Parse() formats := []string{ formatNew, formatDefault, string(FormatSerialize), string(FormatKlog), } for _, format := range formats { t.Run(format, func(t *testing.T) { testOutput(t, format) }) } } type customErrorJSON struct { s string } func (e *customErrorJSON) Error() string { return e.s } func (e *customErrorJSON) MarshalJSON() ([]byte, error) { return json.Marshal(strings.ToUpper(e.s)) }
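// ExampleNewWithOptions is an illustrative sketch (not part of the original
// test suite): it shows the setup recommended in the klogr README, where klogr
// passes messages straight through to klog (FormatKlog) and lets klog handle
// structured output. The name "setup", the "component" key, and the logged
// values are made up for the example.
func ExampleNewWithOptions() {
	logger := NewWithOptions(WithFormat(FormatKlog)).WithName("setup").WithValues("component", "example")
	logger.Info("starting", "port", 8080)
	logger.Error(errors.New("boom"), "failed to start", "retryable", false)
}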