pax_global_header00006660000000000000000000000064143365025620014520gustar00rootroot0000000000000052 comment=05582a75b103f225d0c2b8075e4efcc9ed1b445e golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/000077500000000000000000000000001433650256200242035ustar00rootroot00000000000000golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/LICENSE000066400000000000000000000020531433650256200252100ustar00rootroot00000000000000MIT License Copyright (c) 2019 InfluxData Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/README.md000066400000000000000000000030671433650256200254700ustar00rootroot00000000000000# influxdb1-clientv2 influxdb1-clientv2 is the current Go client API for InfluxDB 1.x. For connecting to InfluxDB 2.x see the [influxdb-client-go](https://github.com/influxdata/influxdb-client-go) client library. InfluxDB is an open-source distributed time series database, find more about [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/) at https://docs.influxdata.com/influxdb/latest ## Usage To import into your Go project, run the following command in your terminal: `go get github.com/influxdata/influxdb1-client/v2` Then, in your import declaration section of your Go file, paste the following: `import "github.com/influxdata/influxdb1-client/v2"` If you get the error `build github.com/user/influx: cannot find module for path github.com/influxdata/influxdb1-client/v2` when trying to build: change your import to: ```go import( _ "github.com/influxdata/influxdb1-client" // this is important because of the bug in go mod client "github.com/influxdata/influxdb1-client/v2" ) ``` ## Example The following example creates a new client to the InfluxDB host on localhost:8086 and runs a query for the measurement `cpu_load` from the `mydb` database. 
``` go func ExampleClient_query() { c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } defer c.Close() q := client.NewQuery("SELECT count(value) FROM cpu_load", "mydb", "") if response, err := c.Query(q); err == nil && response.Error() == nil { fmt.Println(response.Results) } } ``` golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/example_test.go000066400000000000000000000046121433650256200272270ustar00rootroot00000000000000package client_test import ( "fmt" "log" "math/rand" "net/url" "os" "strconv" "time" client "github.com/influxdata/influxdb1-client" ) func ExampleNewClient() { host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) if err != nil { log.Fatal(err) } // NOTE: this assumes you've setup a user and have setup shell env variables, // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. conf := client.Config{ URL: *host, Username: os.Getenv("INFLUX_USER"), Password: os.Getenv("INFLUX_PWD"), } con, err := client.NewClient(conf) if err != nil { log.Fatal(err) } log.Println("Connection", con) } func ExampleClient_Ping() { host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) if err != nil { log.Fatal(err) } con, err := client.NewClient(client.Config{URL: *host}) if err != nil { log.Fatal(err) } dur, ver, err := con.Ping() if err != nil { log.Fatal(err) } log.Printf("Happy as a hippo! %v, %s", dur, ver) } func ExampleClient_Query() { host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) if err != nil { log.Fatal(err) } con, err := client.NewClient(client.Config{URL: *host}) if err != nil { log.Fatal(err) } q := client.Query{ Command: "select count(value) from shapes", Database: "square_holes", } if response, err := con.Query(q); err == nil && response.Error() == nil { log.Println(response.Results) } } func ExampleClient_Write() { host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) if err != nil { log.Fatal(err) } con, err := client.NewClient(client.Config{URL: *host}) if err != nil { log.Fatal(err) } var ( shapes = []string{"circle", "rectangle", "square", "triangle"} colors = []string{"red", "blue", "green"} sampleSize = 1000 pts = make([]client.Point, sampleSize) ) rand.Seed(42) for i := 0; i < sampleSize; i++ { pts[i] = client.Point{ Measurement: "shapes", Tags: map[string]string{ "color": strconv.Itoa(rand.Intn(len(colors))), "shape": strconv.Itoa(rand.Intn(len(shapes))), }, Fields: map[string]interface{}{ "value": rand.Intn(sampleSize), }, Time: time.Now(), Precision: "s", } } bps := client.BatchPoints{ Points: pts, Database: "BumbeBeeTuna", RetentionPolicy: "default", } _, err = con.Write(bps) if err != nil { log.Fatal(err) } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/influxdb.go000066400000000000000000000555721433650256200263630ustar00rootroot00000000000000// Package client implements a now-deprecated client for InfluxDB; // use github.com/influxdata/influxdb1-client/v2 instead. 
package client // import "github.com/influxdata/influxdb1-client" import ( "bytes" "context" "crypto/tls" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/influxdata/influxdb1-client/models" ) const ( // DefaultHost is the default host used to connect to an InfluxDB instance DefaultHost = "localhost" // DefaultPort is the default port used to connect to an InfluxDB instance DefaultPort = 8086 // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance DefaultTimeout = 0 ) // Query is used to send a command to the server. Both Command and Database are required. type Query struct { Command string Database string // RetentionPolicy tells the server which retention policy to use by default. // This option is only effective when querying a server of version 1.6.0 or later. RetentionPolicy string // Chunked tells the server to send back chunked responses. This places // less load on the server by sending back chunks of the response rather // than waiting for the entire response all at once. Chunked bool // ChunkSize sets the maximum number of rows that will be returned per // chunk. Chunks are either divided based on their series or if they hit // the chunk size limit. // // Chunked must be set to true for this option to be used. ChunkSize int // NodeID sets the data node to use for the query results. This option only // has any effect in the enterprise version of the software where there can be // more than one data node and is primarily useful for analyzing differences in // data. The default behavior is to automatically select the appropriate data // nodes to retrieve all of the data. On a database where the number of data nodes // is greater than the replication factor, it is expected that setting this option // will only retrieve partial data. NodeID int } // ParseConnectionString will parse a string to create a valid connection URL func ParseConnectionString(path string, ssl bool) (url.URL, error) { var host string var port int h, p, err := net.SplitHostPort(path) if err != nil { if path == "" { host = DefaultHost } else { host = path } // If they didn't specify a port, always use the default port port = DefaultPort } else { host = h port, err = strconv.Atoi(p) if err != nil { return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) } } u := url.URL{ Scheme: "http", Host: host, } if ssl { u.Scheme = "https" if port != 443 { u.Host = net.JoinHostPort(host, strconv.Itoa(port)) } } else if port != 80 { u.Host = net.JoinHostPort(host, strconv.Itoa(port)) } return u, nil } // Config is used to specify what server to connect to. // URL: The URL of the server connecting to. // Username/Password are optional. They will be passed via basic auth if provided. // UserAgent: If not provided, will default "InfluxDBClient", // Timeout: If not provided, will default to 0 (no timeout) type Config struct { URL url.URL UnixSocket string Username string Password string UserAgent string Timeout time.Duration Precision string WriteConsistency string UnsafeSsl bool Proxy func(req *http.Request) (*url.URL, error) TLS *tls.Config } // NewConfig will create a config to be used in connecting to the client func NewConfig() Config { return Config{ Timeout: DefaultTimeout, } } // Client is used to make calls to the server. 
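// A minimal, illustrative usage sketch (assumes an InfluxDB 1.x server reachable
// at localhost:8086; error handling is abbreviated and the database/measurement
// names are the placeholders used in example_test.go):
//
//	host, _ := url.Parse("http://localhost:8086")
//	c, err := client.NewClient(client.Config{URL: *host})
//	if err != nil {
//		log.Fatal(err)
//	}
//	resp, err := c.Query(client.Query{Command: "select count(value) from shapes", Database: "square_holes"})
//	if err == nil && resp.Error() == nil {
//		log.Println(resp.Results)
//	}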
type Client struct { url url.URL unixSocket string username string password string httpClient *http.Client userAgent string precision string } const ( // ConsistencyOne requires at least one data node acknowledged a write. ConsistencyOne = "one" // ConsistencyAll requires all data nodes to acknowledge a write. ConsistencyAll = "all" // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. ConsistencyQuorum = "quorum" // ConsistencyAny allows for hinted hand off, potentially no write happened yet. ConsistencyAny = "any" ) // NewClient will instantiate and return a connected client to issue commands to the server. func NewClient(c Config) (*Client, error) { tlsConfig := new(tls.Config) if c.TLS != nil { tlsConfig = c.TLS.Clone() } tlsConfig.InsecureSkipVerify = c.UnsafeSsl tr := &http.Transport{ Proxy: c.Proxy, TLSClientConfig: tlsConfig, } if c.UnixSocket != "" { // No need for compression in local communications. tr.DisableCompression = true tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { return net.Dial("unix", c.UnixSocket) } } client := Client{ url: c.URL, unixSocket: c.UnixSocket, username: c.Username, password: c.Password, httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, userAgent: c.UserAgent, precision: c.Precision, } if client.userAgent == "" { client.userAgent = "InfluxDBClient" } return &client, nil } // SetAuth will update the username and passwords func (c *Client) SetAuth(u, p string) { c.username = u c.password = p } // SetPrecision will update the precision func (c *Client) SetPrecision(precision string) { c.precision = precision } // Query sends a command to the server and returns the Response func (c *Client) Query(q Query) (*Response, error) { return c.QueryContext(context.Background(), q) } // QueryContext sends a command to the server and returns the Response // It uses a context that can be cancelled by the command line client func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) { u := c.url u.Path = path.Join(u.Path, "query") values := u.Query() values.Set("q", q.Command) values.Set("db", q.Database) if q.RetentionPolicy != "" { values.Set("rp", q.RetentionPolicy) } if q.Chunked { values.Set("chunked", "true") if q.ChunkSize > 0 { values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) } } if q.NodeID > 0 { values.Set("node_id", strconv.Itoa(q.NodeID)) } if c.precision != "" { values.Set("epoch", c.precision) } u.RawQuery = values.Encode() req, err := http.NewRequest("POST", u.String(), nil) if err != nil { return nil, err } req.Header.Set("User-Agent", c.userAgent) if c.username != "" { req.SetBasicAuth(c.username, c.password) } req = req.WithContext(ctx) resp, err := c.httpClient.Do(req) if err != nil { return nil, err } defer func() { io.Copy(ioutil.Discard, resp.Body) // https://github.com/influxdata/influxdb1-client/issues/58 resp.Body.Close() }() var response Response if q.Chunked { cr := NewChunkedResponse(resp.Body) for { r, err := cr.NextResponse() if err != nil { // If we got an error while decoding the response, send that back. return nil, err } if r == nil { break } response.Results = append(response.Results, r.Results...) if r.Err != nil { response.Err = r.Err break } } } else { dec := json.NewDecoder(resp.Body) dec.UseNumber() if err := dec.Decode(&response); err != nil { // Ignore EOF errors if we got an invalid status code. 
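			// (A failed request may return an empty or non-JSON body, so a bare EOF
			// here is not fatal; the non-OK status code is turned into an error below.)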
if !(err == io.EOF && resp.StatusCode != http.StatusOK) { return nil, err } } } // If we don't have an error in our json response, and didn't get StatusOK, // then send back an error. if resp.StatusCode != http.StatusOK && response.Error() == nil { return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) } return &response, nil } // Write takes BatchPoints and allows for writing of multiple points with defaults // If successful, error is nil and Response is nil // If an error occurs, Response may contain additional information if populated. func (c *Client) Write(bp BatchPoints) (*Response, error) { u := c.url u.Path = path.Join(u.Path, "write") var b bytes.Buffer for _, p := range bp.Points { err := checkPointTypes(p) if err != nil { return nil, err } if p.Raw != "" { if _, err := b.WriteString(p.Raw); err != nil { return nil, err } } else { for k, v := range bp.Tags { if p.Tags == nil { p.Tags = make(map[string]string, len(bp.Tags)) } p.Tags[k] = v } if _, err := b.WriteString(p.MarshalString()); err != nil { return nil, err } } if err := b.WriteByte('\n'); err != nil { return nil, err } } req, err := http.NewRequest("POST", u.String(), &b) if err != nil { return nil, err } req.Header.Set("Content-Type", "") req.Header.Set("User-Agent", c.userAgent) if c.username != "" { req.SetBasicAuth(c.username, c.password) } precision := bp.Precision if precision == "" { precision = c.precision } params := req.URL.Query() params.Set("db", bp.Database) params.Set("rp", bp.RetentionPolicy) params.Set("precision", precision) params.Set("consistency", bp.WriteConsistency) req.URL.RawQuery = params.Encode() resp, err := c.httpClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() var response Response body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { var err = fmt.Errorf(string(body)) response.Err = err return &response, err } return nil, nil } // WriteLineProtocol takes a string with line returns to delimit each write // If successful, error is nil and Response is nil // If an error occurs, Response may contain additional information if populated. func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { u := c.url u.Path = path.Join(u.Path, "write") r := strings.NewReader(data) req, err := http.NewRequest("POST", u.String(), r) if err != nil { return nil, err } req.Header.Set("Content-Type", "") req.Header.Set("User-Agent", c.userAgent) if c.username != "" { req.SetBasicAuth(c.username, c.password) } params := req.URL.Query() params.Set("db", database) params.Set("rp", retentionPolicy) params.Set("precision", precision) params.Set("consistency", writeConsistency) req.URL.RawQuery = params.Encode() resp, err := c.httpClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() var response Response body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { err := fmt.Errorf(string(body)) response.Err = err return &response, err } return nil, nil } // Ping will check to see if the server is up // Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
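// A short illustrative sketch (error handling abbreviated):
//
//	rtt, version, err := c.Ping()
//	if err == nil {
//		log.Printf("InfluxDB %s responded in %v", version, rtt)
//	}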
func (c *Client) Ping() (time.Duration, string, error) { now := time.Now() u := c.url u.Path = path.Join(u.Path, "ping") req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return 0, "", err } req.Header.Set("User-Agent", c.userAgent) if c.username != "" { req.SetBasicAuth(c.username, c.password) } resp, err := c.httpClient.Do(req) if err != nil { return 0, "", err } defer resp.Body.Close() version := resp.Header.Get("X-Influxdb-Version") return time.Since(now), version, nil } // Structs // Message represents a user message. type Message struct { Level string `json:"level,omitempty"` Text string `json:"text,omitempty"` } // Result represents a resultset returned from a single statement. type Result struct { Series []models.Row Messages []*Message Err error } // MarshalJSON encodes the result into JSON. func (r *Result) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. var o struct { Series []models.Row `json:"series,omitempty"` Messages []*Message `json:"messages,omitempty"` Err string `json:"error,omitempty"` } // Copy fields to output struct. o.Series = r.Series o.Messages = r.Messages if r.Err != nil { o.Err = r.Err.Error() } return json.Marshal(&o) } // UnmarshalJSON decodes the data into the Result struct func (r *Result) UnmarshalJSON(b []byte) error { var o struct { Series []models.Row `json:"series,omitempty"` Messages []*Message `json:"messages,omitempty"` Err string `json:"error,omitempty"` } dec := json.NewDecoder(bytes.NewBuffer(b)) dec.UseNumber() err := dec.Decode(&o) if err != nil { return err } r.Series = o.Series r.Messages = o.Messages if o.Err != "" { r.Err = errors.New(o.Err) } return nil } // Response represents a list of statement results. type Response struct { Results []Result Err error } // MarshalJSON encodes the response into JSON. func (r *Response) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. var o struct { Results []Result `json:"results,omitempty"` Err string `json:"error,omitempty"` } // Copy fields to output struct. o.Results = r.Results if r.Err != nil { o.Err = r.Err.Error() } return json.Marshal(&o) } // UnmarshalJSON decodes the data into the Response struct func (r *Response) UnmarshalJSON(b []byte) error { var o struct { Results []Result `json:"results,omitempty"` Err string `json:"error,omitempty"` } dec := json.NewDecoder(bytes.NewBuffer(b)) dec.UseNumber() err := dec.Decode(&o) if err != nil { return err } r.Results = o.Results if o.Err != "" { r.Err = errors.New(o.Err) } return nil } // Error returns the first error from any statement. // Returns nil if no errors occurred on any statements. func (r *Response) Error() error { if r.Err != nil { return r.Err } for _, result := range r.Results { if result.Err != nil { return result.Err } } return nil } // duplexReader reads responses and writes it to another writer while // satisfying the reader interface. type duplexReader struct { r io.Reader w io.Writer } func (r *duplexReader) Read(p []byte) (n int, err error) { n, err = r.r.Read(p) if err == nil { r.w.Write(p[:n]) } return n, err } // ChunkedResponse represents a response from the server that // uses chunking to stream the output. type ChunkedResponse struct { dec *json.Decoder duplex *duplexReader buf bytes.Buffer } // NewChunkedResponse reads a stream and produces responses from the stream. 
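// A minimal consumption sketch (this mirrors how QueryContext drains chunked
// responses internally; variable names are illustrative):
//
//	cr := NewChunkedResponse(resp.Body)
//	for {
//		r, err := cr.NextResponse()
//		if err != nil {
//			break // decoding failed; the stream may have ended with an error message
//		}
//		if r == nil {
//			break // end of stream
//		}
//		// consume r.Results here
//	}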
func NewChunkedResponse(r io.Reader) *ChunkedResponse { resp := &ChunkedResponse{} resp.duplex = &duplexReader{r: r, w: &resp.buf} resp.dec = json.NewDecoder(resp.duplex) resp.dec.UseNumber() return resp } // NextResponse reads the next line of the stream and returns a response. func (r *ChunkedResponse) NextResponse() (*Response, error) { var response Response if err := r.dec.Decode(&response); err != nil { if err == io.EOF { return nil, nil } // A decoding error happened. This probably means the server crashed // and sent a last-ditch error message to us. Ensure we have read the // entirety of the connection to get any remaining error text. io.Copy(ioutil.Discard, r.duplex) return nil, errors.New(strings.TrimSpace(r.buf.String())) } r.buf.Reset() return &response, nil } // Point defines the fields that will be written to the database // Measurement, Time, and Fields are required // Precision can be specified if the time is in epoch format (integer). // Valid values for Precision are n, u, ms, s, m, and h type Point struct { Measurement string Tags map[string]string Time time.Time Fields map[string]interface{} Precision string Raw string } // MarshalJSON will format the time in RFC3339Nano // Precision is also ignored as it is only used for writing, not reading // Or another way to say it is we always send back in nanosecond precision func (p *Point) MarshalJSON() ([]byte, error) { point := struct { Measurement string `json:"measurement,omitempty"` Tags map[string]string `json:"tags,omitempty"` Time string `json:"time,omitempty"` Fields map[string]interface{} `json:"fields,omitempty"` Precision string `json:"precision,omitempty"` }{ Measurement: p.Measurement, Tags: p.Tags, Fields: p.Fields, Precision: p.Precision, } // Let it omit empty if it's really zero if !p.Time.IsZero() { point.Time = p.Time.UTC().Format(time.RFC3339Nano) } return json.Marshal(&point) } // MarshalString renders string representation of a Point with specified // precision. The default precision is nanoseconds. func (p *Point) MarshalString() string { pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) if err != nil { return "# ERROR: " + err.Error() + " " + p.Measurement } if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { return pt.String() } return pt.PrecisionString(p.Precision) } // UnmarshalJSON decodes the data into the Point struct func (p *Point) UnmarshalJSON(b []byte) error { var normal struct { Measurement string `json:"measurement"` Tags map[string]string `json:"tags"` Time time.Time `json:"time"` Precision string `json:"precision"` Fields map[string]interface{} `json:"fields"` } var epoch struct { Measurement string `json:"measurement"` Tags map[string]string `json:"tags"` Time *int64 `json:"time"` Precision string `json:"precision"` Fields map[string]interface{} `json:"fields"` } if err := func() error { var err error dec := json.NewDecoder(bytes.NewBuffer(b)) dec.UseNumber() if err = dec.Decode(&epoch); err != nil { return err } // Convert from epoch to time.Time, but only if Time // was actually set. 
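		// A nil epoch.Time means the JSON document carried no "time" key; in that
		// case ts (and therefore p.Time) is left as the zero time.Time value.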
var ts time.Time if epoch.Time != nil { ts, err = EpochToTime(*epoch.Time, epoch.Precision) if err != nil { return err } } p.Measurement = epoch.Measurement p.Tags = epoch.Tags p.Time = ts p.Precision = epoch.Precision p.Fields = normalizeFields(epoch.Fields) return nil }(); err == nil { return nil } dec := json.NewDecoder(bytes.NewBuffer(b)) dec.UseNumber() if err := dec.Decode(&normal); err != nil { return err } normal.Time = SetPrecision(normal.Time, normal.Precision) p.Measurement = normal.Measurement p.Tags = normal.Tags p.Time = normal.Time p.Precision = normal.Precision p.Fields = normalizeFields(normal.Fields) return nil } // Remove any notion of json.Number func normalizeFields(fields map[string]interface{}) map[string]interface{} { newFields := map[string]interface{}{} for k, v := range fields { switch v := v.(type) { case json.Number: jv, e := v.Float64() if e != nil { panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) } newFields[k] = jv default: newFields[k] = v } } return newFields } // BatchPoints is used to send batched data in a single write. // Database and Points are required // If no retention policy is specified, it will use the databases default retention policy. // If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. // If time is specified, it will be applied to any point with an empty time. // Precision can be specified if the time is in epoch format (integer). // Valid values for Precision are n, u, ms, s, m, and h type BatchPoints struct { Points []Point `json:"points,omitempty"` Database string `json:"database,omitempty"` RetentionPolicy string `json:"retentionPolicy,omitempty"` Tags map[string]string `json:"tags,omitempty"` Time time.Time `json:"time,omitempty"` Precision string `json:"precision,omitempty"` WriteConsistency string `json:"-"` } // UnmarshalJSON decodes the data into the BatchPoints struct func (bp *BatchPoints) UnmarshalJSON(b []byte) error { var normal struct { Points []Point `json:"points"` Database string `json:"database"` RetentionPolicy string `json:"retentionPolicy"` Tags map[string]string `json:"tags"` Time time.Time `json:"time"` Precision string `json:"precision"` } var epoch struct { Points []Point `json:"points"` Database string `json:"database"` RetentionPolicy string `json:"retentionPolicy"` Tags map[string]string `json:"tags"` Time *int64 `json:"time"` Precision string `json:"precision"` } if err := func() error { var err error if err = json.Unmarshal(b, &epoch); err != nil { return err } // Convert from epoch to time.Time var ts time.Time if epoch.Time != nil { ts, err = EpochToTime(*epoch.Time, epoch.Precision) if err != nil { return err } } bp.Points = epoch.Points bp.Database = epoch.Database bp.RetentionPolicy = epoch.RetentionPolicy bp.Tags = epoch.Tags bp.Time = ts bp.Precision = epoch.Precision return nil }(); err == nil { return nil } if err := json.Unmarshal(b, &normal); err != nil { return err } normal.Time = SetPrecision(normal.Time, normal.Precision) bp.Points = normal.Points bp.Database = normal.Database bp.RetentionPolicy = normal.RetentionPolicy bp.Tags = normal.Tags bp.Time = normal.Time bp.Precision = normal.Precision return nil } // utility functions // Addr provides the current url as a string of the server the client is connected to. 
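// If the client was configured with a UnixSocket, the socket path is returned
// instead of the URL.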
func (c *Client) Addr() string { if c.unixSocket != "" { return c.unixSocket } return c.url.String() } // checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. func checkPointTypes(p Point) error { for _, v := range p.Fields { switch v.(type) { case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string, nil: return nil default: return fmt.Errorf("unsupported point type: %T", v) } } return nil } // helper functions // EpochToTime takes a unix epoch time and uses precision to return back a time.Time func EpochToTime(epoch int64, precision string) (time.Time, error) { if precision == "" { precision = "s" } var t time.Time switch precision { case "h": t = time.Unix(0, epoch*int64(time.Hour)) case "m": t = time.Unix(0, epoch*int64(time.Minute)) case "s": t = time.Unix(0, epoch*int64(time.Second)) case "ms": t = time.Unix(0, epoch*int64(time.Millisecond)) case "u": t = time.Unix(0, epoch*int64(time.Microsecond)) case "n": t = time.Unix(0, epoch) default: return time.Time{}, fmt.Errorf("Unknown precision %q", precision) } return t, nil } // SetPrecision will round a time to the specified precision func SetPrecision(t time.Time, precision string) time.Time { switch precision { case "n": case "u": return t.Round(time.Microsecond) case "ms": return t.Round(time.Millisecond) case "s": return t.Round(time.Second) case "m": return t.Round(time.Minute) case "h": return t.Round(time.Hour) } return t } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/influxdb_test.go000066400000000000000000000702671433650256200274200ustar00rootroot00000000000000package client_test import ( "context" "crypto/tls" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/http/httptest" "net/url" "os" "strings" "testing" "time" client "github.com/influxdata/influxdb1-client" ) func BenchmarkWrite(b *testing.B) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data client.Response w.WriteHeader(http.StatusNoContent) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { b.Fatalf("unexpected error. expected %v, actual %v", nil, err) } bp := client.BatchPoints{ Points: []client.Point{ {Fields: map[string]interface{}{"value": 101}}}, } for i := 0; i < b.N; i++ { r, err := c.Write(bp) if err != nil { b.Fatalf("unexpected error. expected %v, actual %v", nil, err) } if r != nil { b.Fatalf("unexpected response. 
expected %v, actual %v", nil, r) } } } func BenchmarkUnmarshalJSON2Tags(b *testing.B) { var bp client.BatchPoints data := []byte(` { "database": "foo", "retentionPolicy": "bar", "points": [ { "name": "cpu", "tags": { "host": "server01", "region": "us-east1" }, "time": 14244733039069373, "precision": "n", "fields": { "value": 4541770385657154000 } } ] } `) for i := 0; i < b.N; i++ { if err := json.Unmarshal(data, &bp); err != nil { b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) } b.SetBytes(int64(len(data))) } } func BenchmarkUnmarshalJSON10Tags(b *testing.B) { var bp client.BatchPoints data := []byte(` { "database": "foo", "retentionPolicy": "bar", "points": [ { "name": "cpu", "tags": { "host": "server01", "region": "us-east1", "tag1": "value1", "tag2": "value2", "tag2": "value3", "tag4": "value4", "tag5": "value5", "tag6": "value6", "tag7": "value7", "tag8": "value8" }, "time": 14244733039069373, "precision": "n", "fields": { "value": 4541770385657154000 } } ] } `) for i := 0; i < b.N; i++ { if err := json.Unmarshal(data, &bp); err != nil { b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) } b.SetBytes(int64(len(data))) } } func TestNewClient(t *testing.T) { config := client.Config{} _, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_Ping(t *testing.T) { ts := emptyTestServer() defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } d, version, err := c.Ping() if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } if d.Nanoseconds() == 0 { t.Fatalf("expected a duration greater than zero. actual %v", d.Nanoseconds()) } if version != "x.x" { t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version) } } func TestClient_Query(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{} _, err = c.Query(query) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_Query_RP(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { params := r.URL.Query() if got, exp := params.Get("db"), "db0"; got != exp { t.Errorf("unexpected db query parameter: %s != %s", exp, got) } if got, exp := params.Get("rp"), "rp0"; got != exp { t.Errorf("unexpected rp query parameter: %s != %s", exp, got) } var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{ Database: "db0", RetentionPolicy: "rp0", } _, err = c.Query(query) if err != nil { t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) } } func TestClient_ChunkedQuery(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data client.Response w.WriteHeader(http.StatusOK) enc := json.NewEncoder(w) _ = enc.Encode(data) _ = enc.Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{Chunked: true} _, err = c.Query(query) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_QueryContext(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{} ctx, cancel := context.WithCancel(context.Background()) defer cancel() _, err = c.QueryContext(ctx, query) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_QueryContext_Cancelled(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{} ctx, cancel := context.WithCancel(context.Background()) cancel() _, err = c.QueryContext(ctx, query) if err == nil { t.Fatalf("Since context was cancelled an error was expected, but got nil.") } } func TestClient_ChunkedQuery_WithContext(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data client.Response w.WriteHeader(http.StatusOK) enc := json.NewEncoder(w) _ = enc.Encode(data) _ = enc.Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{Chunked: true} ctx, cancel := context.WithCancel(context.Background()) defer cancel() _, err = c.QueryContext(ctx, query) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_BasicAuth(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { u, p, ok := r.BasicAuth() if !ok { t.Errorf("basic auth error") } if u != "username" { t.Errorf("unexpected username, expected %q, actual %q", "username", u) } if p != "password" { t.Errorf("unexpected password, expected %q, actual %q", "password", p) } w.WriteHeader(http.StatusNoContent) })) defer ts.Close() u, _ := url.Parse(ts.URL) u.User = url.UserPassword("username", "password") config := client.Config{URL: *u, Username: "username", Password: "password"} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } _, _, err = c.Ping() if err != nil { t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) } } func TestClient_Write(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { in, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("unexpected error: %s", err) } else if have, want := strings.TrimSpace(string(in)), `m0,host=server01 v1=2,v2=2i,v3=2u,v4="foobar",v5=true 0`; have != want { t.Errorf("unexpected write protocol: %s != %s", have, want) } var data client.Response w.WriteHeader(http.StatusNoContent) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } bp := client.BatchPoints{ Points: []client.Point{ { Measurement: "m0", Tags: map[string]string{ "host": "server01", }, Time: time.Unix(0, 0).UTC(), Fields: map[string]interface{}{ "v1": float64(2), "v2": int64(2), "v3": uint64(2), "v4": "foobar", "v5": true, }, }, }, } r, err := c.Write(bp) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } if r != nil { t.Fatalf("unexpected response. expected %v, actual %v", nil, r) } } func TestClient_UserAgent(t *testing.T) { receivedUserAgent := "" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { receivedUserAgent = r.UserAgent() var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() _, err := http.Get(ts.URL) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } tests := []struct { name string userAgent string expected string }{ { name: "Empty user agent", userAgent: "", expected: "InfluxDBClient", }, { name: "Custom user agent", userAgent: "Test Influx Client", expected: "Test Influx Client", }, } for _, test := range tests { u, _ := url.Parse(ts.URL) config := client.Config{URL: *u, UserAgent: test.userAgent} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } receivedUserAgent = "" query := client.Query{} _, err = c.Query(query) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } if !strings.HasPrefix(receivedUserAgent, test.expected) { t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) } receivedUserAgent = "" bp := client.BatchPoints{} _, err = c.Write(bp) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } if !strings.HasPrefix(receivedUserAgent, test.expected) { t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) } receivedUserAgent = "" _, _, err = c.Ping() if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } if receivedUserAgent != test.expected { t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) } } } func TestClient_Messages(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"results":[{"messages":[{"level":"warning","text":"deprecation test"}]}]}`)) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{} resp, err := c.Query(query) if err != nil { t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) } if got, exp := len(resp.Results), 1; got != exp { t.Fatalf("unexpected number of results. expected %v, actual %v", exp, got) } r := resp.Results[0] if got, exp := len(r.Messages), 1; got != exp { t.Fatalf("unexpected number of messages. expected %v, actual %v", exp, got) } m := r.Messages[0] if got, exp := m.Level, "warning"; got != exp { t.Errorf("unexpected message level. expected %v, actual %v", exp, got) } if got, exp := m.Text, "deprecation test"; got != exp { t.Errorf("unexpected message text. expected %v, actual %v", exp, got) } } func TestPoint_UnmarshalEpoch(t *testing.T) { now := time.Now() tests := []struct { name string epoch int64 precision string expected time.Time }{ { name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now, }, { name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond), }, { name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond), }, { name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second), }, { name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute), }, { name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour), }, { name: "max int64", epoch: 9223372036854775807, precision: "n", expected: time.Unix(0, 9223372036854775807), }, { name: "100 years from now", epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(), precision: "n", expected: now.Add(time.Hour * 24 * 365 * 100), }, } for _, test := range tests { t.Logf("testing %q\n", test.name) data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision)) t.Logf("json: %s", string(data)) var p client.Point err := json.Unmarshal(data, &p) if err != nil { t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) } if !p.Time.Equal(test.expected) { t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) } } } func TestPoint_UnmarshalRFC(t *testing.T) { now := time.Now().UTC() tests := []struct { name string rfc string now time.Time expected time.Time }{ { name: "RFC3339Nano", rfc: time.RFC3339Nano, now: now, expected: now, }, { name: "RFC3339", rfc: time.RFC3339, now: now.Round(time.Second), expected: now.Round(time.Second), }, } for _, test := range tests { t.Logf("testing %q\n", test.name) ts := test.now.Format(test.rfc) data := []byte(fmt.Sprintf(`{"time": %q}`, ts)) t.Logf("json: %s", string(data)) var p client.Point err := json.Unmarshal(data, &p) if err != nil { t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) } if !p.Time.Equal(test.expected) { t.Fatalf("Unexpected time. 
expected: %v, actual: %v", test.expected, p.Time) } } } func TestPoint_MarshalOmitempty(t *testing.T) { now := time.Now().UTC() tests := []struct { name string point client.Point now time.Time expected string }{ { name: "all empty", point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}}, now: now, expected: `{"measurement":"cpu","fields":{"value":1.1}}`, }, { name: "with time", point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now}, now: now, expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)), }, { name: "with tags", point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}}, now: now, expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`, }, { name: "with precision", point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"}, now: now, expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`, }, } for _, test := range tests { t.Logf("testing %q\n", test.name) b, err := json.Marshal(&test.point) if err != nil { t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) } if test.expected != string(b) { t.Fatalf("Unexpected result. expected: %v, actual: %v", test.expected, string(b)) } } } func TestEpochToTime(t *testing.T) { now := time.Now() tests := []struct { name string epoch int64 precision string expected time.Time }{ {name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now}, {name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)}, {name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)}, {name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)}, {name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)}, {name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)}, } for _, test := range tests { t.Logf("testing %q\n", test.name) tm, e := client.EpochToTime(test.epoch, test.precision) if e != nil { t.Fatalf("unexpected error: expected %v, actual: %v", nil, e) } if !tm.Equal(test.expected) { t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm) } } } // helper functions func emptyTestServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { time.Sleep(50 * time.Millisecond) w.Header().Set("X-Influxdb-Version", "x.x") })) } // Ensure that data with epoch times can be decoded. 
func TestBatchPoints_Normal(t *testing.T) { var bp client.BatchPoints data := []byte(` { "database": "foo", "retentionPolicy": "bar", "points": [ { "name": "cpu", "tags": { "host": "server01" }, "time": 14244733039069373, "precision": "n", "values": { "value": 4541770385657154000 } }, { "name": "cpu", "tags": { "host": "server01" }, "time": 14244733039069380, "precision": "n", "values": { "value": 7199311900554737000 } } ] } `) if err := json.Unmarshal(data, &bp); err != nil { t.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) } } func TestClient_Timeout(t *testing.T) { done := make(chan bool) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { <-done })) defer ts.Close() defer func() { done <- true }() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{} _, err = c.Query(query) if err == nil { t.Fatalf("unexpected success. expected timeout error") return } if !strings.Contains(err.Error(), "request canceled") && !strings.Contains(err.Error(), "use of closed network connection") && !strings.Contains(err.Error(), "Timeout") { t.Fatalf("unexpected error. expected 'request canceled' error, got %v", err) } } func TestClient_NoTimeout(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode") } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { time.Sleep(1 * time.Second) var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{} _, err = c.Query(query) if err != nil { t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) } } func TestClient_ParseConnectionString(t *testing.T) { for _, tt := range []struct { addr string ssl bool exp string }{ { addr: "localhost", exp: "http://localhost:8086", }, { addr: "localhost:8086", exp: "http://localhost:8086", }, { addr: "localhost:80", exp: "http://localhost", }, { addr: "localhost", exp: "https://localhost:8086", ssl: true, }, { addr: "localhost:443", exp: "https://localhost", ssl: true, }, { addr: "localhost:80", exp: "https://localhost:80", ssl: true, }, { addr: "localhost:443", exp: "http://localhost:443", }, } { name := tt.addr if tt.ssl { name += "+ssl" } t.Run(name, func(t *testing.T) { u, err := client.ParseConnectionString(tt.addr, tt.ssl) if err != nil { t.Fatalf("unexpected error: %s", err) } if got, want := u.String(), tt.exp; got != want { t.Fatalf("unexpected connection string: got=%s want=%s", got, want) } }) } } func TestClient_ParseConnectionString_IPv6(t *testing.T) { path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086" u, err := client.ParseConnectionString(path, false) if err != nil { t.Fatalf("unexpected error, expected %v, actual %v", nil, err) } if u.Host != path { t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host) } } func TestClient_CustomCertificates(t *testing.T) { // generated with: // openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf // influx.cnf: // [req] // distinguished_name = req_distinguished_name // x509_extensions = v3_req // prompt = no // [req_distinguished_name] // C = US // ST = CA // L = San Francisco // O = InfluxDB // CN = github.com/influxdata // [v3_req] // keyUsage = keyEncipherment, dataEncipherment // extendedKeyUsage = serverAuth // subjectAltName = @alt_names // [alt_names] // IP.1 = 127.0.0.1 // key := ` -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi 4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv qjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS 1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t WTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa mhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m hXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I dSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi b4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu 36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m u9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH FWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt byvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/ vXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6 aNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6 BCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K Yg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0 3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T OcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi elKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1 2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K 5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk bKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C cq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg /JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ cvh4WzEegcExTppINW1NB5E= -----END PRIVATE KEY----- ` cert := ` -----BEGIN 
CERTIFICATE----- MIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G A1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1 NloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw FAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE AwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK JLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr XeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+ 3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK u54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW 37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti MT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN AQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ m4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F 3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk rRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY jn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW war3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI= -----END CERTIFICATE----- ` cer, err := tls.X509KeyPair([]byte(cert), []byte(key)) if err != nil { t.Fatalf("Received error: %v", err) } server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) server.TLS = &tls.Config{Certificates: []tls.Certificate{cer}} server.TLS.BuildNameToCertificate() server.StartTLS() defer server.Close() certFile, _ := ioutil.TempFile("", "influx-cert-") certFile.WriteString(cert) certFile.Close() defer os.Remove(certFile.Name()) u, _ := url.Parse(server.URL) tests := []struct { name string unsafeSsl bool expected error }{ {name: "validate certificates", unsafeSsl: false, expected: errors.New("error")}, {name: "not validate certificates", unsafeSsl: true, expected: nil}, } for _, test := range tests { config := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl} c, err := client.NewClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := client.Query{} _, err = c.Query(query) if (test.expected == nil) != (err == nil) { t.Fatalf("%s: expected %v. got %v. unsafeSsl: %v", test.name, test.expected, err, test.unsafeSsl) } } } func TestChunkedResponse(t *testing.T) { s := `{"results":[{},{}]}{"results":[{}]}` r := client.NewChunkedResponse(strings.NewReader(s)) resp, err := r.NextResponse() if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } else if actual := len(resp.Results); actual != 2 { t.Fatalf("unexpected number of results. expected %v, actual %v", 2, actual) } resp, err = r.NextResponse() if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } else if actual := len(resp.Results); actual != 1 { t.Fatalf("unexpected number of results. expected %v, actual %v", 1, actual) } resp, err = r.NextResponse() if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } else if resp != nil { t.Fatalf("unexpected response. 
expected %v, actual %v", nil, resp) } } func TestClient_Proxy(t *testing.T) { pinged := false server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { if got, want := req.URL.String(), "http://example.com:8086/ping"; got != want { t.Errorf("invalid url in request: got=%s want=%s", got, want) } resp.WriteHeader(http.StatusNoContent) pinged = true })) defer server.Close() proxyURL, _ := url.Parse(server.URL) c, err := client.NewClient(client.Config{ URL: url.URL{ Scheme: "http", Host: "example.com:8086", }, Proxy: http.ProxyURL(proxyURL), }) if err != nil { t.Fatalf("unexpected error: %s", err) } if _, _, err := c.Ping(); err != nil { t.Fatalf("could not ping server: %s", err) } if !pinged { t.Fatalf("no http request was received") } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/000077500000000000000000000000001433650256200254665ustar00rootroot00000000000000golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/inline_fnv.go000066400000000000000000000014741433650256200301520ustar00rootroot00000000000000package models // import "github.com/influxdata/influxdb1-client/models" // from stdlib hash/fnv/fnv.go const ( prime64 = 1099511628211 offset64 = 14695981039346656037 ) // InlineFNV64a is an alloc-free port of the standard library's fnv64a. // See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. type InlineFNV64a uint64 // NewInlineFNV64a returns a new instance of InlineFNV64a. func NewInlineFNV64a() InlineFNV64a { return offset64 } // Write adds data to the running hash. func (s *InlineFNV64a) Write(data []byte) (int, error) { hash := uint64(*s) for _, c := range data { hash ^= uint64(c) hash *= prime64 } *s = InlineFNV64a(hash) return len(data), nil } // Sum64 returns the uint64 of the current resulting hash. func (s *InlineFNV64a) Sum64() uint64 { return uint64(*s) } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/inline_fnv_test.go000066400000000000000000000010021433650256200311740ustar00rootroot00000000000000package models_test import ( "hash/fnv" "testing" "testing/quick" "github.com/influxdata/influxdb1-client/models" ) func TestInlineFNV64aEquivalenceFuzz(t *testing.T) { f := func(data []byte) bool { stdlibFNV := fnv.New64a() stdlibFNV.Write(data) want := stdlibFNV.Sum64() inlineFNV := models.NewInlineFNV64a() inlineFNV.Write(data) got := inlineFNV.Sum64() return want == got } cfg := &quick.Config{ MaxCount: 10000, } if err := quick.Check(f, cfg); err != nil { t.Fatal(err) } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/inline_strconv_parse.go000066400000000000000000000024561433650256200322520ustar00rootroot00000000000000package models // import "github.com/influxdata/influxdb1-client/models" import ( "reflect" "strconv" "unsafe" ) // parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { s := unsafeBytesToString(b) return strconv.ParseInt(s, base, bitSize) } // parseUintBytes is a zero-alloc wrapper around strconv.ParseUint. func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) { s := unsafeBytesToString(b) return strconv.ParseUint(s, base, bitSize) } // parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. func parseFloatBytes(b []byte, bitSize int) (float64, error) { s := unsafeBytesToString(b) return strconv.ParseFloat(s, bitSize) } // parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. 
func parseBoolBytes(b []byte) (bool, error) { return strconv.ParseBool(unsafeBytesToString(b)) } // unsafeBytesToString converts a []byte to a string without a heap allocation. // // It is unsafe, and is intended to prepare input to short-lived functions // that require strings. func unsafeBytesToString(in []byte) string { src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) dst := reflect.StringHeader{ Data: src.Data, Len: src.Len, } s := *(*string)(unsafe.Pointer(&dst)) return s } inline_strconv_parse_test.go000066400000000000000000000043621433650256200332300ustar00rootroot00000000000000golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/modelspackage models import ( "strconv" "testing" "testing/quick" ) func TestParseIntBytesEquivalenceFuzz(t *testing.T) { f := func(b []byte, base int, bitSize int) bool { exp, expErr := strconv.ParseInt(string(b), base, bitSize) got, gotErr := parseIntBytes(b, base, bitSize) return exp == got && checkErrs(expErr, gotErr) } cfg := &quick.Config{ MaxCount: 10000, } if err := quick.Check(f, cfg); err != nil { t.Fatal(err) } } func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) { buf := []byte{} f := func(n int64) bool { buf = strconv.AppendInt(buf[:0], n, 10) exp, expErr := strconv.ParseInt(string(buf), 10, 64) got, gotErr := parseIntBytes(buf, 10, 64) return exp == got && checkErrs(expErr, gotErr) } cfg := &quick.Config{ MaxCount: 10000, } if err := quick.Check(f, cfg); err != nil { t.Fatal(err) } } func TestParseFloatBytesEquivalenceFuzz(t *testing.T) { f := func(b []byte, bitSize int) bool { exp, expErr := strconv.ParseFloat(string(b), bitSize) got, gotErr := parseFloatBytes(b, bitSize) return exp == got && checkErrs(expErr, gotErr) } cfg := &quick.Config{ MaxCount: 10000, } if err := quick.Check(f, cfg); err != nil { t.Fatal(err) } } func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) { buf := []byte{} f := func(n float64) bool { buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64) exp, expErr := strconv.ParseFloat(string(buf), 64) got, gotErr := parseFloatBytes(buf, 64) return exp == got && checkErrs(expErr, gotErr) } cfg := &quick.Config{ MaxCount: 10000, } if err := quick.Check(f, cfg); err != nil { t.Fatal(err) } } func TestParseBoolBytesEquivalence(t *testing.T) { var buf []byte for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} { buf = append(buf[:0], s...) exp, expErr := strconv.ParseBool(s) got, gotErr := parseBoolBytes(buf) if got != exp || !checkErrs(expErr, gotErr) { t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr) } } } func checkErrs(a, b error) bool { if (a == nil) != (b == nil) { return false } return a == nil || a.Error() == b.Error() } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/points.go000066400000000000000000001656621433650256200273510ustar00rootroot00000000000000// Package models implements basic objects used throughout the TICK stack. 
package models // import "github.com/influxdata/influxdb1-client/models" import ( "bytes" "encoding/binary" "errors" "fmt" "io" "math" "sort" "strconv" "strings" "time" "unicode" "unicode/utf8" "github.com/influxdata/influxdb1-client/pkg/escape" ) type escapeSet struct { k [1]byte esc [2]byte } var ( measurementEscapeCodes = [...]escapeSet{ {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, } tagEscapeCodes = [...]escapeSet{ {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, {k: [1]byte{'='}, esc: [2]byte{'\\', '='}}, } // ErrPointMustHaveAField is returned when operating on a point that does not have any fields. ErrPointMustHaveAField = errors.New("point without fields is unsupported") // ErrInvalidNumber is returned when a number is expected but not provided. ErrInvalidNumber = errors.New("invalid number") // ErrInvalidPoint is returned when a point cannot be parsed correctly. ErrInvalidPoint = errors.New("point is invalid") ) const ( // MaxKeyLength is the largest allowed size of the combined measurement and tag keys. MaxKeyLength = 65535 ) // enableUint64Support will enable uint64 support if set to true. var enableUint64Support = false // EnableUintSupport manually enables uint support for the point parser. // This function will be removed in the future and only exists for unit tests during the // transition. func EnableUintSupport() { enableUint64Support = true } // Point defines the values that will be written to the database. type Point interface { // Name return the measurement name for the point. Name() []byte // SetName updates the measurement name for the point. SetName(string) // Tags returns the tag set for the point. Tags() Tags // ForEachTag iterates over each tag invoking fn. If fn return false, iteration stops. ForEachTag(fn func(k, v []byte) bool) // AddTag adds or replaces a tag value for a point. AddTag(key, value string) // SetTags replaces the tags for the point. SetTags(tags Tags) // HasTag returns true if the tag exists for the point. HasTag(tag []byte) bool // Fields returns the fields for the point. Fields() (Fields, error) // Time return the timestamp for the point. Time() time.Time // SetTime updates the timestamp for the point. SetTime(t time.Time) // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. UnixNano() int64 // HashID returns a non-cryptographic checksum of the point's key. HashID() uint64 // Key returns the key (measurement joined with tags) of the point. Key() []byte // String returns a string representation of the point. If there is a // timestamp associated with the point then it will be specified with the default // precision of nanoseconds. String() string // MarshalBinary returns a binary representation of the point. MarshalBinary() ([]byte, error) // PrecisionString returns a string representation of the point. If there // is a timestamp associated with the point then it will be specified in the // given unit. PrecisionString(precision string) string // RoundedString returns a string representation of the point. If there // is a timestamp associated with the point, then it will be rounded to the // given duration. RoundedString(d time.Duration) string // Split will attempt to return multiple points with the same timestamp whose // string representations are no longer than size. Points with a single field or // a point without a timestamp may exceed the requested size. 
Split(size int) []Point // Round will round the timestamp of the point to the given duration. Round(d time.Duration) // StringSize returns the length of the string that would be returned by String(). StringSize() int // AppendString appends the result of String() to the provided buffer and returns // the result, potentially reducing string allocations. AppendString(buf []byte) []byte // FieldIterator retuns a FieldIterator that can be used to traverse the // fields of a point without constructing the in-memory map. FieldIterator() FieldIterator } // FieldType represents the type of a field. type FieldType int const ( // Integer indicates the field's type is integer. Integer FieldType = iota // Float indicates the field's type is float. Float // Boolean indicates the field's type is boolean. Boolean // String indicates the field's type is string. String // Empty is used to indicate that there is no field. Empty // Unsigned indicates the field's type is an unsigned integer. Unsigned ) // FieldIterator provides a low-allocation interface to iterate through a point's fields. type FieldIterator interface { // Next indicates whether there any fields remaining. Next() bool // FieldKey returns the key of the current field. FieldKey() []byte // Type returns the FieldType of the current field. Type() FieldType // StringValue returns the string value of the current field. StringValue() string // IntegerValue returns the integer value of the current field. IntegerValue() (int64, error) // UnsignedValue returns the unsigned value of the current field. UnsignedValue() (uint64, error) // BooleanValue returns the boolean value of the current field. BooleanValue() (bool, error) // FloatValue returns the float value of the current field. FloatValue() (float64, error) // Reset resets the iterator to its initial state. Reset() } // Points represents a sortable list of points by timestamp. type Points []Point // Len implements sort.Interface. func (a Points) Len() int { return len(a) } // Less implements sort.Interface. func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } // Swap implements sort.Interface. func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // point is the default implementation of Point. type point struct { time time.Time // text encoding of measurement and tags // key must always be stored sorted by tags, if the original line was not sorted, // we need to resort it key []byte // text encoding of field data fields []byte // text encoding of timestamp ts []byte // cached version of parsed fields from data cachedFields map[string]interface{} // cached version of parsed name from key cachedName string // cached version of parsed tags cachedTags Tags it fieldIterator } // type assertions var ( _ Point = (*point)(nil) _ FieldIterator = (*point)(nil) ) const ( // the number of characters for the largest possible int64 (9223372036854775807) maxInt64Digits = 19 // the number of characters for the smallest possible int64 (-9223372036854775808) minInt64Digits = 20 // the number of characters for the largest possible uint64 (18446744073709551615) maxUint64Digits = 20 // the number of characters required for the largest float64 before a range check // would occur during parsing maxFloat64Digits = 25 // the number of characters required for smallest float64 before a range check occur // would occur during parsing minFloat64Digits = 27 ) // ParsePoints returns a slice of Points from a text representation of a point // with each point separated by newlines. 
If any points fail to parse, a non-nil error // will be returned in addition to the points that parsed successfully. func ParsePoints(buf []byte) ([]Point, error) { return ParsePointsWithPrecision(buf, time.Now().UTC(), "n") } // ParsePointsString is identical to ParsePoints but accepts a string. func ParsePointsString(buf string) ([]Point, error) { return ParsePoints([]byte(buf)) } // ParseKey returns the measurement name and tags from a point. // // NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf. // This can have the unintended effect preventing buf from being garbage collected. func ParseKey(buf []byte) (string, Tags) { name, tags := ParseKeyBytes(buf) return string(name), tags } func ParseKeyBytes(buf []byte) ([]byte, Tags) { return ParseKeyBytesWithTags(buf, nil) } func ParseKeyBytesWithTags(buf []byte, tags Tags) ([]byte, Tags) { // Ignore the error because scanMeasurement returns "missing fields" which we ignore // when just parsing a key state, i, _ := scanMeasurement(buf, 0) var name []byte if state == tagKeyState { tags = parseTags(buf, tags) // scanMeasurement returns the location of the comma if there are tags, strip that off name = buf[:i-1] } else { name = buf[:i] } return unescapeMeasurement(name), tags } func ParseTags(buf []byte) Tags { return parseTags(buf, nil) } func ParseName(buf []byte) []byte { // Ignore the error because scanMeasurement returns "missing fields" which we ignore // when just parsing a key state, i, _ := scanMeasurement(buf, 0) var name []byte if state == tagKeyState { name = buf[:i-1] } else { name = buf[:i] } return unescapeMeasurement(name) } // ParsePointsWithPrecision is similar to ParsePoints, but allows the // caller to provide a precision for time. // // NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf. // This can have the unintended effect preventing buf from being garbage collected. func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1) var ( pos int block []byte failed []string ) for pos < len(buf) { pos, block = scanLine(buf, pos) pos++ if len(block) == 0 { continue } start := skipWhitespace(block, 0) // If line is all whitespace, just skip it if start >= len(block) { continue } // lines which start with '#' are comments if block[start] == '#' { continue } // strip the newline if one is present if block[len(block)-1] == '\n' { block = block[:len(block)-1] } pt, err := parsePoint(block[start:], defaultTime, precision) if err != nil { failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err)) } else { points = append(points, pt) } } if len(failed) > 0 { return points, fmt.Errorf("%s", strings.Join(failed, "\n")) } return points, nil } func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) { // scan the first block which is measurement[,tag1=value1,tag2=value2...] pos, key, err := scanKey(buf, 0) if err != nil { return nil, err } // measurement name is required if len(key) == 0 { return nil, fmt.Errorf("missing measurement") } if len(key) > MaxKeyLength { return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) } // scan the second block is which is field1=value1[,field2=value2,...] 
pos, fields, err := scanFields(buf, pos) if err != nil { return nil, err } // at least one field is required if len(fields) == 0 { return nil, fmt.Errorf("missing fields") } var maxKeyErr error err = walkFields(fields, func(k, v []byte) bool { if sz := seriesKeySize(key, k); sz > MaxKeyLength { maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) return false } return true }) if err != nil { return nil, err } if maxKeyErr != nil { return nil, maxKeyErr } // scan the last block which is an optional integer timestamp pos, ts, err := scanTime(buf, pos) if err != nil { return nil, err } pt := &point{ key: key, fields: fields, ts: ts, } if len(ts) == 0 { pt.time = defaultTime pt.SetPrecision(precision) } else { ts, err := parseIntBytes(ts, 10, 64) if err != nil { return nil, err } pt.time, err = SafeCalcTime(ts, precision) if err != nil { return nil, err } // Determine if there are illegal non-whitespace characters after the // timestamp block. for pos < len(buf) { if buf[pos] != ' ' { return nil, ErrInvalidPoint } pos++ } } return pt, nil } // GetPrecisionMultiplier will return a multiplier for the precision specified. func GetPrecisionMultiplier(precision string) int64 { d := time.Nanosecond switch precision { case "u": d = time.Microsecond case "ms": d = time.Millisecond case "s": d = time.Second case "m": d = time.Minute case "h": d = time.Hour } return int64(d) } // scanKey scans buf starting at i for the measurement and tag portion of the point. // It returns the ending position and the byte slice of key within buf. If there // are tags, they will be sorted if they are not already. func scanKey(buf []byte, i int) (int, []byte, error) { start := skipWhitespace(buf, i) i = start // Determines whether the tags are sort, assume they are sorted := true // indices holds the indexes within buf of the start of each tag. For example, // a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20] // which indicates that the first tag starts at buf[4], seconds at buf[11], and // last at buf[20] indices := make([]int, 100) // tracks how many commas we've seen so we know how many values are indices. // Since indices is an arbitrarily large slice, // we need to know how many values in the buffer are in use. commas := 0 // First scan the Point's measurement. state, i, err := scanMeasurement(buf, i) if err != nil { return i, buf[start:i], err } // Optionally scan tags if needed. if state == tagKeyState { i, commas, indices, err = scanTags(buf, i, indices) if err != nil { return i, buf[start:i], err } } // Now we know where the key region is within buf, and the location of tags, we // need to determine if duplicate tags exist and if the tags are sorted. This iterates // over the list comparing each tag in the sequence with each other. for j := 0; j < commas-1; j++ { // get the left and right tags _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=') // If left is greater than right, the tags are not sorted. We do not have to // continue because the short path no longer works. // If the tags are equal, then there are duplicate tags, and we should abort. // If the tags are not sorted, this pass may not find duplicate tags and we // need to do a more exhaustive search later. if cmp := bytes.Compare(left, right); cmp > 0 { sorted = false break } else if cmp == 0 { return i, buf[start:i], fmt.Errorf("duplicate tags") } } // If the tags are not sorted, then sort them. 
This sort is inline and // uses the tag indices we created earlier. The actual buffer is not sorted, the // indices are using the buffer for value comparison. After the indices are sorted, // the buffer is reconstructed from the sorted indices. if !sorted && commas > 0 { // Get the measurement name for later measurement := buf[start : indices[0]-1] // Sort the indices indices := indices[:commas] insertionSort(0, commas, buf, indices) // Create a new key using the measurement and sorted indices b := make([]byte, len(buf[start:i])) pos := copy(b, measurement) for _, i := range indices { b[pos] = ',' pos++ _, v := scanToSpaceOr(buf, i, ',') pos += copy(b[pos:], v) } // Check again for duplicate tags now that the tags are sorted. for j := 0; j < commas-1; j++ { // get the left and right tags _, left := scanTo(buf[indices[j]:], 0, '=') _, right := scanTo(buf[indices[j+1]:], 0, '=') // If the tags are equal, then there are duplicate tags, and we should abort. // If the tags are not sorted, this pass may not find duplicate tags and we // need to do a more exhaustive search later. if bytes.Equal(left, right) { return i, b, fmt.Errorf("duplicate tags") } } return i, b, nil } return i, buf[start:i], nil } // The following constants allow us to specify which state to move to // next, when scanning sections of a Point. const ( tagKeyState = iota tagValueState fieldsState ) // scanMeasurement examines the measurement part of a Point, returning // the next state to move to, and the current location in the buffer. func scanMeasurement(buf []byte, i int) (int, int, error) { // Check first byte of measurement, anything except a comma is fine. // It can't be a space, since whitespace is stripped prior to this // function call. if i >= len(buf) || buf[i] == ',' { return -1, i, fmt.Errorf("missing measurement") } for { i++ if i >= len(buf) { // cpu return -1, i, fmt.Errorf("missing fields") } if buf[i-1] == '\\' { // Skip character (it's escaped). continue } // Unescaped comma; move onto scanning the tags. if buf[i] == ',' { return tagKeyState, i + 1, nil } // Unescaped space; move onto scanning the fields. if buf[i] == ' ' { // cpu value=1.0 return fieldsState, i, nil } } } // scanTags examines all the tags in a Point, keeping track of and // returning the updated indices slice, number of commas and location // in buf where to start examining the Point fields. func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) { var ( err error commas int state = tagKeyState ) for { switch state { case tagKeyState: // Grow our indices slice if we have too many tags. if commas >= len(indices) { newIndics := make([]int, cap(indices)*2) copy(newIndics, indices) indices = newIndics } indices[commas] = i commas++ i, err = scanTagsKey(buf, i) state = tagValueState // tag value always follows a tag key case tagValueState: state, i, err = scanTagsValue(buf, i) case fieldsState: indices[commas] = i + 1 return i, commas, indices, nil } if err != nil { return i, commas, indices, err } } } // scanTagsKey scans each character in a tag key. func scanTagsKey(buf []byte, i int) (int, error) { // First character of the key. if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' { // cpu,{'', ' ', ',', '='} return i, fmt.Errorf("missing tag key") } // Examine each character in the tag key until we hit an unescaped // equals (the tag value), or we hit an error (i.e., unescaped // space or comma). for { i++ // Either we reached the end of the buffer or we hit an // unescaped comma or space. 
if i >= len(buf) || ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') { // cpu,tag{'', ' ', ','} return i, fmt.Errorf("missing tag value") } if buf[i] == '=' && buf[i-1] != '\\' { // cpu,tag= return i + 1, nil } } } // scanTagsValue scans each character in a tag value. func scanTagsValue(buf []byte, i int) (int, int, error) { // Tag value cannot be empty. if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' { // cpu,tag={',', ' '} return -1, i, fmt.Errorf("missing tag value") } // Examine each character in the tag value until we hit an unescaped // comma (move onto next tag key), an unescaped space (move onto // fields), or we error out. for { i++ if i >= len(buf) { // cpu,tag=value return -1, i, fmt.Errorf("missing fields") } // An unescaped equals sign is an invalid tag value. if buf[i] == '=' && buf[i-1] != '\\' { // cpu,tag={'=', 'fo=o'} return -1, i, fmt.Errorf("invalid tag format") } if buf[i] == ',' && buf[i-1] != '\\' { // cpu,tag=foo, return tagKeyState, i + 1, nil } // cpu,tag=foo value=1.0 // cpu, tag=foo\= value=1.0 if buf[i] == ' ' && buf[i-1] != '\\' { return fieldsState, i, nil } } } func insertionSort(l, r int, buf []byte, indices []int) { for i := l + 1; i < r; i++ { for j := i; j > l && less(buf, indices, j, j-1); j-- { indices[j], indices[j-1] = indices[j-1], indices[j] } } } func less(buf []byte, indices []int, i, j int) bool { // This grabs the tag names for i & j, it ignores the values _, a := scanTo(buf, indices[i], '=') _, b := scanTo(buf, indices[j], '=') return bytes.Compare(a, b) < 0 } // scanFields scans buf, starting at i for the fields section of a point. It returns // the ending position and the byte slice of the fields within buf. func scanFields(buf []byte, i int) (int, []byte, error) { start := skipWhitespace(buf, i) i = start quoted := false // tracks how many '=' we've seen equals := 0 // tracks how many commas we've seen commas := 0 for { // reached the end of buf? if i >= len(buf) { break } // escaped characters? if buf[i] == '\\' && i+1 < len(buf) { i += 2 continue } // If the value is quoted, scan until we get to the end quote // Only quote values in the field value since quotes are not significant // in the field key if buf[i] == '"' && equals > commas { quoted = !quoted i++ continue } // If we see an =, ensure that there is at least on char before and after it if buf[i] == '=' && !quoted { equals++ // check for "... =123" but allow "a\ =123" if buf[i-1] == ' ' && buf[i-2] != '\\' { return i, buf[start:i], fmt.Errorf("missing field key") } // check for "...a=123,=456" but allow "a=123,a\,=456" if buf[i-1] == ',' && buf[i-2] != '\\' { return i, buf[start:i], fmt.Errorf("missing field key") } // check for "... value=" if i+1 >= len(buf) { return i, buf[start:i], fmt.Errorf("missing field value") } // check for "... value=,value2=..." if buf[i+1] == ',' || buf[i+1] == ' ' { return i, buf[start:i], fmt.Errorf("missing field value") } if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' { var err error i, err = scanNumber(buf, i+1) if err != nil { return i, buf[start:i], err } continue } // If next byte is not a double-quote, the value must be a boolean if buf[i+1] != '"' { var err error i, _, err = scanBoolean(buf, i+1) if err != nil { return i, buf[start:i], err } continue } } if buf[i] == ',' && !quoted { commas++ } // reached end of block? if buf[i] == ' ' && !quoted { break } i++ } if quoted { return i, buf[start:i], fmt.Errorf("unbalanced quotes") } // check that all field sections had key and values (e.g. 
prevent "a=1,b" if equals == 0 || commas != equals-1 { return i, buf[start:i], fmt.Errorf("invalid field format") } return i, buf[start:i], nil } // scanTime scans buf, starting at i for the time section of a point. It // returns the ending position and the byte slice of the timestamp within buf // and and error if the timestamp is not in the correct numeric format. func scanTime(buf []byte, i int) (int, []byte, error) { start := skipWhitespace(buf, i) i = start for { // reached the end of buf? if i >= len(buf) { break } // Reached end of block or trailing whitespace? if buf[i] == '\n' || buf[i] == ' ' { break } // Handle negative timestamps if i == start && buf[i] == '-' { i++ continue } // Timestamps should be integers, make sure they are so we don't need // to actually parse the timestamp until needed. if buf[i] < '0' || buf[i] > '9' { return i, buf[start:i], fmt.Errorf("bad timestamp") } i++ } return i, buf[start:i], nil } func isNumeric(b byte) bool { return (b >= '0' && b <= '9') || b == '.' } // scanNumber returns the end position within buf, start at i after // scanning over buf for an integer, or float. It returns an // error if a invalid number is scanned. func scanNumber(buf []byte, i int) (int, error) { start := i var isInt, isUnsigned bool // Is negative number? if i < len(buf) && buf[i] == '-' { i++ // There must be more characters now, as just '-' is illegal. if i == len(buf) { return i, ErrInvalidNumber } } // how many decimal points we've see decimal := false // indicates the number is float in scientific notation scientific := false for { if i >= len(buf) { break } if buf[i] == ',' || buf[i] == ' ' { break } if buf[i] == 'i' && i > start && !(isInt || isUnsigned) { isInt = true i++ continue } else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) { isUnsigned = true i++ continue } if buf[i] == '.' { // Can't have more than 1 decimal (e.g. 1.1.1 should fail) if decimal { return i, ErrInvalidNumber } decimal = true } // `e` is valid for floats but not as the first char if i > start && (buf[i] == 'e' || buf[i] == 'E') { scientific = true i++ continue } // + and - are only valid at this point if they follow an e (scientific notation) if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') { i++ continue } // NaN is an unsupported value if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') { return i, ErrInvalidNumber } if !isNumeric(buf[i]) { return i, ErrInvalidNumber } i++ } if (isInt || isUnsigned) && (decimal || scientific) { return i, ErrInvalidNumber } numericDigits := i - start if isInt { numericDigits-- } if decimal { numericDigits-- } if buf[start] == '-' { numericDigits-- } if numericDigits == 0 { return i, ErrInvalidNumber } // It's more common that numbers will be within min/max range for their type but we need to prevent // out or range numbers from being parsed successfully. This uses some simple heuristics to decide // if we should parse the number to the actual type. It does not do it all the time because it incurs // extra allocations and we end up converting the type again when writing points to disk. if isInt { // Make sure the last char is an 'i' for integers (e.g. 
9i10 is not valid) if buf[i-1] != 'i' { return i, ErrInvalidNumber } // Parse the int to check bounds the number of digits could be larger than the max range // We subtract 1 from the index to remove the `i` from our tests if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits { if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil { return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err) } } } else if isUnsigned { // Return an error if uint64 support has not been enabled. if !enableUint64Support { return i, ErrInvalidNumber } // Make sure the last char is a 'u' for unsigned if buf[i-1] != 'u' { return i, ErrInvalidNumber } // Make sure the first char is not a '-' for unsigned if buf[start] == '-' { return i, ErrInvalidNumber } // Parse the uint to check bounds the number of digits could be larger than the max range // We subtract 1 from the index to remove the `u` from our tests if len(buf[start:i-1]) >= maxUint64Digits { if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil { return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err) } } } else { // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits { if _, err := parseFloatBytes(buf[start:i], 10); err != nil { return i, fmt.Errorf("invalid float") } } } return i, nil } // scanBoolean returns the end position within buf, start at i after // scanning over buf for boolean. Valid values for a boolean are // t, T, true, TRUE, f, F, false, FALSE. It returns an error if a invalid boolean // is scanned. func scanBoolean(buf []byte, i int) (int, []byte, error) { start := i if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') { return i, buf[start:i], fmt.Errorf("invalid boolean") } i++ for { if i >= len(buf) { break } if buf[i] == ',' || buf[i] == ' ' { break } i++ } // Single char bool (t, T, f, F) is ok if i-start == 1 { return i, buf[start:i], nil } // length must be 4 for true or TRUE if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 { return i, buf[start:i], fmt.Errorf("invalid boolean") } // length must be 5 for false or FALSE if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 { return i, buf[start:i], fmt.Errorf("invalid boolean") } // Otherwise valid := false switch buf[start] { case 't': valid = bytes.Equal(buf[start:i], []byte("true")) case 'f': valid = bytes.Equal(buf[start:i], []byte("false")) case 'T': valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True")) case 'F': valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False")) } if !valid { return i, buf[start:i], fmt.Errorf("invalid boolean") } return i, buf[start:i], nil } // skipWhitespace returns the end position within buf, starting at i after // scanning over spaces in tags. func skipWhitespace(buf []byte, i int) int { for i < len(buf) { if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 { break } i++ } return i } // scanLine returns the end position in buf and the next line found within // buf. func scanLine(buf []byte, i int) (int, []byte) { start := i quoted := false fields := false // tracks how many '=' and commas we've seen // this duplicates some of the functionality in scanFields equals := 0 commas := 0 for { // reached the end of buf? 
if i >= len(buf) { break } // skip past escaped characters if buf[i] == '\\' && i+2 < len(buf) { i += 2 continue } if buf[i] == ' ' { fields = true } // If we see a double quote, makes sure it is not escaped if fields { if !quoted && buf[i] == '=' { i++ equals++ continue } else if !quoted && buf[i] == ',' { i++ commas++ continue } else if buf[i] == '"' && equals > commas { i++ quoted = !quoted continue } } if buf[i] == '\n' && !quoted { break } i++ } return i, buf[start:i] } // scanTo returns the end position in buf and the next consecutive block // of bytes, starting from i and ending with stop byte, where stop byte // has not been escaped. // // If there are leading spaces, they are skipped. func scanTo(buf []byte, i int, stop byte) (int, []byte) { start := i for { // reached the end of buf? if i >= len(buf) { break } // Reached unescaped stop value? if buf[i] == stop && (i == 0 || buf[i-1] != '\\') { break } i++ } return i, buf[start:i] } // scanTo returns the end position in buf and the next consecutive block // of bytes, starting from i and ending with stop byte. If there are leading // spaces, they are skipped. func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) { start := i if buf[i] == stop || buf[i] == ' ' { return i, buf[start:i] } for { i++ if buf[i-1] == '\\' { continue } // reached the end of buf? if i >= len(buf) { return i, buf[start:i] } // reached end of block? if buf[i] == stop || buf[i] == ' ' { return i, buf[start:i] } } } func scanTagValue(buf []byte, i int) (int, []byte) { start := i for { if i >= len(buf) { break } if buf[i] == ',' && buf[i-1] != '\\' { break } i++ } if i > len(buf) { return i, nil } return i, buf[start:i] } func scanFieldValue(buf []byte, i int) (int, []byte) { start := i quoted := false for i < len(buf) { // Only escape char for a field value is a double-quote and backslash if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') { i += 2 continue } // Quoted value? (e.g. string) if buf[i] == '"' { i++ quoted = !quoted continue } if buf[i] == ',' && !quoted { break } i++ } return i, buf[start:i] } func EscapeMeasurement(in []byte) []byte { for _, c := range measurementEscapeCodes { if bytes.IndexByte(in, c.k[0]) != -1 { in = bytes.Replace(in, c.k[:], c.esc[:], -1) } } return in } func unescapeMeasurement(in []byte) []byte { if bytes.IndexByte(in, '\\') == -1 { return in } for i := range measurementEscapeCodes { c := &measurementEscapeCodes[i] if bytes.IndexByte(in, c.k[0]) != -1 { in = bytes.Replace(in, c.esc[:], c.k[:], -1) } } return in } func escapeTag(in []byte) []byte { for i := range tagEscapeCodes { c := &tagEscapeCodes[i] if bytes.IndexByte(in, c.k[0]) != -1 { in = bytes.Replace(in, c.k[:], c.esc[:], -1) } } return in } func unescapeTag(in []byte) []byte { if bytes.IndexByte(in, '\\') == -1 { return in } for i := range tagEscapeCodes { c := &tagEscapeCodes[i] if bytes.IndexByte(in, c.k[0]) != -1 { in = bytes.Replace(in, c.esc[:], c.k[:], -1) } } return in } // escapeStringFieldReplacer replaces double quotes and backslashes // with the same character preceded by a backslash. // As of Go 1.7 this benchmarked better in allocations and CPU time // compared to iterating through a string byte-by-byte and appending to a new byte slice, // calling strings.Replace twice, and better than (*Regex).ReplaceAllString. var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) // EscapeStringField returns a copy of in with any double quotes or // backslashes with escaped values. 
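// For illustration only (a sketch added alongside the original code, using the
// "fmt" import already declared in this file): the escaping helpers in this
// file apply different escape sets, as the expected outputs in the comments show.
func exampleEscaping() {
	fmt.Printf("%s\n", EscapeMeasurement([]byte("cpu load"))) // cpu\ load
	fmt.Printf("%s\n", escapeTag([]byte("host=server a")))    // host\=server\ a
	fmt.Println(EscapeStringField(`say "hi"`))                // say \"hi\"
}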
func EscapeStringField(in string) string { return escapeStringFieldReplacer.Replace(in) } // unescapeStringField returns a copy of in with any escaped double-quotes // or backslashes unescaped. func unescapeStringField(in string) string { if strings.IndexByte(in, '\\') == -1 { return in } var out []byte i := 0 for { if i >= len(in) { break } // unescape backslashes if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' { out = append(out, '\\') i += 2 continue } // unescape double-quotes if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' { out = append(out, '"') i += 2 continue } out = append(out, in[i]) i++ } return string(out) } // NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If // an unsupported field value (NaN, or +/-Inf) or out of range time is passed, this function // returns an error. func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) { key, err := pointKey(name, tags, fields, t) if err != nil { return nil, err } return &point{ key: key, time: t, fields: fields.MarshalBinary(), }, nil } // pointKey checks some basic requirements for valid points, and returns the // key, along with an possible error. func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { if len(fields) == 0 { return nil, ErrPointMustHaveAField } if !t.IsZero() { if err := CheckTime(t); err != nil { return nil, err } } for key, value := range fields { switch value := value.(type) { case float64: // Ensure the caller validates and handles invalid field values if math.IsInf(value, 0) { return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) } if math.IsNaN(value) { return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) } case float32: // Ensure the caller validates and handles invalid field values if math.IsInf(float64(value), 0) { return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) } if math.IsNaN(float64(value)) { return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) } } if len(key) == 0 { return nil, fmt.Errorf("all fields must have non-empty names") } } key := MakeKey([]byte(measurement), tags) for field := range fields { sz := seriesKeySize(key, []byte(field)) if sz > MaxKeyLength { return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) } } return key, nil } func seriesKeySize(key, field []byte) int { // 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular // dependency. return len(key) + 4 + len(field) } // NewPointFromBytes returns a new Point from a marshalled Point. func NewPointFromBytes(b []byte) (Point, error) { p := &point{} if err := p.UnmarshalBinary(b); err != nil { return nil, err } // This does some basic validation to ensure there are fields and they // can be unmarshalled as well. 
iter := p.FieldIterator() var hasField bool for iter.Next() { if len(iter.FieldKey()) == 0 { continue } hasField = true switch iter.Type() { case Float: _, err := iter.FloatValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } case Integer: _, err := iter.IntegerValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } case Unsigned: _, err := iter.UnsignedValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } case String: // Skip since this won't return an error case Boolean: _, err := iter.BooleanValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } } } if !hasField { return nil, ErrPointMustHaveAField } return p, nil } // MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If // an unsupported field value (NaN) is passed, this function panics. func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { pt, err := NewPoint(name, tags, fields, time) if err != nil { panic(err.Error()) } return pt } // Key returns the key (measurement joined with tags) of the point. func (p *point) Key() []byte { return p.key } func (p *point) name() []byte { _, name := scanTo(p.key, 0, ',') return name } func (p *point) Name() []byte { return escape.Unescape(p.name()) } // SetName updates the measurement name for the point. func (p *point) SetName(name string) { p.cachedName = "" p.key = MakeKey([]byte(name), p.Tags()) } // Time return the timestamp for the point. func (p *point) Time() time.Time { return p.time } // SetTime updates the timestamp for the point. func (p *point) SetTime(t time.Time) { p.time = t } // Round will round the timestamp of the point to the given duration. func (p *point) Round(d time.Duration) { p.time = p.time.Round(d) } // Tags returns the tag set for the point. func (p *point) Tags() Tags { if p.cachedTags != nil { return p.cachedTags } p.cachedTags = parseTags(p.key, nil) return p.cachedTags } func (p *point) ForEachTag(fn func(k, v []byte) bool) { walkTags(p.key, fn) } func (p *point) HasTag(tag []byte) bool { if len(p.key) == 0 { return false } var exists bool walkTags(p.key, func(key, value []byte) bool { if bytes.Equal(tag, key) { exists = true return false } return true }) return exists } func walkTags(buf []byte, fn func(key, value []byte) bool) { if len(buf) == 0 { return } pos, name := scanTo(buf, 0, ',') // it's an empty key, so there are no tags if len(name) == 0 { return } hasEscape := bytes.IndexByte(buf, '\\') != -1 i := pos + 1 var key, value []byte for { if i >= len(buf) { break } i, key = scanTo(buf, i, '=') i, value = scanTagValue(buf, i+1) if len(value) == 0 { continue } if hasEscape { if !fn(unescapeTag(key), unescapeTag(value)) { return } } else { if !fn(key, value) { return } } i++ } } // walkFields walks each field key and value via fn. If fn returns false, the iteration // is stopped. The values are the raw byte slices and not the converted types. 
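// Hedged usage sketch (exampleForEachTag is a hypothetical name, not part of
// the upstream file): iterating a parsed point's tags via ForEachTag, which
// walks the raw key bytes without materializing a Tags slice. Only identifiers
// from this package and the already-imported "fmt" are used.
func exampleForEachTag() {
	pts, err := ParsePointsString("cpu,host=a,region=b value=1i 1000000000")
	if err != nil || len(pts) == 0 {
		return
	}
	// Returning true from the callback continues the walk; false stops it.
	pts[0].ForEachTag(func(k, v []byte) bool {
		fmt.Printf("%s=%s\n", k, v)
		return true
	})
}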
func walkFields(buf []byte, fn func(key, value []byte) bool) error { var i int var key, val []byte for len(buf) > 0 { i, key = scanTo(buf, 0, '=') if i > len(buf)-2 { return fmt.Errorf("invalid value: field-key=%s", key) } buf = buf[i+1:] i, val = scanFieldValue(buf, 0) buf = buf[i:] if !fn(key, val) { break } // slice off comma if len(buf) > 0 { buf = buf[1:] } } return nil } // parseTags parses buf into the provided destination tags, returning destination // Tags, which may have a different length and capacity. func parseTags(buf []byte, dst Tags) Tags { if len(buf) == 0 { return nil } n := bytes.Count(buf, []byte(",")) if cap(dst) < n { dst = make(Tags, n) } else { dst = dst[:n] } // Ensure existing behaviour when point has no tags and nil slice passed in. if dst == nil { dst = Tags{} } // Series keys can contain escaped commas, therefore the number of commas // in a series key only gives an estimation of the upper bound on the number // of tags. var i int walkTags(buf, func(key, value []byte) bool { dst[i].Key, dst[i].Value = key, value i++ return true }) return dst[:i] } // MakeKey creates a key for a set of tags. func MakeKey(name []byte, tags Tags) []byte { return AppendMakeKey(nil, name, tags) } // AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer. func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte { // unescape the name and then re-escape it to avoid double escaping. // The key should always be stored in escaped form. dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...) dst = tags.AppendHashKey(dst) return dst } // SetTags replaces the tags for the point. func (p *point) SetTags(tags Tags) { p.key = MakeKey(p.Name(), tags) p.cachedTags = tags } // AddTag adds or replaces a tag value for a point. func (p *point) AddTag(key, value string) { tags := p.Tags() tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) sort.Sort(tags) p.cachedTags = tags p.key = MakeKey(p.Name(), tags) } // Fields returns the fields for the point. func (p *point) Fields() (Fields, error) { if p.cachedFields != nil { return p.cachedFields, nil } cf, err := p.unmarshalBinary() if err != nil { return nil, err } p.cachedFields = cf return p.cachedFields, nil } // SetPrecision will round a time to the specified precision. func (p *point) SetPrecision(precision string) { switch precision { case "n": case "u": p.SetTime(p.Time().Truncate(time.Microsecond)) case "ms": p.SetTime(p.Time().Truncate(time.Millisecond)) case "s": p.SetTime(p.Time().Truncate(time.Second)) case "m": p.SetTime(p.Time().Truncate(time.Minute)) case "h": p.SetTime(p.Time().Truncate(time.Hour)) } } // String returns the string representation of the point. func (p *point) String() string { if p.Time().IsZero() { return string(p.Key()) + " " + string(p.fields) } return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) } // AppendString appends the string representation of the point to buf. func (p *point) AppendString(buf []byte) []byte { buf = append(buf, p.key...) buf = append(buf, ' ') buf = append(buf, p.fields...) if !p.time.IsZero() { buf = append(buf, ' ') buf = strconv.AppendInt(buf, p.UnixNano(), 10) } return buf } // StringSize returns the length of the string that would be returned by String(). 
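// Hedged sketch (exampleMakeKeyAndAppend is illustrative only): how MakeKey,
// StringSize and AppendString are meant to work together to build a series key
// and reuse one output buffer instead of allocating via String(). Only this
// package plus the already-imported "fmt" and "time" are used.
func exampleMakeKeyAndAppend() {
	tags := NewTags(map[string]string{"host": "serverA", "region": "uswest"})
	key := MakeKey([]byte("cpu"), tags) // cpu,host=serverA,region=uswest
	pt, err := NewPoint("cpu", tags, Fields{"value": 0.64}, time.Unix(0, 0))
	if err != nil {
		return
	}
	buf := make([]byte, 0, pt.StringSize())
	buf = pt.AppendString(buf) // appends key, fields and timestamp into buf
	fmt.Printf("%s | %s\n", key, buf)
}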
func (p *point) StringSize() int { size := len(p.key) + len(p.fields) + 1 if !p.time.IsZero() { digits := 1 // even "0" has one digit t := p.UnixNano() if t < 0 { // account for negative sign, then negate digits++ t = -t } for t > 9 { // already accounted for one digit digits++ t /= 10 } size += digits + 1 // digits and a space } return size } // MarshalBinary returns a binary representation of the point. func (p *point) MarshalBinary() ([]byte, error) { if len(p.fields) == 0 { return nil, ErrPointMustHaveAField } tb, err := p.time.MarshalBinary() if err != nil { return nil, err } b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) i := 0 binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) i += 4 i += copy(b[i:], p.key) binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) i += 4 i += copy(b[i:], p.fields) copy(b[i:], tb) return b, nil } // UnmarshalBinary decodes a binary representation of the point into a point struct. func (p *point) UnmarshalBinary(b []byte) error { var n int // Read key length. if len(b) < 4 { return io.ErrShortBuffer } n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] // Read key. if len(b) < n { return io.ErrShortBuffer } p.key, b = b[:n], b[n:] // Read fields length. if len(b) < 4 { return io.ErrShortBuffer } n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] // Read fields. if len(b) < n { return io.ErrShortBuffer } p.fields, b = b[:n], b[n:] // Read timestamp. return p.time.UnmarshalBinary(b) } // PrecisionString returns a string representation of the point. If there // is a timestamp associated with the point then it will be specified in the // given unit. func (p *point) PrecisionString(precision string) string { if p.Time().IsZero() { return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) } return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), p.UnixNano()/GetPrecisionMultiplier(precision)) } // RoundedString returns a string representation of the point. If there // is a timestamp associated with the point, then it will be rounded to the // given duration. func (p *point) RoundedString(d time.Duration) string { if p.Time().IsZero() { return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) } return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), p.time.Round(d).UnixNano()) } func (p *point) unmarshalBinary() (Fields, error) { iter := p.FieldIterator() fields := make(Fields, 8) for iter.Next() { if len(iter.FieldKey()) == 0 { continue } switch iter.Type() { case Float: v, err := iter.FloatValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } fields[string(iter.FieldKey())] = v case Integer: v, err := iter.IntegerValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } fields[string(iter.FieldKey())] = v case Unsigned: v, err := iter.UnsignedValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } fields[string(iter.FieldKey())] = v case String: fields[string(iter.FieldKey())] = iter.StringValue() case Boolean: v, err := iter.BooleanValue() if err != nil { return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) } fields[string(iter.FieldKey())] = v } } return fields, nil } // HashID returns a non-cryptographic checksum of the point's key. func (p *point) HashID() uint64 { h := NewInlineFNV64a() h.Write(p.key) sum := h.Sum64() return sum } // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. 
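// Hedged sketch (exampleBinaryRoundTrip is illustrative, not upstream code):
// MarshalBinary and NewPointFromBytes form a round trip that preserves the key,
// fields and timestamp, which is what HashID and UnixNano are compared on here.
// Only this package plus the already-imported "fmt" and "time" are used.
func exampleBinaryRoundTrip() {
	pt := MustNewPoint("cpu", NewTags(map[string]string{"host": "a"}),
		Fields{"value": int64(42)}, time.Unix(0, 42))
	b, err := pt.MarshalBinary()
	if err != nil {
		return
	}
	back, err := NewPointFromBytes(b)
	if err != nil {
		return
	}
	fmt.Println(back.HashID() == pt.HashID(), back.UnixNano() == pt.UnixNano()) // true true
}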
func (p *point) UnixNano() int64 { return p.Time().UnixNano() } // Split will attempt to return multiple points with the same timestamp whose // string representations are no longer than size. Points with a single field or // a point without a timestamp may exceed the requested size. func (p *point) Split(size int) []Point { if p.time.IsZero() || p.StringSize() <= size { return []Point{p} } // key string, timestamp string, spaces size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 var points []Point var start, cur int for cur < len(p.fields) { end, _ := scanTo(p.fields, cur, '=') end, _ = scanFieldValue(p.fields, end+1) if cur > start && end-start > size { points = append(points, &point{ key: p.key, time: p.time, fields: p.fields[start : cur-1], }) start = cur } cur = end + 1 } points = append(points, &point{ key: p.key, time: p.time, fields: p.fields[start:], }) return points } // Tag represents a single key/value tag pair. type Tag struct { Key []byte Value []byte } // NewTag returns a new Tag. func NewTag(key, value []byte) Tag { return Tag{ Key: key, Value: value, } } // Size returns the size of the key and value. func (t Tag) Size() int { return len(t.Key) + len(t.Value) } // Clone returns a shallow copy of Tag. // // Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. // Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision. func (t Tag) Clone() Tag { other := Tag{ Key: make([]byte, len(t.Key)), Value: make([]byte, len(t.Value)), } copy(other.Key, t.Key) copy(other.Value, t.Value) return other } // String returns the string reprsentation of the tag. func (t *Tag) String() string { var buf bytes.Buffer buf.WriteByte('{') buf.WriteString(string(t.Key)) buf.WriteByte(' ') buf.WriteString(string(t.Value)) buf.WriteByte('}') return buf.String() } // Tags represents a sorted list of tags. type Tags []Tag // NewTags returns a new Tags from a map. func NewTags(m map[string]string) Tags { if len(m) == 0 { return nil } a := make(Tags, 0, len(m)) for k, v := range m { a = append(a, NewTag([]byte(k), []byte(v))) } sort.Sort(a) return a } // HashKey hashes all of a tag's keys. func (a Tags) HashKey() []byte { return a.AppendHashKey(nil) } func (a Tags) needsEscape() bool { for i := range a { t := &a[i] for j := range tagEscapeCodes { c := &tagEscapeCodes[j] if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 { return true } } } return false } // AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer. func (a Tags) AppendHashKey(dst []byte) []byte { // Empty maps marshal to empty bytes. if len(a) == 0 { return dst } // Type invariant: Tags are sorted sz := 0 var escaped Tags if a.needsEscape() { var tmp [20]Tag if len(a) < len(tmp) { escaped = tmp[:len(a)] } else { escaped = make(Tags, len(a)) } for i := range a { t := &a[i] nt := &escaped[i] nt.Key = escapeTag(t.Key) nt.Value = escapeTag(t.Value) sz += len(nt.Key) + len(nt.Value) } } else { sz = a.Size() escaped = a } sz += len(escaped) + (len(escaped) * 2) // separators // Generate marshaled bytes. 
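// The buffer is grown only when dst's spare capacity cannot hold the size
// computed above; each ",key=value" pair is then written directly into the
// reserved tail (tags with empty values are skipped) and dst is re-sliced to
// the bytes actually written.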
if cap(dst)-len(dst) < sz { nd := make([]byte, len(dst), len(dst)+sz) copy(nd, dst) dst = nd } buf := dst[len(dst) : len(dst)+sz] idx := 0 for i := range escaped { k := &escaped[i] if len(k.Value) == 0 { continue } buf[idx] = ',' idx++ copy(buf[idx:], k.Key) idx += len(k.Key) buf[idx] = '=' idx++ copy(buf[idx:], k.Value) idx += len(k.Value) } return dst[:len(dst)+idx] } // String returns the string representation of the tags. func (a Tags) String() string { var buf bytes.Buffer buf.WriteByte('[') for i := range a { buf.WriteString(a[i].String()) if i < len(a)-1 { buf.WriteByte(' ') } } buf.WriteByte(']') return buf.String() } // Size returns the number of bytes needed to store all tags. Note, this is // the number of bytes needed to store all keys and values and does not account // for data structures or delimiters for example. func (a Tags) Size() int { var total int for i := range a { total += a[i].Size() } return total } // Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements // // Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. // Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision. func (a Tags) Clone() Tags { if len(a) == 0 { return nil } others := make(Tags, len(a)) for i := range a { others[i] = a[i].Clone() } return others } func (a Tags) Len() int { return len(a) } func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Equal returns true if a equals other. func (a Tags) Equal(other Tags) bool { if len(a) != len(other) { return false } for i := range a { if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) { return false } } return true } // CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b. func CompareTags(a, b Tags) int { // Compare each key & value until a mismatch. for i := 0; i < len(a) && i < len(b); i++ { if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 { return cmp } if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 { return cmp } } // If all tags are equal up to this point then return shorter tagset. if len(a) < len(b) { return -1 } else if len(a) > len(b) { return 1 } // All tags are equal. return 0 } // Get returns the value for a key. func (a Tags) Get(key []byte) []byte { // OPTIMIZE: Use sort.Search if tagset is large. for _, t := range a { if bytes.Equal(t.Key, key) { return t.Value } } return nil } // GetString returns the string value for a string key. func (a Tags) GetString(key string) string { return string(a.Get([]byte(key))) } // Set sets the value for a key. func (a *Tags) Set(key, value []byte) { for i, t := range *a { if bytes.Equal(t.Key, key) { (*a)[i].Value = value return } } *a = append(*a, Tag{Key: key, Value: value}) sort.Sort(*a) } // SetString sets the string value for a string key. func (a *Tags) SetString(key, value string) { a.Set([]byte(key), []byte(value)) } // Map returns a map representation of the tags. func (a Tags) Map() map[string]string { m := make(map[string]string, len(a)) for _, t := range a { m[string(t.Key)] = string(t.Value) } return m } // CopyTags returns a shallow copy of tags. func CopyTags(a Tags) Tags { other := make(Tags, len(a)) copy(other, a) return other } // DeepCopyTags returns a deep copy of tags. func DeepCopyTags(a Tags) Tags { // Calculate size of keys/values in bytes. 
var n int for _, t := range a { n += len(t.Key) + len(t.Value) } // Build single allocation for all key/values. buf := make([]byte, n) // Copy tags to new set. other := make(Tags, len(a)) for i, t := range a { copy(buf, t.Key) other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):] copy(buf, t.Value) other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):] } return other } // Fields represents a mapping between a Point's field names and their // values. type Fields map[string]interface{} // FieldIterator retuns a FieldIterator that can be used to traverse the // fields of a point without constructing the in-memory map. func (p *point) FieldIterator() FieldIterator { p.Reset() return p } type fieldIterator struct { start, end int key, keybuf []byte valueBuf []byte fieldType FieldType } // Next indicates whether there any fields remaining. func (p *point) Next() bool { p.it.start = p.it.end if p.it.start >= len(p.fields) { return false } p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=') if escape.IsEscaped(p.it.key) { p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key) p.it.key = p.it.keybuf } p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1) p.it.end++ if len(p.it.valueBuf) == 0 { p.it.fieldType = Empty return true } c := p.it.valueBuf[0] if c == '"' { p.it.fieldType = String return true } if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 { if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' { p.it.fieldType = Integer p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] } else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' { p.it.fieldType = Unsigned p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] } else { p.it.fieldType = Float } return true } // to keep the same behavior that currently exists, default to boolean p.it.fieldType = Boolean return true } // FieldKey returns the key of the current field. func (p *point) FieldKey() []byte { return p.it.key } // Type returns the FieldType of the current field. func (p *point) Type() FieldType { return p.it.fieldType } // StringValue returns the string value of the current field. func (p *point) StringValue() string { return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1])) } // IntegerValue returns the integer value of the current field. func (p *point) IntegerValue() (int64, error) { n, err := parseIntBytes(p.it.valueBuf, 10, 64) if err != nil { return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err) } return n, nil } // UnsignedValue returns the unsigned value of the current field. func (p *point) UnsignedValue() (uint64, error) { n, err := parseUintBytes(p.it.valueBuf, 10, 64) if err != nil { return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err) } return n, nil } // BooleanValue returns the boolean value of the current field. func (p *point) BooleanValue() (bool, error) { b, err := parseBoolBytes(p.it.valueBuf) if err != nil { return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err) } return b, nil } // FloatValue returns the float value of the current field. func (p *point) FloatValue() (float64, error) { f, err := parseFloatBytes(p.it.valueBuf, 64) if err != nil { return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err) } return f, nil } // Reset resets the iterator to its initial state. 
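// Hedged sketch (exampleFieldIterator is an illustrative name only): the
// intended FieldIterator loop, reading typed values without building the
// Fields map. Only this package plus the already-imported "fmt" and "time"
// are used.
func exampleFieldIterator() {
	pt := MustNewPoint("cpu", nil, Fields{"load": 0.64, "count": int64(3)}, time.Unix(0, 0))
	iter := pt.FieldIterator()
	for iter.Next() {
		switch iter.Type() {
		case Float:
			v, _ := iter.FloatValue()
			fmt.Printf("%s=%v\n", iter.FieldKey(), v)
		case Integer:
			v, _ := iter.IntegerValue()
			fmt.Printf("%s=%v\n", iter.FieldKey(), v)
		}
	}
}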
func (p *point) Reset() { p.it.fieldType = Empty p.it.key = nil p.it.valueBuf = nil p.it.start = 0 p.it.end = 0 } // MarshalBinary encodes all the fields to their proper type and returns the binary // represenation // NOTE: uint64 is specifically not supported due to potential overflow when we decode // again later to an int64 // NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted... func (p Fields) MarshalBinary() []byte { var b []byte keys := make([]string, 0, len(p)) for k := range p { keys = append(keys, k) } // Not really necessary, can probably be removed. sort.Strings(keys) for i, k := range keys { if i > 0 { b = append(b, ',') } b = appendField(b, k, p[k]) } return b } func appendField(b []byte, k string, v interface{}) []byte { b = append(b, []byte(escape.String(k))...) b = append(b, '=') // check popular types first switch v := v.(type) { case float64: b = strconv.AppendFloat(b, v, 'f', -1, 64) case int64: b = strconv.AppendInt(b, v, 10) b = append(b, 'i') case string: b = append(b, '"') b = append(b, []byte(EscapeStringField(v))...) b = append(b, '"') case bool: b = strconv.AppendBool(b, v) case int32: b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case int16: b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case int8: b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case int: b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case uint64: b = strconv.AppendUint(b, v, 10) b = append(b, 'u') case uint32: b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case uint16: b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case uint8: b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case uint: // TODO: 'uint' should be converted to writing as an unsigned integer, // but we cannot since that would break backwards compatibility. b = strconv.AppendInt(b, int64(v), 10) b = append(b, 'i') case float32: b = strconv.AppendFloat(b, float64(v), 'f', -1, 32) case []byte: b = append(b, v...) case nil: // skip default: // Can't determine the type, so convert to string b = append(b, '"') b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...) b = append(b, '"') } return b } // ValidKeyToken returns true if the token used for measurement, tag key, or tag // value is a valid unicode string and only contains printable, non-replacement characters. func ValidKeyToken(s string) bool { if !utf8.ValidString(s) { return false } for _, r := range s { if !unicode.IsPrint(r) || r == unicode.ReplacementChar { return false } } return true } // ValidKeyTokens returns true if the measurement name and all tags are valid. func ValidKeyTokens(name string, tags Tags) bool { if !ValidKeyToken(name) { return false } for _, tag := range tags { if !ValidKeyToken(string(tag.Key)) || !ValidKeyToken(string(tag.Value)) { return false } } return true } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/points_internal_test.go000066400000000000000000000007071433650256200322700ustar00rootroot00000000000000package models import "testing" func TestMarshalPointNoFields(t *testing.T) { points, err := ParsePointsString("m,k=v f=0i") if err != nil { t.Fatal(err) } // It's unclear how this can ever happen, but we've observed points that were marshalled without any fields. 
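// The test reproduces that state directly: it clears the raw field bytes on the
// parsed point and then asserts that MarshalBinary refuses to encode it.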
points[0].(*point).fields = []byte{} if _, err := points[0].MarshalBinary(); err != ErrPointMustHaveAField { t.Fatalf("got error %v, exp %v", err, ErrPointMustHaveAField) } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/points_test.go000066400000000000000000002137541433650256200304040ustar00rootroot00000000000000package models_test import ( "bytes" "fmt" "io" "math" "math/rand" "reflect" "strconv" "strings" "testing" "time" "github.com/influxdata/influxdb1-client/models" ) var ( tags = models.NewTags(map[string]string{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"}) fields = models.Fields{ "int64": int64(math.MaxInt64), "uint32": uint32(math.MaxUint32), "string": "String field that has a decent length, probably some log message or something", "boolean": false, "float64-tiny": float64(math.SmallestNonzeroFloat64), "float64-large": float64(math.MaxFloat64), } maxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64) minFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64) sink interface{} ) func TestMarshal(t *testing.T) { got := tags.HashKey() if exp := ",apple=orange,foo=bar,host=serverA,region=uswest"; string(got) != exp { t.Log("got: ", string(got)) t.Log("exp: ", exp) t.Error("invalid match") } } func TestMarshalFields(t *testing.T) { for _, tt := range []struct { name string value interface{} exp string }{ { name: "Float", value: float64(2), exp: `value=2`, }, { name: "Integer", value: int64(2), exp: `value=2i`, }, { name: "Unsigned", value: uint64(2), exp: `value=2u`, }, { name: "String", value: "foobar", exp: `value="foobar"`, }, { name: "Boolean", value: true, exp: `value=true`, }, } { t.Run(tt.name, func(t *testing.T) { fields := map[string]interface{}{"value": tt.value} if have, want := models.Fields(fields).MarshalBinary(), []byte(tt.exp); !bytes.Equal(have, want) { t.Fatalf("unexpected field output: %s != %s", string(have), string(want)) } }) } } func TestTags_HashKey(t *testing.T) { tags = models.NewTags(map[string]string{"A FOO": "bar", "APPLE": "orange", "host": "serverA", "region": "uswest"}) got := tags.HashKey() if exp := ",A\\ FOO=bar,APPLE=orange,host=serverA,region=uswest"; string(got) != exp { t.Log("got: ", string(got)) t.Log("exp: ", exp) t.Error("invalid match") } } func BenchmarkMarshal(b *testing.B) { for i := 0; i < b.N; i++ { tags.HashKey() } } func TestPoint_Tags(t *testing.T) { examples := []struct { Point string Tags models.Tags }{ {`cpu value=1`, models.Tags{}}, {"cpu,tag0=v0 value=1", models.NewTags(map[string]string{"tag0": "v0"})}, {"cpu,tag0=v0,tag1=v0 value=1", models.NewTags(map[string]string{"tag0": "v0", "tag1": "v0"})}, {`cpu,tag0=v\ 0 value=1`, models.NewTags(map[string]string{"tag0": "v 0"})}, {`cpu,tag0=v\ 0\ 1,tag1=v2 value=1`, models.NewTags(map[string]string{"tag0": "v 0 1", "tag1": "v2"})}, {`cpu,tag0=\, value=1`, models.NewTags(map[string]string{"tag0": ","})}, {`cpu,ta\ g0=\, value=1`, models.NewTags(map[string]string{"ta g0": ","})}, {`cpu,tag0=\,1 value=1`, models.NewTags(map[string]string{"tag0": ",1"})}, {`cpu,tag0=1\"\",t=k value=1`, models.NewTags(map[string]string{"tag0": `1\"\"`, "t": "k"})}, } for _, example := range examples { t.Run(example.Point, func(t *testing.T) { pts, err := models.ParsePointsString(example.Point) if err != nil { t.Fatal(err) } else if len(pts) != 1 { t.Fatalf("parsed %d points, expected 1", len(pts)) } // Repeat to test Tags() caching for i := 0; i < 2; i++ { tags := pts[0].Tags() if !reflect.DeepEqual(tags, example.Tags) { t.Fatalf("got %#v (%s), 
expected %#v", tags, tags.String(), example.Tags) } } }) } } func TestPoint_StringSize(t *testing.T) { testPoint_cube(t, func(p models.Point) { l := p.StringSize() s := p.String() if l != len(s) { t.Errorf("Incorrect length for %q. got %v, exp %v", s, l, len(s)) } }) } func TestPoint_AppendString(t *testing.T) { testPoint_cube(t, func(p models.Point) { got := p.AppendString(nil) exp := []byte(p.String()) if !reflect.DeepEqual(exp, got) { t.Errorf("AppendString() didn't match String(): got %v, exp %v", got, exp) } }) } func testPoint_cube(t *testing.T, f func(p models.Point)) { // heard of a table-driven test? let's make a cube-driven test... tagList := []models.Tags{nil, {models.NewTag([]byte("foo"), []byte("bar"))}, tags} fieldList := []models.Fields{{"a": 42.0}, {"a": 42, "b": "things"}, fields} timeList := []time.Time{time.Time{}, time.Unix(0, 0), time.Unix(-34526, 0), time.Unix(231845, 0), time.Now()} for _, tagSet := range tagList { for _, fieldSet := range fieldList { for _, pointTime := range timeList { p, err := models.NewPoint("test", tagSet, fieldSet, pointTime) if err != nil { t.Errorf("unexpected error creating point: %v", err) continue } f(p) } } } } func TestTag_Clone(t *testing.T) { tag := models.NewTag([]byte("key"), []byte("value")) c := tag.Clone() if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) { t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key) } if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) { t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value) } } func TestTags_Clone(t *testing.T) { tags := models.NewTags(map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}) clone := tags.Clone() for i := range tags { tag := tags[i] c := clone[i] if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) { t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key) } if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) { t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value) } } } var p models.Point func BenchmarkNewPoint(b *testing.B) { ts := time.Now() for i := 0; i < b.N; i++ { p, _ = models.NewPoint("measurement", tags, fields, ts) } } func BenchmarkNewPointFromBinary(b *testing.B) { pts, err := models.ParsePointsString("cpu value1=1.0,value2=1.0,value3=3.0,value4=4,value5=\"five\" 1000000000") if err != nil { b.Fatalf("unexpected error ParsePointsString: %v", err) } bytes, err := pts[0].MarshalBinary() if err != nil { b.Fatalf("unexpected error MarshalBinary: %v", err) } for i := 0; i < b.N; i++ { _, err := models.NewPointFromBytes(bytes) if err != nil { b.Fatalf("unexpected error NewPointsFromBytes: %v", err) } } } func BenchmarkParsePointNoTags5000(b *testing.B) { var batch [5000]string for i := 0; i < len(batch); i++ { batch[i] = `cpu value=1i 1000000000` } lines := strings.Join(batch[:], "\n") b.ResetTimer() for i := 0; i < b.N; i++ { models.ParsePoints([]byte(lines)) b.SetBytes(int64(len(lines))) } } func BenchmarkParsePointNoTags(b *testing.B) { line := `cpu value=1i 1000000000` for i := 0; i < b.N; i++ { models.ParsePoints([]byte(line)) b.SetBytes(int64(len(line))) } } func BenchmarkParsePointWithPrecisionN(b *testing.B) { line := `cpu value=1i 1000000000` defaultTime := time.Now().UTC() for i := 0; i < b.N; i++ { models.ParsePointsWithPrecision([]byte(line), defaultTime, "n") b.SetBytes(int64(len(line))) } } func BenchmarkParsePointWithPrecisionU(b *testing.B) { line := `cpu value=1i 1000000000` defaultTime := time.Now().UTC() for i := 0; i < b.N; i++ { 
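// "u" selects microsecond precision, so ParsePointsWithPrecision scales the
// parsed timestamp up to nanoseconds (see TestParsePointsWithPrecision below).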
models.ParsePointsWithPrecision([]byte(line), defaultTime, "u") b.SetBytes(int64(len(line))) } } func BenchmarkParsePointsTagsSorted2(b *testing.B) { line := `cpu,host=serverA,region=us-west value=1i 1000000000` for i := 0; i < b.N; i++ { models.ParsePoints([]byte(line)) b.SetBytes(int64(len(line))) } } func BenchmarkParsePointsTagsSorted5(b *testing.B) { line := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1i 1000000000` for i := 0; i < b.N; i++ { models.ParsePoints([]byte(line)) b.SetBytes(int64(len(line))) } } func BenchmarkParsePointsTagsSorted10(b *testing.B) { line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000` for i := 0; i < b.N; i++ { models.ParsePoints([]byte(line)) b.SetBytes(int64(len(line))) } } func BenchmarkParsePointsTagsUnSorted2(b *testing.B) { line := `cpu,region=us-west,host=serverA value=1i 1000000000` for i := 0; i < b.N; i++ { pt, _ := models.ParsePoints([]byte(line)) b.SetBytes(int64(len(line))) pt[0].Key() } } func BenchmarkParsePointsTagsUnSorted5(b *testing.B) { line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000` for i := 0; i < b.N; i++ { pt, _ := models.ParsePoints([]byte(line)) b.SetBytes(int64(len(line))) pt[0].Key() } } func BenchmarkParsePointsTagsUnSorted10(b *testing.B) { line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1i 1000000000` for i := 0; i < b.N; i++ { pt, _ := models.ParsePoints([]byte(line)) b.SetBytes(int64(len(line))) pt[0].Key() } } func BenchmarkParseKey(b *testing.B) { line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5` for i := 0; i < b.N; i++ { models.ParseKey([]byte(line)) } } // TestPoint wraps a models.Point but also makes available the raw // arguments to the Point. // // This is useful for ensuring that comparisons between results of // operations on Points match the expected input data to the Point, // since models.Point does not expose the raw input data (e.g., tags) // via its API. type TestPoint struct { RawFields models.Fields RawTags models.Tags RawTime time.Time models.Point } // NewTestPoint returns a new TestPoint. // // NewTestPoint panics if it is not a valid models.Point. func NewTestPoint(name string, tags models.Tags, fields models.Fields, time time.Time) TestPoint { return TestPoint{ RawTags: tags, RawFields: fields, RawTime: time, Point: models.MustNewPoint(name, tags, fields, time), } } func test(t *testing.T, line string, point TestPoint) { pts, err := models.ParsePointsWithPrecision([]byte(line), time.Unix(0, 0), "n") if err != nil { t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, line, err) } if exp := 1; len(pts) != exp { t.Fatalf(`ParsePoints("%s") len mismatch. got %d, exp %d`, line, len(pts), exp) } if exp := point.Key(); !bytes.Equal(pts[0].Key(), exp) { t.Errorf("ParsePoints(\"%s\") key mismatch.\ngot %v\nexp %v", line, string(pts[0].Key()), string(exp)) } if exp := len(point.Tags()); len(pts[0].Tags()) != exp { t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp) } for _, tag := range pts[0].Tags() { if !bytes.Equal(tag.Value, point.RawTags.Get(tag.Key)) { t.Errorf(`ParsePoints("%s") tags mismatch. 
got %s, exp %s`, line, tag.Value, point.RawTags.Get(tag.Key)) } } for name, value := range point.RawFields { fields, err := pts[0].Fields() if err != nil { t.Fatal(err) } val := fields[name] expfval, ok := val.(float64) if ok && math.IsNaN(expfval) { gotfval, ok := value.(float64) if ok && !math.IsNaN(gotfval) { t.Errorf(`ParsePoints("%s") field '%s' mismatch. exp NaN`, line, name) } } if !reflect.DeepEqual(val, value) { t.Errorf(`ParsePoints("%s") field '%s' mismatch. got %[3]v (%[3]T), exp %[4]v (%[4]T)`, line, name, val, value) } } if !pts[0].Time().Equal(point.Time()) { t.Errorf(`ParsePoints("%s") time mismatch. got %v, exp %v`, line, pts[0].Time(), point.Time()) } if !strings.HasPrefix(pts[0].String(), line) { t.Errorf("ParsePoints string mismatch.\ngot: %v\nexp: %v", pts[0].String(), line) } } func TestParsePointNoValue(t *testing.T) { pts, err := models.ParsePointsString("") if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) } if exp := 0; len(pts) != exp { t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) } } func TestParsePointWhitespaceValue(t *testing.T) { pts, err := models.ParsePointsString(" ") if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) } if exp := 0; len(pts) != exp { t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) } } func TestParsePointNoFields(t *testing.T) { expectedSuffix := "missing fields" examples := []string{ "cpu_load_short,host=server01,region=us-west", "cpu", "cpu,host==", "=", } for i, example := range examples { _, err := models.ParsePointsString(example) if err == nil { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) } else if !strings.HasSuffix(err.Error(), expectedSuffix) { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) } } } func TestParsePointNoTimestamp(t *testing.T) { test(t, "cpu value=1", NewTestPoint("cpu", nil, models.Fields{"value": 1.0}, time.Unix(0, 0))) } func TestParsePointMissingQuote(t *testing.T) { expectedSuffix := "unbalanced quotes" examples := []string{ `cpu,host=serverA value="test`, `cpu,host=serverA value="test""`, } for i, example := range examples { _, err := models.ParsePointsString(example) if err == nil { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) } else if !strings.HasSuffix(err.Error(), expectedSuffix) { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) } } } func TestParsePointMissingTagKey(t *testing.T) { expectedSuffix := "missing tag key" examples := []string{ `cpu, value=1`, `cpu,`, `cpu,,,`, `cpu,host=serverA,=us-east value=1i`, `cpu,host=serverAa\,,=us-east value=1i`, `cpu,host=serverA\,,=us-east value=1i`, `cpu, =serverA value=1i`, } for i, example := range examples { _, err := models.ParsePointsString(example) if err == nil { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) } else if !strings.HasSuffix(err.Error(), expectedSuffix) { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) } } _, err := models.ParsePointsString(`cpu,host=serverA,\ =us-east value=1i`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,\ =us-east value=1i`, err) } } func TestParsePointMissingTagValue(t *testing.T) { expectedSuffix := "missing tag value" examples := []string{ `cpu,host`, `cpu,host,`, `cpu,host=`, `cpu,host value=1i`, `cpu,host=serverA,region value=1i`, `cpu,host=serverA,region= value=1i`, `cpu,host=serverA,region=,zone=us-west value=1i`, } for i, example := range examples { _, err := models.ParsePointsString(example) if err == nil { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) } else if !strings.HasSuffix(err.Error(), expectedSuffix) { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) } } } func TestParsePointInvalidTagFormat(t *testing.T) { expectedSuffix := "invalid tag format" examples := []string{ `cpu,host=f=o,`, `cpu,host=f\==o,`, } for i, example := range examples { _, err := models.ParsePointsString(example) if err == nil { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) } else if !strings.HasSuffix(err.Error(), expectedSuffix) { t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) } } } func TestParsePointMissingFieldName(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west =`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =`) } _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west =123i`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =123i`) } _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west a\ =123i`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west a\ =123i`) } _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=123i,=456i`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`) } } func TestParsePointMissingFieldValue(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=`) } _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value= 1000000000i`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value= 1000000000i`) } _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=,value2=1i`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=,value2=1i`) } _, err = models.ParsePointsString(`cpu,host=server01,region=us-west 1434055562000000000i`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west 1434055562000000000i`) } _, err = models.ParsePointsString(`cpu,host=server01,region=us-west value=1i,b`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west value=1i,b`) } _, err = models.ParsePointsString(`m f="blah"=123,r 1531703600000000000`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `m f="blah"=123,r 1531703600000000000`) } } func TestParsePointBadNumber(t *testing.T) { for _, tt := range []string{ "cpu v=- ", "cpu v=-i ", "cpu v=-. ", "cpu v=. 
", "cpu v=1.0i ", "cpu v=1ii ", "cpu v=1a ", "cpu v=-e-e-e ", "cpu v=42+3 ", "cpu v= ", "cpu v=-123u", } { _, err := models.ParsePointsString(tt) if err == nil { t.Errorf("Point %q should be invalid", tt) } } } func TestParsePointMaxInt64(t *testing.T) { // out of range _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775808i`) exp := `unable to parse 'cpu,host=serverA,region=us-west value=9223372036854775808i': unable to parse integer 9223372036854775808: strconv.ParseInt: parsing "9223372036854775808": value out of range` if err == nil || (err != nil && err.Error() != exp) { t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err) } // max int p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775807i`) if err != nil { t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err) } fields, err := p[0].Fields() if err != nil { t.Fatal(err) } if exp, got := int64(9223372036854775807), fields["value"].(int64); exp != got { t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) } // leading zeros _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`) if err != nil { t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err) } } func TestParsePointMinInt64(t *testing.T) { // out of range _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775809i`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-9223372036854775809i`) } // min int p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775808i`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-9223372036854775808i`, err) } fields, err := p[0].Fields() if err != nil { t.Fatal(err) } if exp, got := int64(-9223372036854775808), fields["value"].(int64); exp != got { t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) } // leading zeros _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-0009223372036854775808i`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-0009223372036854775808i`, err) } } func TestParsePointMaxFloat64(t *testing.T) { // out of range _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "1"+string(maxFloat64))) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) } // max float p, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(maxFloat64))) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807`, err) } fields, err := p[0].Fields() if err != nil { t.Fatal(err) } if exp, got := math.MaxFloat64, fields["value"].(float64); exp != got { t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) } // leading zeros _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "0000"+string(maxFloat64))) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807`, err) } } func TestParsePointMinFloat64(t *testing.T) { // out of range _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-1"+string(minFloat64)[1:])) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) } // min float p, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(minFloat64))) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) } fields, err := p[0].Fields() if err != nil { t.Fatal(err) } if exp, got := -math.MaxFloat64, fields["value"].(float64); exp != got { t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) } // leading zeros _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-0000000"+string(minFloat64)[1:])) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) } } func TestParsePointMaxUint64(t *testing.T) { // out of range _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=18446744073709551616u`) exp := `unable to parse 'cpu,host=serverA,region=us-west value=18446744073709551616u': unable to parse unsigned 18446744073709551616: strconv.ParseUint: parsing "18446744073709551616": value out of range` if err == nil || (err != nil && err.Error() != exp) { t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err) } // max int p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=18446744073709551615u`) if err != nil { t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=18446744073709551615u`, err) } fields, err := p[0].Fields() if err != nil { t.Fatal(err) } if exp, got := uint64(18446744073709551615), fields["value"].(uint64); exp != got { t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) } // leading zeros _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=00018446744073709551615u`) if err != nil { t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=00018446744073709551615u`, err) } } func TestParsePointMinUint64(t *testing.T) { // out of range _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=--1u`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-1u`) } // min int p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0u`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0u`, err) } fields, err := p[0].Fields() if err != nil { t.Fatal(err) } if exp, got := uint64(0), fields["value"].(uint64); exp != got { t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) } // leading zeros _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0000u`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0000u`, err) } } func TestParsePointNumberNonNumeric(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1a`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. 
got nil, exp error`, `cpu,host=serverA,region=us-west value=.1a`) } } func TestParsePointNegativeWrongPlace(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=0.-1`) } } func TestParsePointOnlyNegativeSign(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-`) } } func TestParsePointFloatMultipleDecimals(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.1.1`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.1.1`) } } func TestParsePointInteger(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1i`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1i`, err) } } func TestParsePointNegativeInteger(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1i`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1i`, err) } } func TestParsePointNegativeFloat(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) } } func TestParsePointFloatNoLeadingDigit(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) } } func TestParsePointFloatScientific(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e4`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) } pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1e4`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) } fields, err := pts[0].Fields() if err != nil { t.Fatal(err) } if fields["value"] != 1e4 { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err) } } func TestParsePointFloatScientificUpper(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0E4`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err) } pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1E4`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err) } fields, err := pts[0].Fields() if err != nil { t.Fatal(err) } if fields["value"] != 1e4 { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err) } } func TestParsePointFloatScientificDecimal(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e-4`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e-4`, err) } } func TestParsePointFloatNegativeScientific(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0e-4`) if err != nil { t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0e-4`, err) } } func TestParsePointBooleanInvalid(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=a`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=a`) } } func TestParsePointScientificIntInvalid(t *testing.T) { _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9ie10`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9ie10`) } _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=9e10i`) if err == nil { t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9e10i`) } } func TestParsePointWhitespace(t *testing.T) { examples := []string{ `cpu value=1.0 1257894000000000000`, `cpu value=1.0 1257894000000000000`, `cpu value=1.0 1257894000000000000`, `cpu value=1.0 1257894000000000000 `, `cpu value=1.0 1257894000000000000 `, `cpu value=1.0 1257894000000000000 `, } expPoint := NewTestPoint("cpu", models.Tags{}, models.Fields{"value": 1.0}, time.Unix(0, 1257894000000000000)) for i, example := range examples { pts, err := models.ParsePoints([]byte(example)) if err != nil { t.Fatalf(`[Example %d] ParsePoints("%s") error. got %v, exp nil`, i, example, err) } if got, exp := len(pts), 1; got != exp { t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp) } if got, exp := string(pts[0].Name()), string(expPoint.Name()); got != exp { t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp) } fields, err := pts[0].Fields() if err != nil { t.Fatal(err) } eFields, err := expPoint.Fields() if err != nil { t.Fatal(err) } if got, exp := len(fields), len(eFields); got != exp { t.Fatalf("[Example %d] got %d fields, expected %d", i, got, exp) } if got, exp := fields["value"], eFields["value"]; got != exp { t.Fatalf(`[Example %d] got %v for field "value", expected %v`, i, got, exp) } if got, exp := pts[0].Time().UnixNano(), expPoint.Time().UnixNano(); got != exp { t.Fatalf(`[Example %d] got %d time, expected %d`, i, got, exp) } } } func TestParsePointUnescape(t *testing.T) { // commas in measurement name test(t, `foo\,bar value=1i`, NewTestPoint( "foo,bar", // comma in the name models.NewTags(map[string]string{}), models.Fields{ "value": int64(1), }, time.Unix(0, 0))) // comma in measurement name with tags test(t, `cpu\,main,regions=east value=1.0`, NewTestPoint( "cpu,main", // comma in the name models.NewTags(map[string]string{ "regions": "east", }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // spaces in measurement name test(t, `cpu\ load,region=east value=1.0`, NewTestPoint( "cpu load", // space in the name models.NewTags(map[string]string{ "region": "east", }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // equals in measurement name test(t, `cpu\=load,region=east value=1.0`, NewTestPoint( `cpu\=load`, // backslash is literal models.NewTags(map[string]string{ "region": "east", }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // equals in measurement name test(t, `cpu=load,region=east value=1.0`, NewTestPoint( `cpu=load`, // literal equals is fine in measurement name 
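// (An unescaped '=' survives in the measurement name because '=' only acts as
// a separator inside tag and field key=value pairs; compare the escaped-equals
// cases further below.)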
models.NewTags(map[string]string{ "region": "east", }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // commas in tag names test(t, `cpu,region\,zone=east value=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ "region,zone": "east", // comma in the tag key }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // spaces in tag name test(t, `cpu,region\ zone=east value=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ "region zone": "east", // space in the tag name }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // backslash with escaped equals in tag name test(t, `cpu,reg\\=ion=east value=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ `reg\=ion`: "east", }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // space as tag name test(t, `cpu,\ =east value=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ " ": "east", // tag name is single space }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // commas in tag values test(t, `cpu,regions=east\,west value=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ "regions": "east,west", // comma in the tag value }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // backslash literal followed by escaped space test(t, `cpu,regions=\\ east value=1.0`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "regions": `\ east`, }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // backslash literal followed by escaped space test(t, `cpu,regions=eas\\ t value=1.0`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "regions": `eas\ t`, }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // backslash literal followed by trailing space test(t, `cpu,regions=east\\ value=1.0`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "regions": `east\ `, }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // spaces in tag values test(t, `cpu,regions=east\ west value=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ "regions": "east west", // space in the tag value }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // commas in field keys test(t, `cpu,regions=east value\,ms=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ "regions": "east", }), models.Fields{ "value,ms": 1.0, // comma in the field keys }, time.Unix(0, 0))) // spaces in field keys test(t, `cpu,regions=east value\ ms=1.0`, NewTestPoint("cpu", models.NewTags(map[string]string{ "regions": "east", }), models.Fields{ "value ms": 1.0, // space in the field key }, time.Unix(0, 0))) // tag with no value test(t, `cpu,regions=east value="1"`, NewTestPoint("cpu", models.NewTags(map[string]string{ "regions": "east", "foobar": "", }), models.Fields{ "value": "1", }, time.Unix(0, 0))) // commas in field values test(t, `cpu,regions=east value="1,0"`, NewTestPoint("cpu", models.NewTags(map[string]string{ "regions": "east", }), models.Fields{ "value": "1,0", // comma in the field value }, time.Unix(0, 0))) // random character escaped test(t, `cpu,regions=eas\t value=1.0`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "regions": "eas\\t", }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // backslash literal followed by escaped characters test(t, `cpu,regions=\\,\,\=east value=1.0`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "regions": `\,,=east`, }), models.Fields{ "value": 1.0, }, time.Unix(0, 0))) // field keys using escape char.
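// As the cases above suggest, only `\,`, `\ ` and `\=` act as escapes in keys,
// so an unrecognized sequence such as `\a` keeps its backslash when parsed
// (asserted in the next case).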
test(t, `cpu \a=1i`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "\\a": int64(1), // Left as parsed since it's not a known escape sequence. }, time.Unix(0, 0))) // measurement, tag and tag value with equals test(t, `cpu=load,equals\=foo=tag\=value value=1i`, NewTestPoint( "cpu=load", // Not escaped models.NewTags(map[string]string{ "equals=foo": "tag=value", // Tag and value unescaped }), models.Fields{ "value": int64(1), }, time.Unix(0, 0))) } func TestParsePointWithTags(t *testing.T) { test(t, "cpu,host=serverA,region=us-east value=1.0 1000000000", NewTestPoint("cpu", models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), models.Fields{"value": 1.0}, time.Unix(1, 0))) } func TestParsePointWithDuplicateTags(t *testing.T) { for i, tt := range []struct { line string err string }{ { line: `cpu,host=serverA,host=serverB value=1i 1000000000`, err: `unable to parse 'cpu,host=serverA,host=serverB value=1i 1000000000': duplicate tags`, }, { line: `cpu,b=2,b=1,c=3 value=1i 1000000000`, err: `unable to parse 'cpu,b=2,b=1,c=3 value=1i 1000000000': duplicate tags`, }, { line: `cpu,b=2,c=3,b=1 value=1i 1000000000`, err: `unable to parse 'cpu,b=2,c=3,b=1 value=1i 1000000000': duplicate tags`, }, } { _, err := models.ParsePointsString(tt.line) if err == nil || tt.err != err.Error() { t.Errorf("%d. ParsePoint() expected error '%s'. got '%s'", i, tt.err, err) } } } func TestParsePointWithStringField(t *testing.T) { test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`, NewTestPoint("cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": 1.0, "str": "foo", "str2": "bar", }, time.Unix(1, 0)), ) test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`, NewTestPoint("cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "str": `foo " bar`, }, time.Unix(1, 0)), ) } func TestParsePointWithStringWithSpaces(t *testing.T) { test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": 1.0, "str": "foo bar", // spaces in string value }, time.Unix(1, 0)), ) } func TestParsePointWithStringWithNewline(t *testing.T) { test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000", NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": 1.0, "str": "foo\nbar", // newline in string value }, time.Unix(1, 0)), ) } func TestParsePointWithStringWithCommas(t *testing.T) { // escaped comma test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": 1.0, "str": `foo\,bar`, // commas in string value }, time.Unix(1, 0)), ) // non-escaped comma test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": 1.0, "str": "foo,bar", // commas in string value }, time.Unix(1, 0)), ) // string w/ trailing escape chars test(t, `cpu,host=serverA,region=us-east str="foo\\",str2="bar" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "str": "foo\\", // trailing escape char "str2": 
"bar", }, time.Unix(1, 0)), ) } func TestParsePointQuotedMeasurement(t *testing.T) { // non-escaped comma test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`, NewTestPoint( `"cpu"`, models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": 1.0, }, time.Unix(1, 0)), ) } func TestParsePointQuotedTags(t *testing.T) { test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ `"host"`: `"serverA"`, "region": "us-east", }), models.Fields{ "value": 1.0, }, time.Unix(1, 0)), ) } func TestParsePoint_TrailingSlash(t *testing.T) { _, err := models.ParsePointsString(`a v=1 0\`) if err == nil { t.Fatalf("ParsePoints failed: %v", err) } else if !strings.Contains(err.Error(), "bad timestamp") { t.Fatalf("ParsePoints unexpected error: %v", err) } } func TestParsePointsUnbalancedQuotedTags(t *testing.T) { pts, err := models.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126") if err != nil { t.Fatalf("ParsePoints failed: %v", err) } if exp := 2; len(pts) != exp { t.Fatalf("ParsePoints count mismatch. got %v, exp %v", len(pts), exp) } // Expected " in the tag value exp := models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `"a`}), models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125)) if pts[0].String() != exp.String() { t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String()) } // Expected two points to ensure we did not overscan the line exp = models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `a`}), models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126)) if pts[1].String() != exp.String() { t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String()) } } func TestParsePointEscapedStringsAndCommas(t *testing.T) { // non-escaped comma and quotes test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": `{Hello"{,}" World}`, }, time.Unix(1, 0)), ) // escaped comma and quotes test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": `{Hello"{\,}" World}`, }, time.Unix(1, 0)), ) } func TestParsePointWithStringWithEquals(t *testing.T) { test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": 1.0, "str": "foo=bar", // spaces in string value }, time.Unix(1, 0)), ) } func TestParsePointWithStringWithBackslash(t *testing.T) { test(t, `cpu value="test\\\"" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": `test\"`, }, time.Unix(1, 0)), ) test(t, `cpu value="test\\" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": `test\`, }, time.Unix(1, 0)), ) test(t, `cpu value="test\\\"" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": `test\"`, }, time.Unix(1, 0)), ) test(t, `cpu value="test\"" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": `test"`, }, time.Unix(1, 0)), ) } func TestParsePointWithBoolField(t *testing.T) { test(t, `cpu,host=serverA,region=us-east 
true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "t": true, "T": true, "true": true, "True": true, "TRUE": true, "f": false, "F": false, "false": false, "False": false, "FALSE": false, }, time.Unix(1, 0)), ) } func TestParsePointUnicodeString(t *testing.T) { test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{ "host": "serverA", "region": "us-east", }), models.Fields{ "value": "wè", }, time.Unix(1, 0)), ) } func TestParsePointNegativeTimestamp(t *testing.T) { test(t, `cpu value=1 -1`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, time.Unix(0, -1)), ) } func TestParsePointMaxTimestamp(t *testing.T) { test(t, fmt.Sprintf(`cpu value=1 %d`, models.MaxNanoTime), NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, time.Unix(0, models.MaxNanoTime)), ) } func TestParsePointMinTimestamp(t *testing.T) { test(t, `cpu value=1 -9223372036854775806`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, time.Unix(0, models.MinNanoTime)), ) } func TestParsePointInvalidTimestamp(t *testing.T) { examples := []string{ "cpu value=1 9223372036854775808", "cpu value=1 -92233720368547758078", "cpu value=1 -", "cpu value=1 -/", "cpu value=1 -1?", "cpu value=1 1-", "cpu value=1 9223372036854775807 12", } for i, example := range examples { _, err := models.ParsePointsString(example) if err == nil { t.Fatalf("[Example %d] ParsePoints failed: %v", i, err) } } } func TestNewPointFloatWithoutDecimal(t *testing.T) { test(t, `cpu value=1 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, time.Unix(1, 0)), ) } func TestNewPointNegativeFloat(t *testing.T) { test(t, `cpu value=-0.64 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": -0.64, }, time.Unix(1, 0)), ) } func TestNewPointFloatNoDecimal(t *testing.T) { test(t, `cpu value=1. 
1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": 1.0, }, time.Unix(1, 0)), ) } func TestNewPointFloatScientific(t *testing.T) { test(t, `cpu value=6.632243e+06 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": float64(6632243), }, time.Unix(1, 0)), ) } func TestNewPointLargeInteger(t *testing.T) { test(t, `cpu value=6632243i 1000000000`, NewTestPoint( "cpu", models.NewTags(map[string]string{}), models.Fields{ "value": int64(6632243), // if incorrectly encoded as a float, it would show up as 6.632243e+06 }, time.Unix(1, 0)), ) } func TestParsePointNaN(t *testing.T) { _, err := models.ParsePointsString("cpu value=NaN 1000000000") if err == nil { t.Fatalf("ParsePoints expected error, got nil") } _, err = models.ParsePointsString("cpu value=nAn 1000000000") if err == nil { t.Fatalf("ParsePoints expected error, got nil") } _, err = models.ParsePointsString("cpu value=NaN") if err == nil { t.Fatalf("ParsePoints expected error, got nil") } } func TestNewPointLargeNumberOfTags(t *testing.T) { tags := "" for i := 0; i < 255; i++ { tags += fmt.Sprintf(",tag%d=value%d", i, i) } pt, err := models.ParsePointsString(fmt.Sprintf("cpu%s value=1", tags)) if err != nil { t.Fatalf("ParsePoints() with max tags failed: %v", err) } if len(pt[0].Tags()) != 255 { t.Fatalf("expected %d tags, got %d", 255, len(pt[0].Tags())) } } func TestParsePointIntsFloats(t *testing.T) { pts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`)) if err != nil { t.Fatalf(`ParsePoints() failed. got %s`, err) } if exp := 1; len(pts) != exp { t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) } pt := pts[0] fields, err := pt.Fields() if err != nil { t.Fatal(err) } if _, ok := fields["int"].(int64); !ok { t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", fields["int"], int64(10)) } if _, ok := fields["float"].(float64); !ok { t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(11.0)) } if _, ok := fields["float2"].(float64); !ok { t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(12.1)) } } func TestParsePointKeyUnsorted(t *testing.T) { pts, err := models.ParsePoints([]byte("cpu,last=1,first=2 value=1i")) if err != nil { t.Fatalf(`ParsePoints() failed. got %s`, err) } if exp := 1; len(pts) != exp { t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) } pt := pts[0] if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp { t.Errorf("ParsePoint key not sorted. got %v, exp %v", string(pt.Key()), exp) } } func TestParsePointToString(t *testing.T) { line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000` pts, err := models.ParsePoints([]byte(line)) if err != nil { t.Fatalf(`ParsePoints() failed. 
got %s`, err) } if exp := 1; len(pts) != exp { t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) } pt := pts[0] got := pt.String() if line != got { t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line) } pt = models.MustNewPoint("cpu", models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"}, time.Unix(1, 0)) got = pt.String() if line != got { t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line) } } func TestParsePointsWithPrecision(t *testing.T) { tests := []struct { name string line string precision string exp string }{ { name: "nanosecond by default", line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, precision: "", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", }, { name: "nanosecond", line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, precision: "n", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", }, { name: "microsecond", line: `cpu,host=serverA,region=us-east value=1.0 946730096789012`, precision: "u", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", }, { name: "millisecond", line: `cpu,host=serverA,region=us-east value=1.0 946730096789`, precision: "ms", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", }, { name: "second", line: `cpu,host=serverA,region=us-east value=1.0 946730096`, precision: "s", exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", }, { name: "minute", line: `cpu,host=serverA,region=us-east value=1.0 15778834`, precision: "m", exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000", }, { name: "hour", line: `cpu,host=serverA,region=us-east value=1.0 262980`, precision: "h", exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000", }, } for _, test := range tests { pts, err := models.ParsePointsWithPrecision([]byte(test.line), time.Now().UTC(), test.precision) if err != nil { t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) } if exp := 1; len(pts) != exp { t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) } pt := pts[0] got := pt.String() if got != test.exp { t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) } } } func TestParsePointsWithPrecisionNoTime(t *testing.T) { line := `cpu,host=serverA,region=us-east value=1.0` tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") tests := []struct { name string precision string exp string }{ { name: "no precision", precision: "", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", }, { name: "nanosecond precision", precision: "n", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", }, { name: "microsecond precision", precision: "u", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", }, { name: "millisecond precision", precision: "ms", exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", }, { name: "second precision", precision: "s", exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", }, { name: "minute precision", precision: "m", exp: "cpu,host=serverA,region=us-east value=1.0 946730040000000000", }, { name: "hour precision", precision: "h", exp: "cpu,host=serverA,region=us-east value=1.0 946728000000000000", }, } for _, test := range tests { pts, err := models.ParsePointsWithPrecision([]byte(line), tm, test.precision) if err != nil { t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err) } if exp := 1; len(pts) != exp { t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) } pt := pts[0] got := pt.String() if got != test.exp { t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) } } } func TestParsePointsWithPrecisionComments(t *testing.T) { tests := []struct { name string batch string exp string lenPoints int }{ { name: "comment only", batch: `# comment only`, exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", lenPoints: 0, }, { name: "point with comment above", batch: `# a point is below cpu,host=serverA,region=us-east value=1.0 946730096789012345`, exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", lenPoints: 1, }, { name: "point with comment below", batch: `cpu,host=serverA,region=us-east value=1.0 946730096789012345 # end of points`, exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", lenPoints: 1, }, { name: "indented comment", batch: ` # a point is below cpu,host=serverA,region=us-east value=1.0 946730096789012345`, exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", lenPoints: 1, }, } for _, test := range tests { pts, err := models.ParsePointsWithPrecision([]byte(test.batch), time.Now().UTC(), "") if err != nil { t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) } pointsLength := len(pts) if exp := test.lenPoints; pointsLength != exp { t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, pointsLength, exp) } if pointsLength > 0 { pt := pts[0] got := pt.String() if got != test.exp { t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) } } } } func TestNewPointEscaped(t *testing.T) { // commas pt := models.MustNewPoint("cpu,main", models.NewTags(map[string]string{"tag,bar": "value"}), models.Fields{"name,bar": 1.0}, time.Unix(0, 0)) if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } // spaces pt = models.MustNewPoint("cpu main", models.NewTags(map[string]string{"tag bar": "value"}), models.Fields{"name bar": 1.0}, time.Unix(0, 0)) if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } // equals pt = models.MustNewPoint("cpu=main", models.NewTags(map[string]string{"tag=bar": "value=foo"}), models.Fields{"name=bar": 1.0}, time.Unix(0, 0)) if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } } func TestNewPointWithoutField(t *testing.T) { _, err := models.NewPoint("cpu", models.NewTags(map[string]string{"tag": "bar"}), models.Fields{}, time.Unix(0, 0)) if err == nil { t.Fatalf(`NewPoint() expected error. got nil`) } } func TestNewPointUnhandledType(t *testing.T) { // nil value pt := models.MustNewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0)) if exp := `cpu value= 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } // unsupported type gets stored as string now := time.Unix(0, 0).UTC() pt = models.MustNewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0)) if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } fields, err := pt.Fields() if err != nil { t.Fatal(err) } if exp := "1970-01-01 00:00:00 +0000 UTC"; fields["value"] != exp { t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) } } func TestMakeKeyEscaped(t *testing.T) { if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.NewTags(map[string]string{})); string(got) != exp { t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) } } func TestPrecisionString(t *testing.T) { tags := map[string]interface{}{"value": float64(1)} tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") tests := []struct { name string precision string exp string }{ { name: "no precision", precision: "", exp: "cpu value=1 946730096789012345", }, { name: "nanosecond precision", precision: "ns", exp: "cpu value=1 946730096789012345", }, { name: "microsecond precision", precision: 
"u", exp: "cpu value=1 946730096789012", }, { name: "millisecond precision", precision: "ms", exp: "cpu value=1 946730096789", }, { name: "second precision", precision: "s", exp: "cpu value=1 946730096", }, { name: "minute precision", precision: "m", exp: "cpu value=1 15778834", }, { name: "hour precision", precision: "h", exp: "cpu value=1 262980", }, } for _, test := range tests { pt := models.MustNewPoint("cpu", nil, tags, tm) act := pt.PrecisionString(test.precision) if act != test.exp { t.Errorf("%s: PrecisionString() mismatch:\n actual: %v\n exp: %v", test.name, act, test.exp) } } } func TestRoundedString(t *testing.T) { tags := map[string]interface{}{"value": float64(1)} tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") tests := []struct { name string precision time.Duration exp string }{ { name: "no precision", precision: time.Duration(0), exp: "cpu value=1 946730096789012345", }, { name: "nanosecond precision", precision: time.Nanosecond, exp: "cpu value=1 946730096789012345", }, { name: "microsecond precision", precision: time.Microsecond, exp: "cpu value=1 946730096789012000", }, { name: "millisecond precision", precision: time.Millisecond, exp: "cpu value=1 946730096789000000", }, { name: "second precision", precision: time.Second, exp: "cpu value=1 946730097000000000", }, { name: "minute precision", precision: time.Minute, exp: "cpu value=1 946730100000000000", }, { name: "hour precision", precision: time.Hour, exp: "cpu value=1 946731600000000000", }, } for _, test := range tests { pt := models.MustNewPoint("cpu", nil, tags, tm) act := pt.RoundedString(test.precision) if act != test.exp { t.Errorf("%s: RoundedString() mismatch:\n actual: %v\n exp: %v", test.name, act, test.exp) } } } func TestParsePointsStringWithExtraBuffer(t *testing.T) { b := make([]byte, 70*5000) buf := bytes.NewBuffer(b) key := "cpu,host=A,region=uswest" buf.WriteString(fmt.Sprintf("%s value=%.3f 1\n", key, rand.Float64())) points, err := models.ParsePointsString(buf.String()) if err != nil { t.Fatalf("failed to write points: %s", err.Error()) } pointKey := string(points[0].Key()) if len(key) != len(pointKey) { t.Fatalf("expected length of both keys are same but got %d and %d", len(key), len(pointKey)) } if key != pointKey { t.Fatalf("expected both keys are same but got %s and %s", key, pointKey) } } func TestParsePointsQuotesInFieldKey(t *testing.T) { buf := `cpu "a=1 cpu value=2 1` points, err := models.ParsePointsString(buf) if err != nil { t.Fatalf("failed to write points: %s", err.Error()) } fields, err := points[0].Fields() if err != nil { t.Fatal(err) } value, ok := fields["\"a"] if !ok { t.Fatalf("expected to parse field '\"a'") } if value != float64(1) { t.Fatalf("expected field value to be 1, got %v", value) } // The following input should not parse buf = `cpu "\, '= "\ v=1.0` _, err = models.ParsePointsString(buf) if err == nil { t.Fatalf("expected parsing failure but got no error") } } func TestParsePointsQuotesInTags(t *testing.T) { buf := `t159,label=hey\ "ya a=1i,value=0i t159,label=another a=2i,value=1i 1` points, err := models.ParsePointsString(buf) if err != nil { t.Fatalf("failed to write points: %s", err.Error()) } if len(points) != 2 { t.Fatalf("expected 2 points, got %d", len(points)) } } func TestParsePointsBlankLine(t *testing.T) { buf := `cpu value=1i 1000000000 cpu value=2i 2000000000` points, err := models.ParsePointsString(buf) if err != nil { t.Fatalf("failed to write points: %s", err.Error()) } if len(points) != 2 { t.Fatalf("expected 2 points, got %d", 
len(points)) } } func TestNewPointsWithBytesWithCorruptData(t *testing.T) { corrupted := []byte{0, 0, 0, 3, 102, 111, 111, 0, 0, 0, 4, 61, 34, 65, 34, 1, 0, 0, 0, 14, 206, 86, 119, 24, 32, 72, 233, 168, 2, 148} p, err := models.NewPointFromBytes(corrupted) if p != nil || err == nil { t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err) } } func TestNewPointsWithShortBuffer(t *testing.T) { _, err := models.NewPointFromBytes([]byte{0, 0, 0, 3, 4}) if err != io.ErrShortBuffer { t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err) } } func TestNewPointsRejectsEmptyFieldNames(t *testing.T) { if _, err := models.NewPoint("foo", nil, models.Fields{"": 1}, time.Now()); err == nil { t.Fatalf("new point with empty field name. got: nil, expected: error") } } func TestNewPointsRejectsMaxKey(t *testing.T) { var key string // tsm field key is point key, separator (4 bytes) and field for i := 0; i < models.MaxKeyLength-len("value")-4; i++ { key += "a" } // Test max key len if _, err := models.NewPoint(key, nil, models.Fields{"value": 1, "ok": 2.0}, time.Now()); err != nil { t.Fatalf("new point with max key. got: %v, expected: nil", err) } if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1,ok=2.0", key)); err != nil { t.Fatalf("parse point with max key. got: %v, expected: nil", err) } // Test 1 byte over max key len key += "a" if _, err := models.NewPoint(key, nil, models.Fields{"value": 1, "ok": 2.0}, time.Now()); err == nil { t.Fatalf("new point with max key. got: nil, expected: error") } if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1,ok=2.0", key)); err == nil { t.Fatalf("parse point with max key. got: nil, expected: error") } } func TestPoint_FieldIterator_Simple(t *testing.T) { p, err := models.ParsePoints([]byte(`m v=42i,f=42 36`)) if err != nil { t.Fatal(err) } if len(p) != 1 { t.Fatalf("wrong number of points, got %d, exp %d", len(p), 1) } fi := p[0].FieldIterator() if !fi.Next() { t.Fatal("field iterator terminated before first field") } if fi.Type() != models.Integer { t.Fatalf("'42i' should be an Integer, got %v", fi.Type()) } iv, err := fi.IntegerValue() if err != nil { t.Fatal(err) } if exp, got := int64(42), iv; exp != got { t.Fatalf("'42i' should be %d, got %d", exp, got) } if !fi.Next() { t.Fatalf("field iterator terminated before second field") } if fi.Type() != models.Float { t.Fatalf("'42' should be a Float, got %v", fi.Type()) } fv, err := fi.FloatValue() if err != nil { t.Fatal(err) } if exp, got := 42.0, fv; exp != got { t.Fatalf("'42' should be %f, got %f", exp, got) } if fi.Next() { t.Fatal("field iterator didn't terminate") } } func toFields(fi models.FieldIterator) models.Fields { m := make(models.Fields) for fi.Next() { var v interface{} var err error switch fi.Type() { case models.Float: v, err = fi.FloatValue() case models.Integer: v, err = fi.IntegerValue() case models.Unsigned: v, err = fi.UnsignedValue() case models.String: v = fi.StringValue() case models.Boolean: v, err = fi.BooleanValue() case models.Empty: v = nil default: panic("unknown type") } if err != nil { panic(err) } m[string(fi.FieldKey())] = v } return m } func TestPoint_FieldIterator_FieldMap(t *testing.T) { points, err := models.ParsePointsString(` m v=42 m v=42i m v="string" m v=true m v="string\"with\"escapes" m v=42i,f=42,g=42.314,u=123u m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456 `) if err != nil { t.Fatal("failed to parse test points:", err) } for _, p := range points { exp, err := p.Fields() if err != nil { t.Fatal(err) } got 
:= toFields(p.FieldIterator()) if !reflect.DeepEqual(got, exp) { t.Errorf("FieldIterator failed for %#q: got %#v, exp %#v", p.String(), got, exp) } } } func TestEscapeStringField(t *testing.T) { cases := []struct { in string expOut string }{ {in: "abcdefg", expOut: "abcdefg"}, {in: `one double quote " .`, expOut: `one double quote \" .`}, {in: `quote " then backslash \ .`, expOut: `quote \" then backslash \\ .`}, {in: `backslash \ then quote " .`, expOut: `backslash \\ then quote \" .`}, } for _, c := range cases { // Unescapes as expected. got := models.EscapeStringField(c.in) if got != c.expOut { t.Errorf("unexpected result from EscapeStringField(%s)\ngot [%s]\nexp [%s]\n", c.in, got, c.expOut) continue } pointLine := fmt.Sprintf(`t s="%s"`, got) test(t, pointLine, NewTestPoint( "t", models.NewTags(nil), models.Fields{"s": c.in}, time.Unix(0, 0), )) } } func TestParseKeyBytes(t *testing.T) { testCases := []struct { input string expectedName string expectedTags map[string]string }{ {input: "m,k=v", expectedName: "m", expectedTags: map[string]string{"k": "v"}}, {input: "m\\ q,k=v", expectedName: "m q", expectedTags: map[string]string{"k": "v"}}, {input: "m,k\\ q=v", expectedName: "m", expectedTags: map[string]string{"k q": "v"}}, {input: "m\\ q,k\\ q=v", expectedName: "m q", expectedTags: map[string]string{"k q": "v"}}, } for _, testCase := range testCases { t.Run(testCase.input, func(t *testing.T) { name, tags := models.ParseKeyBytes([]byte(testCase.input)) if !bytes.Equal([]byte(testCase.expectedName), name) { t.Errorf("%s produced measurement %s but expected %s", testCase.input, string(name), testCase.expectedName) } if !tags.Equal(models.NewTags(testCase.expectedTags)) { t.Errorf("%s produced tags %s but expected %s", testCase.input, tags.String(), models.NewTags(testCase.expectedTags).String()) } }) } } func TestParseName(t *testing.T) { testCases := []struct { input string expectedName string }{ {input: "m,k=v", expectedName: "m"}, {input: "m\\ q,k=v", expectedName: "m q"}, } for _, testCase := range testCases { t.Run(testCase.input, func(t *testing.T) { name := models.ParseName([]byte(testCase.input)) if !bytes.Equal([]byte(testCase.expectedName), name) { t.Errorf("%s produced measurement %s but expected %s", testCase.input, string(name), testCase.expectedName) } }) } } func BenchmarkEscapeStringField_Plain(b *testing.B) { s := "nothing special" for i := 0; i < b.N; i++ { sink = models.EscapeStringField(s) } } func BenchmarkEscapeString_Quotes(b *testing.B) { s := `Hello, "world"` for i := 0; i < b.N; i++ { sink = models.EscapeStringField(s) } } func BenchmarkEscapeString_Backslashes(b *testing.B) { s := `C:\windows\system32` for i := 0; i < b.N; i++ { sink = models.EscapeStringField(s) } } func BenchmarkEscapeString_QuotesAndBackslashes(b *testing.B) { s1 := `a quote " then backslash \ .` s2 := `a backslash \ then quote " .` for i := 0; i < b.N; i++ { sink = [...]string{models.EscapeStringField(s1), models.EscapeStringField(s2)} } } func BenchmarkParseTags(b *testing.B) { tags := []byte("cpu,tag0=value0,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5") for i := 0; i < b.N; i++ { models.ParseTags(tags) } } func BenchmarkEscapeMeasurement(b *testing.B) { benchmarks := []struct { m []byte }{ {[]byte("this_is_a_test")}, {[]byte("this,is,a,test")}, } for _, bm := range benchmarks { b.Run(string(bm.m), func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { models.EscapeMeasurement(bm.m) } }) } } func makeTags(key, val string, n int) models.Tags { tags := 
make(models.Tags, n) for i := range tags { tags[i].Key = []byte(fmt.Sprintf("%s%03d", key, i)) tags[i].Value = []byte(fmt.Sprintf("%s%03d", val, i)) } return tags } func BenchmarkTags_HashKey(b *testing.B) { benchmarks := []struct { name string t models.Tags }{ {"5 tags-no esc", makeTags("tag_foo", "val_bar", 5)}, {"25 tags-no esc", makeTags("tag_foo", "val_bar", 25)}, {"5 tags-esc", makeTags("tag foo", "val bar", 5)}, {"25 tags-esc", makeTags("tag foo", "val bar", 25)}, } for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { bm.t.HashKey() } }) } } func BenchmarkMakeKey(b *testing.B) { benchmarks := []struct { m []byte t models.Tags }{ {[]byte("this_is_a_test"), nil}, {[]byte("this,is,a,test"), nil}, {[]byte(`this\ is\ a\ test`), nil}, {[]byte("this_is_a_test"), makeTags("tag_foo", "val_bar", 8)}, {[]byte("this,is,a,test"), makeTags("tag_foo", "val_bar", 8)}, {[]byte("this_is_a_test"), makeTags("tag_foo", "val bar", 8)}, {[]byte("this,is,a,test"), makeTags("tag_foo", "val bar", 8)}, } for _, bm := range benchmarks { b.Run(string(bm.m), func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { models.MakeKey(bm.m, bm.t) } }) } } func init() { // Force uint support to be enabled for testing. models.EnableUintSupport() } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/rows.go000066400000000000000000000031731433650256200270130ustar00rootroot00000000000000package models import ( "sort" ) // Row represents a single row returned from the execution of a statement. type Row struct { Name string `json:"name,omitempty"` Tags map[string]string `json:"tags,omitempty"` Columns []string `json:"columns,omitempty"` Values [][]interface{} `json:"values,omitempty"` Partial bool `json:"partial,omitempty"` } // SameSeries returns true if r contains values for the same series as o. func (r *Row) SameSeries(o *Row) bool { return r.tagsHash() == o.tagsHash() && r.Name == o.Name } // tagsHash returns a hash of tag key/value pairs. func (r *Row) tagsHash() uint64 { h := NewInlineFNV64a() keys := r.tagsKeys() for _, k := range keys { h.Write([]byte(k)) h.Write([]byte(r.Tags[k])) } return h.Sum64() } // tagKeys returns a sorted list of tag keys. func (r *Row) tagsKeys() []string { a := make([]string, 0, len(r.Tags)) for k := range r.Tags { a = append(a, k) } sort.Strings(a) return a } // Rows represents a collection of rows. Rows implements sort.Interface. type Rows []*Row // Len implements sort.Interface. func (p Rows) Len() int { return len(p) } // Less implements sort.Interface. func (p Rows) Less(i, j int) bool { // Sort by name first. if p[i].Name != p[j].Name { return p[i].Name < p[j].Name } // Sort by tag set hash. Tags don't have a meaningful sort order so we // just compute a hash and sort by that instead. This allows the tests // to receive rows in a predictable order every time. return p[i].tagsHash() < p[j].tagsHash() } // Swap implements sort.Interface. func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/statistic.go000066400000000000000000000022301433650256200300210ustar00rootroot00000000000000package models // Statistic is the representation of a statistic used by the monitoring service. type Statistic struct { Name string `json:"name"` Tags map[string]string `json:"tags"` Values map[string]interface{} `json:"values"` } // NewStatistic returns an initialized Statistic. 
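// An illustrative sketch of how a caller might populate the returned value
// (the statistic name and map keys below are hypothetical, not taken from
// this package):
//
//	s := NewStatistic("queryExecutor")
//	s.Tags["host"] = "server01"
//	s.Values["queriesActive"] = 4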
func NewStatistic(name string) Statistic { return Statistic{ Name: name, Tags: make(map[string]string), Values: make(map[string]interface{}), } } // StatisticTags is a map that can be merged with others without causing // mutations to either map. type StatisticTags map[string]string // Merge creates a new map containing the merged contents of tags and t. // If both tags and the receiver map contain the same key, the value in tags // is used in the resulting map. // // Merge always returns a usable map. func (t StatisticTags) Merge(tags map[string]string) map[string]string { // Add everything in tags to the result. out := make(map[string]string, len(tags)) for k, v := range tags { out[k] = v } // Only add values from t that don't appear in tags. for k, v := range t { if _, ok := tags[k]; !ok { out[k] = v } } return out } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/statistic_test.go000066400000000000000000000024461433650256200310710ustar00rootroot00000000000000package models_test import ( "reflect" "testing" "github.com/influxdata/influxdb1-client/models" ) func TestTags_Merge(t *testing.T) { examples := []struct { Base map[string]string Arg map[string]string Result map[string]string }{ { Base: nil, Arg: nil, Result: map[string]string{}, }, { Base: nil, Arg: map[string]string{"foo": "foo"}, Result: map[string]string{"foo": "foo"}, }, { Base: map[string]string{"foo": "foo"}, Arg: nil, Result: map[string]string{"foo": "foo"}, }, { Base: map[string]string{"foo": "foo"}, Arg: map[string]string{"bar": "bar"}, Result: map[string]string{"foo": "foo", "bar": "bar"}, }, { Base: map[string]string{"foo": "foo", "bar": "bar"}, Arg: map[string]string{"zoo": "zoo"}, Result: map[string]string{"foo": "foo", "bar": "bar", "zoo": "zoo"}, }, { Base: map[string]string{"foo": "foo", "bar": "bar"}, Arg: map[string]string{"bar": "newbar"}, Result: map[string]string{"foo": "foo", "bar": "newbar"}, }, } for i, example := range examples { i++ result := models.StatisticTags(example.Base).Merge(example.Arg) if got, exp := result, example.Result; !reflect.DeepEqual(got, exp) { t.Errorf("[Example %d] got %#v, expected %#v", i, got, exp) } } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/time.go000066400000000000000000000044731433650256200267630ustar00rootroot00000000000000package models // Helper time methods since parsing time can easily overflow and we only support a // specific time range. import ( "fmt" "math" "time" ) const ( // MinNanoTime is the minumum time that can be represented. // // 1677-09-21 00:12:43.145224194 +0000 UTC // // The two lowest minimum integers are used as sentinel values. The // minimum value needs to be used as a value lower than any other value for // comparisons and another separate value is needed to act as a sentinel // default value that is unusable by the user, but usable internally. // Because these two values need to be used for a special purpose, we do // not allow users to write points at these two times. MinNanoTime = int64(math.MinInt64) + 2 // MaxNanoTime is the maximum time that can be represented. // // 2262-04-11 23:47:16.854775806 +0000 UTC // // The highest time represented by a nanosecond needs to be used for an // exclusive range in the shard group, so the maximum time needs to be one // less than the possible maximum number of nanoseconds representable by an // int64 so that we don't lose a point at that one time. 
MaxNanoTime = int64(math.MaxInt64) - 1 ) var ( minNanoTime = time.Unix(0, MinNanoTime).UTC() maxNanoTime = time.Unix(0, MaxNanoTime).UTC() // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) ) // SafeCalcTime safely calculates the time given. Will return error if the time is outside the // supported range. func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { mult := GetPrecisionMultiplier(precision) if t, ok := safeSignedMult(timestamp, mult); ok { tme := time.Unix(0, t).UTC() return tme, CheckTime(tme) } return time.Time{}, ErrTimeOutOfRange } // CheckTime checks that a time is within the safe range. func CheckTime(t time.Time) error { if t.Before(minNanoTime) || t.After(maxNanoTime) { return ErrTimeOutOfRange } return nil } // Perform the multiplication and check to make sure it didn't overflow. func safeSignedMult(a, b int64) (int64, bool) { if a == 0 || b == 0 || a == 1 || b == 1 { return a * b, true } if a == MinNanoTime || b == MaxNanoTime { return 0, false } c := a * b return c, c/b == a } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/models/uint_support.go000066400000000000000000000001141433650256200305640ustar00rootroot00000000000000// +build uint uint64 package models func init() { EnableUintSupport() } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/pkg/000077500000000000000000000000001433650256200247645ustar00rootroot00000000000000golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/pkg/escape/000077500000000000000000000000001433650256200262245ustar00rootroot00000000000000golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/pkg/escape/bytes.go000066400000000000000000000044211433650256200277020ustar00rootroot00000000000000// Package escape contains utilities for escaping parts of InfluxQL // and InfluxDB line protocol. package escape // import "github.com/influxdata/influxdb1-client/pkg/escape" import ( "bytes" "strings" ) // Codes is a map of bytes to be escaped. var Codes = map[byte][]byte{ ',': []byte(`\,`), '"': []byte(`\"`), ' ': []byte(`\ `), '=': []byte(`\=`), } // Bytes escapes characters on the input slice, as defined by Codes. func Bytes(in []byte) []byte { for b, esc := range Codes { in = bytes.Replace(in, []byte{b}, esc, -1) } return in } const escapeChars = `," =` // IsEscaped returns whether b has any escaped characters, // i.e. whether b seems to have been processed by Bytes. func IsEscaped(b []byte) bool { for len(b) > 0 { i := bytes.IndexByte(b, '\\') if i < 0 { return false } if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { return true } b = b[i+1:] } return false } // AppendUnescaped appends the unescaped version of src to dst // and returns the resulting slice. func AppendUnescaped(dst, src []byte) []byte { var pos int for len(src) > 0 { next := bytes.IndexByte(src[pos:], '\\') if next < 0 || pos+next+1 >= len(src) { return append(dst, src...) } if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { if pos+next > 0 { dst = append(dst, src[:pos+next]...) } src = src[pos+next+1:] pos = 0 } else { pos += next + 1 } } return dst } // Unescape returns a new slice containing the unescaped version of in. func Unescape(in []byte) []byte { if len(in) == 0 { return nil } if bytes.IndexByte(in, '\\') == -1 { return in } i := 0 inLen := len(in) // The output size will be no more than inLen. 
Preallocating the // capacity of the output is faster and uses less memory than // letting append() do its own (over)allocation. out := make([]byte, 0, inLen) for { if i >= inLen { break } if in[i] == '\\' && i+1 < inLen { switch in[i+1] { case ',': out = append(out, ',') i += 2 continue case '"': out = append(out, '"') i += 2 continue case ' ': out = append(out, ' ') i += 2 continue case '=': out = append(out, '=') i += 2 continue } } out = append(out, in[i]) i += 1 } return out } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/pkg/escape/bytes_test.go000066400000000000000000000046461433650256200307520ustar00rootroot00000000000000package escape import ( "bytes" "reflect" "strings" "testing" ) var result []byte func BenchmarkBytesEscapeNoEscapes(b *testing.B) { buf := []byte(`no_escapes`) for i := 0; i < b.N; i++ { result = Bytes(buf) } } func BenchmarkUnescapeNoEscapes(b *testing.B) { buf := []byte(`no_escapes`) for i := 0; i < b.N; i++ { result = Unescape(buf) } } func BenchmarkBytesEscapeMany(b *testing.B) { tests := [][]byte{ []byte("this is my special string"), []byte("a field w=i th == tons of escapes"), []byte("some,commas,here"), } for n := 0; n < b.N; n++ { for _, test := range tests { result = Bytes(test) } } } func BenchmarkUnescapeMany(b *testing.B) { tests := [][]byte{ []byte(`this\ is\ my\ special\ string`), []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), []byte(`some\,commas\,here`), } for i := 0; i < b.N; i++ { for _, test := range tests { result = Unescape(test) } } } var boolResult bool func BenchmarkIsEscaped(b *testing.B) { tests := [][]byte{ []byte(`no_escapes`), []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), []byte(`some\,commas\,here`), } for i := 0; i < b.N; i++ { for _, test := range tests { boolResult = IsEscaped(test) } } } func BenchmarkAppendUnescaped(b *testing.B) { tests := [][]byte{ []byte(`this\ is\ my\ special\ string`), []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), []byte(`some\,commas\,here`), } for i := 0; i < b.N; i++ { result = nil for _, test := range tests { result = AppendUnescaped(result, test) } } } func TestUnescape(t *testing.T) { tests := []struct { in []byte out []byte }{ { []byte(nil), []byte(nil), }, { []byte(""), []byte(nil), }, { []byte("\\,\\\"\\ \\="), []byte(",\" ="), }, { []byte("\\\\"), []byte("\\\\"), }, { []byte("plain and simple"), []byte("plain and simple"), }, } for ii, tt := range tests { got := Unescape(tt.in) if !reflect.DeepEqual(got, tt.out) { t.Errorf("[%d] Unescape(%#v) = %#v, expected %#v", ii, string(tt.in), string(got), string(tt.out)) } } } func TestAppendUnescaped(t *testing.T) { cases := strings.Split(strings.TrimSpace(` normal inv\alid goo\"d sp\ ace \,\"\ \= f\\\ x `), "\n") for _, c := range cases { exp := Unescape([]byte(c)) got := AppendUnescaped(nil, []byte(c)) if !bytes.Equal(got, exp) { t.Errorf("AppendUnescaped failed for %#q: got %#q, exp %#q", c, got, exp) } } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/pkg/escape/strings.go000066400000000000000000000007431433650256200302500ustar00rootroot00000000000000package escape import "strings" var ( escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) ) // UnescapeString returns unescaped version of in. func UnescapeString(in string) string { if strings.IndexByte(in, '\\') == -1 { return in } return unescaper.Replace(in) } // String returns the escaped version of in. 
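// An illustrative sketch of the escaping performed by the replacer above
// (the input strings are hypothetical):
//
//	String(`cpu load,host=a`)   // returns `cpu\ load\,host\=a`
//	UnescapeString(`cpu\ load`) // returns `cpu load`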
func String(in string) string { return escaper.Replace(in) } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/pkg/escape/strings_test.go000066400000000000000000000041261433650256200313060ustar00rootroot00000000000000package escape import ( "testing" ) var s string func BenchmarkStringEscapeNoEscapes(b *testing.B) { for n := 0; n < b.N; n++ { s = String("no_escapes") } } func BenchmarkStringUnescapeNoEscapes(b *testing.B) { for n := 0; n < b.N; n++ { s = UnescapeString("no_escapes") } } func BenchmarkManyStringEscape(b *testing.B) { tests := []string{ "this is my special string", "a field w=i th == tons of escapes", "some,commas,here", } for n := 0; n < b.N; n++ { for _, test := range tests { s = String(test) } } } func BenchmarkManyStringUnescape(b *testing.B) { tests := []string{ `this\ is\ my\ special\ string`, `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, `some\,commas\,here`, } for n := 0; n < b.N; n++ { for _, test := range tests { s = UnescapeString(test) } } } func TestStringEscape(t *testing.T) { tests := []struct { in string expected string }{ { in: "", expected: "", }, { in: "this is my special string", expected: `this\ is\ my\ special\ string`, }, { in: "a field w=i th == tons of escapes", expected: `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, }, { in: "no_escapes", expected: "no_escapes", }, { in: "some,commas,here", expected: `some\,commas\,here`, }, } for _, test := range tests { if test.expected != String(test.in) { t.Errorf("Got %s, expected %s", String(test.in), test.expected) } } } func TestStringUnescape(t *testing.T) { tests := []struct { in string expected string }{ { in: "", expected: "", }, { in: `this\ is\ my\ special\ string`, expected: "this is my special string", }, { in: `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, expected: "a field w=i th == tons of escapes", }, { in: "no_escapes", expected: "no_escapes", }, { in: `some\,commas\,here`, expected: "some,commas,here", }, } for _, test := range tests { if test.expected != UnescapeString(test.in) { t.Errorf("Got %s, expected %s", UnescapeString(test.in), test.expected) } } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/v2/000077500000000000000000000000001433650256200245325ustar00rootroot00000000000000golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/v2/client.go000066400000000000000000000473031433650256200263460ustar00rootroot00000000000000// Package client (v2) is the current official Go client for InfluxDB. package client // import "github.com/influxdata/influxdb1-client/v2" import ( "bytes" "compress/gzip" "crypto/tls" "encoding/json" "errors" "fmt" "io" "io/ioutil" "mime" "net/http" "net/url" "path" "strconv" "strings" "time" "github.com/influxdata/influxdb1-client/models" ) type ContentEncoding string const ( DefaultEncoding ContentEncoding = "" GzipEncoding ContentEncoding = "gzip" ) // HTTPConfig is the config data needed to create an HTTP Client. type HTTPConfig struct { // Addr should be of the form "http://host:port" // or "http://[ipv6-host%zone]:port". Addr string // Username is the influxdb username, optional. Username string // Password is the influxdb password, optional. Password string // UserAgent is the http User Agent, defaults to "InfluxDBClient". UserAgent string // Timeout for influxdb writes, defaults to no timeout. Timeout time.Duration // InsecureSkipVerify gets passed to the http client, if true, it will // skip https certificate verification. Defaults to false. 
InsecureSkipVerify bool // TLSConfig allows the user to set their own TLS config for the HTTP // Client. If set, this option overrides InsecureSkipVerify. TLSConfig *tls.Config // Proxy configures the Proxy function on the HTTP client. Proxy func(req *http.Request) (*url.URL, error) // WriteEncoding specifies the encoding of write request WriteEncoding ContentEncoding } // BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. type BatchPointsConfig struct { // Precision is the write precision of the points, defaults to "ns". Precision string // Database is the database to write points to. Database string // RetentionPolicy is the retention policy of the points. RetentionPolicy string // Write consistency is the number of servers required to confirm write. WriteConsistency string } // Client is a client interface for writing & querying the database. type Client interface { // Ping checks that status of cluster, and will always return 0 time and no // error for UDP clients. Ping(timeout time.Duration) (time.Duration, string, error) // Write takes a BatchPoints object and writes all Points to InfluxDB. Write(bp BatchPoints) error // Query makes an InfluxDB Query on the database. This will fail if using // the UDP client. Query(q Query) (*Response, error) // QueryAsChunk makes an InfluxDB Query on the database. This will fail if using // the UDP client. QueryAsChunk(q Query) (*ChunkedResponse, error) // Close releases any resources a Client may be using. Close() error } // NewHTTPClient returns a new Client from the provided config. // Client is safe for concurrent use by multiple goroutines. func NewHTTPClient(conf HTTPConfig) (Client, error) { if conf.UserAgent == "" { conf.UserAgent = "InfluxDBClient" } u, err := url.Parse(conf.Addr) if err != nil { return nil, err } else if u.Scheme != "http" && u.Scheme != "https" { m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ " must start with http:// or https://", u.Scheme) return nil, errors.New(m) } switch conf.WriteEncoding { case DefaultEncoding, GzipEncoding: default: return nil, fmt.Errorf("unsupported encoding %s", conf.WriteEncoding) } tr := &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: conf.InsecureSkipVerify, }, Proxy: conf.Proxy, } if conf.TLSConfig != nil { tr.TLSClientConfig = conf.TLSConfig } return &client{ url: *u, username: conf.Username, password: conf.Password, useragent: conf.UserAgent, httpClient: &http.Client{ Timeout: conf.Timeout, Transport: tr, }, transport: tr, encoding: conf.WriteEncoding, }, nil } // Ping will check to see if the server is up with an optional timeout on waiting for leader. // Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
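// A minimal usage sketch, assuming a server at the (hypothetical) address below:
//
//	c, err := NewHTTPClient(HTTPConfig{Addr: "http://localhost:8086"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer c.Close()
//	rtt, version, err := c.Ping(5 * time.Second)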
func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { now := time.Now() u := c.url u.Path = path.Join(u.Path, "ping") req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return 0, "", err } req.Header.Set("User-Agent", c.useragent) if c.username != "" { req.SetBasicAuth(c.username, c.password) } if timeout > 0 { params := req.URL.Query() params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) req.URL.RawQuery = params.Encode() } resp, err := c.httpClient.Do(req) if err != nil { return 0, "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return 0, "", err } if resp.StatusCode != http.StatusNoContent { var err = errors.New(string(body)) return 0, "", err } version := resp.Header.Get("X-Influxdb-Version") return time.Since(now), version, nil } // Close releases the client's resources. func (c *client) Close() error { c.transport.CloseIdleConnections() return nil } // client is safe for concurrent use as the fields are all read-only // once the client is instantiated. type client struct { // N.B - if url.UserInfo is accessed in future modifications to the // methods on client, you will need to synchronize access to url. url url.URL username string password string useragent string httpClient *http.Client transport *http.Transport encoding ContentEncoding } // BatchPoints is an interface into a batched grouping of points to write into // InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate // batch for each goroutine. type BatchPoints interface { // AddPoint adds the given point to the Batch of points. AddPoint(p *Point) // AddPoints adds the given points to the Batch of points. AddPoints(ps []*Point) // Points lists the points in the Batch. Points() []*Point // Precision returns the currently set precision of this Batch. Precision() string // SetPrecision sets the precision of this batch. SetPrecision(s string) error // Database returns the currently set database of this Batch. Database() string // SetDatabase sets the database of this Batch. SetDatabase(s string) // WriteConsistency returns the currently set write consistency of this Batch. WriteConsistency() string // SetWriteConsistency sets the write consistency of this Batch. SetWriteConsistency(s string) // RetentionPolicy returns the currently set retention policy of this Batch. RetentionPolicy() string // SetRetentionPolicy sets the retention policy of this Batch. SetRetentionPolicy(s string) } // NewBatchPoints returns a BatchPoints interface based on the given config. func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { if conf.Precision == "" { conf.Precision = "ns" } if _, err := time.ParseDuration("1" + conf.Precision); err != nil { return nil, err } bp := &batchpoints{ database: conf.Database, precision: conf.Precision, retentionPolicy: conf.RetentionPolicy, writeConsistency: conf.WriteConsistency, } return bp, nil } type batchpoints struct { points []*Point database string precision string retentionPolicy string writeConsistency string } func (bp *batchpoints) AddPoint(p *Point) { bp.points = append(bp.points, p) } func (bp *batchpoints) AddPoints(ps []*Point) { bp.points = append(bp.points, ps...) 
} func (bp *batchpoints) Points() []*Point { return bp.points } func (bp *batchpoints) Precision() string { return bp.precision } func (bp *batchpoints) Database() string { return bp.database } func (bp *batchpoints) WriteConsistency() string { return bp.writeConsistency } func (bp *batchpoints) RetentionPolicy() string { return bp.retentionPolicy } func (bp *batchpoints) SetPrecision(p string) error { if _, err := time.ParseDuration("1" + p); err != nil { return err } bp.precision = p return nil } func (bp *batchpoints) SetDatabase(db string) { bp.database = db } func (bp *batchpoints) SetWriteConsistency(wc string) { bp.writeConsistency = wc } func (bp *batchpoints) SetRetentionPolicy(rp string) { bp.retentionPolicy = rp } // Point represents a single data point. type Point struct { pt models.Point } // NewPoint returns a point with the given timestamp. If a timestamp is not // given, then data is sent to the database without a timestamp, in which case // the server will assign local time upon reception. NOTE: it is recommended to // send data with a timestamp. func NewPoint( name string, tags map[string]string, fields map[string]interface{}, t ...time.Time, ) (*Point, error) { var T time.Time if len(t) > 0 { T = t[0] } pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) if err != nil { return nil, err } return &Point{ pt: pt, }, nil } // String returns a line-protocol string of the Point. func (p *Point) String() string { return p.pt.String() } // PrecisionString returns a line-protocol string of the Point, // with the timestamp formatted for the given precision. func (p *Point) PrecisionString(precision string) string { return p.pt.PrecisionString(precision) } // Name returns the measurement name of the point. func (p *Point) Name() string { return string(p.pt.Name()) } // Tags returns the tags associated with the point. func (p *Point) Tags() map[string]string { return p.pt.Tags().Map() } // Time return the timestamp for the point. func (p *Point) Time() time.Time { return p.pt.Time() } // UnixNano returns timestamp of the point in nanoseconds since Unix epoch. func (p *Point) UnixNano() int64 { return p.pt.UnixNano() } // Fields returns the fields for the point. func (p *Point) Fields() (map[string]interface{}, error) { return p.pt.Fields() } // NewPointFrom returns a point from the provided models.Point. 
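// An illustrative sketch: points parsed from line protocol via the models
// package can be wrapped for use with this client (the input line is
// hypothetical):
//
//	mps, _ := models.ParsePointsString(`cpu,host=server01 value=0.64`)
//	p := NewPointFrom(mps[0])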
func NewPointFrom(pt models.Point) *Point { return &Point{pt: pt} } func (c *client) Write(bp BatchPoints) error { var b bytes.Buffer var w io.Writer if c.encoding == GzipEncoding { w = gzip.NewWriter(&b) } else { w = &b } for _, p := range bp.Points() { if p == nil { continue } if _, err := io.WriteString(w, p.pt.PrecisionString(bp.Precision())); err != nil { return err } if _, err := w.Write([]byte{'\n'}); err != nil { return err } } // gzip writer should be closed to flush data into underlying buffer if c, ok := w.(io.Closer); ok { if err := c.Close(); err != nil { return err } } u := c.url u.Path = path.Join(u.Path, "write") req, err := http.NewRequest("POST", u.String(), &b) if err != nil { return err } if c.encoding != DefaultEncoding { req.Header.Set("Content-Encoding", string(c.encoding)) } req.Header.Set("Content-Type", "") req.Header.Set("User-Agent", c.useragent) if c.username != "" { req.SetBasicAuth(c.username, c.password) } params := req.URL.Query() params.Set("db", bp.Database()) params.Set("rp", bp.RetentionPolicy()) params.Set("precision", bp.Precision()) params.Set("consistency", bp.WriteConsistency()) req.URL.RawQuery = params.Encode() resp, err := c.httpClient.Do(req) if err != nil { return err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { var err = errors.New(string(body)) return err } return nil } // Query defines a query to send to the server. type Query struct { Command string Database string RetentionPolicy string Precision string Chunked bool ChunkSize int Parameters map[string]interface{} } // Params is a type alias to the query parameters. type Params map[string]interface{} // NewQuery returns a query object. // The database and precision arguments can be empty strings if they are not needed for the query. func NewQuery(command, database, precision string) Query { return Query{ Command: command, Database: database, Precision: precision, Parameters: make(map[string]interface{}), } } // NewQueryWithRP returns a query object. // The database, retention policy, and precision arguments can be empty strings if they are not needed // for the query. Setting the retention policy only works on InfluxDB versions 1.6 or greater. func NewQueryWithRP(command, database, retentionPolicy, precision string) Query { return Query{ Command: command, Database: database, RetentionPolicy: retentionPolicy, Precision: precision, Parameters: make(map[string]interface{}), } } // NewQueryWithParameters returns a query object. // The database and precision arguments can be empty strings if they are not needed for the query. // parameters is a map of the parameter names used in the command to their values. func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { return Query{ Command: command, Database: database, Precision: precision, Parameters: parameters, } } // Response represents a list of statement results. type Response struct { Results []Result Err string `json:"error,omitempty"` } // Error returns the first error from any statement. // It returns nil if no errors occurred on any statements. func (r *Response) Error() error { if r.Err != "" { return errors.New(r.Err) } for _, result := range r.Results { if result.Err != "" { return errors.New(result.Err) } } return nil } // Message represents a user message. 
type Message struct { Level string Text string } // Result represents a resultset returned from a single statement. type Result struct { StatementId int `json:"statement_id"` Series []models.Row Messages []*Message Err string `json:"error,omitempty"` } // Query sends a command to the server and returns the Response. func (c *client) Query(q Query) (*Response, error) { req, err := c.createDefaultRequest(q) if err != nil { return nil, err } params := req.URL.Query() if q.Chunked { params.Set("chunked", "true") if q.ChunkSize > 0 { params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) } req.URL.RawQuery = params.Encode() } resp, err := c.httpClient.Do(req) if err != nil { return nil, err } defer func() { io.Copy(ioutil.Discard, resp.Body) // https://github.com/influxdata/influxdb1-client/issues/58 resp.Body.Close() }() if err := checkResponse(resp); err != nil { return nil, err } var response Response if q.Chunked { cr := NewChunkedResponse(resp.Body) for { r, err := cr.NextResponse() if err != nil { if err == io.EOF { break } // If we got an error while decoding the response, send that back. return nil, err } if r == nil { break } response.Results = append(response.Results, r.Results...) if r.Err != "" { response.Err = r.Err break } } } else { dec := json.NewDecoder(resp.Body) dec.UseNumber() decErr := dec.Decode(&response) // ignore this error if we got an invalid status code if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { decErr = nil } // If we got a valid decode error, send that back if decErr != nil { return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) } } // If we don't have an error in our json response, and didn't get statusOK // then send back an error if resp.StatusCode != http.StatusOK && response.Error() == nil { return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) } return &response, nil } // QueryAsChunk sends a command to the server and returns the Response. func (c *client) QueryAsChunk(q Query) (*ChunkedResponse, error) { req, err := c.createDefaultRequest(q) if err != nil { return nil, err } params := req.URL.Query() params.Set("chunked", "true") if q.ChunkSize > 0 { params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) } req.URL.RawQuery = params.Encode() resp, err := c.httpClient.Do(req) if err != nil { return nil, err } if err := checkResponse(resp); err != nil { return nil, err } return NewChunkedResponse(resp.Body), nil } func checkResponse(resp *http.Response) error { // If we lack a X-Influxdb-Version header, then we didn't get a response from influxdb // but instead some other service. If the error code is also a 500+ code, then some // downstream loadbalancer/proxy/etc had an issue and we should report that. if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError { body, err := ioutil.ReadAll(resp.Body) if err != nil || len(body) == 0 { return fmt.Errorf("received status code %d from downstream server", resp.StatusCode) } return fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body) } // If we get an unexpected content type, then it is also not from influx direct and therefore // we want to know what we received and what status code was returned for debugging purposes. 
if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" { // Read up to 1kb of the body to help identify downstream errors and limit the impact of things // like downstream serving a large file body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024)) if err != nil || len(body) == 0 { return fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode) } return fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body) } return nil } func (c *client) createDefaultRequest(q Query) (*http.Request, error) { u := c.url u.Path = path.Join(u.Path, "query") jsonParameters, err := json.Marshal(q.Parameters) if err != nil { return nil, err } req, err := http.NewRequest("POST", u.String(), nil) if err != nil { return nil, err } req.Header.Set("Content-Type", "") req.Header.Set("User-Agent", c.useragent) if c.username != "" { req.SetBasicAuth(c.username, c.password) } params := req.URL.Query() params.Set("q", q.Command) params.Set("db", q.Database) if q.RetentionPolicy != "" { params.Set("rp", q.RetentionPolicy) } params.Set("params", string(jsonParameters)) if q.Precision != "" { params.Set("epoch", q.Precision) } req.URL.RawQuery = params.Encode() return req, nil } // duplexReader reads responses and writes it to another writer while // satisfying the reader interface. type duplexReader struct { r io.ReadCloser w io.Writer } func (r *duplexReader) Read(p []byte) (n int, err error) { n, err = r.r.Read(p) if err == nil { r.w.Write(p[:n]) } return n, err } // Close closes the response. func (r *duplexReader) Close() error { return r.r.Close() } // ChunkedResponse represents a response from the server that // uses chunking to stream the output. type ChunkedResponse struct { dec *json.Decoder duplex *duplexReader buf bytes.Buffer } // NewChunkedResponse reads a stream and produces responses from the stream. func NewChunkedResponse(r io.Reader) *ChunkedResponse { rc, ok := r.(io.ReadCloser) if !ok { rc = ioutil.NopCloser(r) } resp := &ChunkedResponse{} resp.duplex = &duplexReader{r: rc, w: &resp.buf} resp.dec = json.NewDecoder(resp.duplex) resp.dec.UseNumber() return resp } // NextResponse reads the next line of the stream and returns a response. func (r *ChunkedResponse) NextResponse() (*Response, error) { var response Response if err := r.dec.Decode(&response); err != nil { if err == io.EOF { return nil, err } // A decoding error happened. This probably means the server crashed // and sent a last-ditch error message to us. Ensure we have read the // entirety of the connection to get any remaining error text. io.Copy(ioutil.Discard, r.duplex) return nil, errors.New(strings.TrimSpace(r.buf.String())) } r.buf.Reset() return &response, nil } // Close closes the response. func (r *ChunkedResponse) Close() error { return r.duplex.Close() } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/v2/client_test.go000066400000000000000000000625711433650256200274110ustar00rootroot00000000000000package client import ( "encoding/json" "fmt" "io/ioutil" "net/http" "net/http/httptest" "net/url" "path" "reflect" "strings" "sync" "testing" "time" ) func TestUDPClient_Query(t *testing.T) { config := UDPConfig{Addr: "localhost:8089"} c, err := NewUDPClient(config) if err != nil { t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) } defer c.Close() query := Query{} _, err = c.Query(query) if err == nil { t.Error("Querying UDP client should fail") } } func TestUDPClient_Ping(t *testing.T) { config := UDPConfig{Addr: "localhost:8089"} c, err := NewUDPClient(config) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } defer c.Close() rtt, version, err := c.Ping(0) if rtt != 0 || version != "" || err != nil { t.Errorf("unexpected error. expected (%v, '%v', %v), actual (%v, '%v', %v)", 0, "", nil, rtt, version, err) } } func TestUDPClient_Write(t *testing.T) { config := UDPConfig{Addr: "localhost:8089"} c, err := NewUDPClient(config) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } defer c.Close() bp, err := NewBatchPoints(BatchPointsConfig{}) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } fields := make(map[string]interface{}) fields["value"] = 1.0 pt, _ := NewPoint("cpu", make(map[string]string), fields) bp.AddPoint(pt) err = c.Write(bp) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } } func TestUDPClient_BadAddr(t *testing.T) { config := UDPConfig{Addr: "foobar@wahoo"} c, err := NewUDPClient(config) if err == nil { defer c.Close() t.Error("Expected resolve error") } } func TestUDPClient_Batches(t *testing.T) { var logger writeLogger var cl udpclient cl.conn = &logger cl.payloadSize = 20 // should allow for two points per batch // expected point should look like this: "cpu a=1i" fields := map[string]interface{}{"a": 1} p, _ := NewPoint("cpu", nil, fields, time.Time{}) bp, _ := NewBatchPoints(BatchPointsConfig{}) for i := 0; i < 9; i++ { bp.AddPoint(p) } if err := cl.Write(bp); err != nil { t.Fatalf("Unexpected error during Write: %v", err) } if len(logger.writes) != 5 { t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), 5) } } func TestUDPClient_Split(t *testing.T) { var logger writeLogger var cl udpclient cl.conn = &logger cl.payloadSize = 1 // force one field per point fields := map[string]interface{}{"a": 1, "b": 2, "c": 3, "d": 4} p, _ := NewPoint("cpu", nil, fields, time.Unix(1, 0)) bp, _ := NewBatchPoints(BatchPointsConfig{}) bp.AddPoint(p) if err := cl.Write(bp); err != nil { t.Fatalf("Unexpected error during Write: %v", err) } if len(logger.writes) != len(fields) { t.Errorf("Mismatched write count: got %v, exp %v", len(logger.writes), len(fields)) } } type writeLogger struct { writes [][]byte } func (w *writeLogger) Write(b []byte) (int, error) { w.writes = append(w.writes, append([]byte(nil), b...)) return len(b), nil } func (w *writeLogger) Close() error { return nil } func TestClient_Query(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data Response w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{} _, err := c.Query(query) if err != nil { t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) } } func TestClient_QueryWithRP(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { params := r.URL.Query() if got, exp := params.Get("db"), "db0"; got != exp { t.Errorf("unexpected db query parameter: %s != %s", exp, got) } if got, exp := params.Get("rp"), "rp0"; got != exp { t.Errorf("unexpected rp query parameter: %s != %s", exp, got) } var data Response w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := NewQueryWithRP("SELECT * FROM m0", "db0", "rp0", "") _, err := c.Query(query) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } } func TestClientDownstream500WithBody_Query(t *testing.T) { const err500page = ` 500 Internal Server Error Internal Server Error ` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err500page)) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{} _, err := c.Query(query) expected := fmt.Sprintf("received status code 500 from downstream server, with response body: %q", err500page) if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } } func TestClientDownstream500_Query(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{} _, err := c.Query(query) expected := "received status code 500 from downstream server" if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } } func TestClientDownstream400WithBody_Query(t *testing.T) { const err403page = ` 403 Forbidden Forbidden ` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusForbidden) w.Write([]byte(err403page)) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{} _, err := c.Query(query) expected := fmt.Sprintf(`expected json response, got "text/html", with status: %v and response body: %q`, http.StatusForbidden, err403page) if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } } func TestClientDownstream400_Query(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusForbidden) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{} _, err := c.Query(query) expected := fmt.Sprintf(`expected json response, got empty body, with status: %v`, http.StatusForbidden) if err.Error() != expected { t.Errorf("unexpected error. 
expected %v, actual %v", expected, err) } } func TestClient500_Query(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("X-Influxdb-Version", "1.3.1") w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(`{"error":"test"}`)) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{} resp, err := c.Query(query) if err != nil { t.Errorf("unexpected error. expected nothing, actual %v", err) } if resp.Err != "test" { t.Errorf(`unexpected response error. expected "test", actual %v`, resp.Err) } } func TestClient_ChunkedQuery(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data Response w.Header().Set("Content-Type", "application/json") w.Header().Set("X-Influxdb-Version", "1.3.1") w.WriteHeader(http.StatusOK) enc := json.NewEncoder(w) _ = enc.Encode(data) _ = enc.Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, err := NewHTTPClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := Query{Chunked: true} _, err = c.Query(query) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } } func TestClientDownstream500WithBody_ChunkedQuery(t *testing.T) { const err500page = ` 500 Internal Server Error Internal Server Error ` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err500page)) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, err := NewHTTPClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := Query{Chunked: true} _, err = c.Query(query) expected := fmt.Sprintf("received status code 500 from downstream server, with response body: %q", err500page) if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } } func TestClientDownstream500_ChunkedQuery(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{Chunked: true} _, err := c.Query(query) expected := "received status code 500 from downstream server" if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } } func TestClient500_ChunkedQuery(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("X-Influxdb-Version", "1.3.1") w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(`{"error":"test"}`)) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{Chunked: true} resp, err := c.Query(query) if err != nil { t.Errorf("unexpected error. expected nothing, actual %v", err) } if resp.Err != "test" { t.Errorf(`unexpected response error. 
expected "test", actual %v`, resp.Err) } } func TestClientDownstream400WithBody_ChunkedQuery(t *testing.T) { const err403page = ` 403 Forbidden Forbidden ` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusForbidden) w.Write([]byte(err403page)) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{Chunked: true} _, err := c.Query(query) expected := fmt.Sprintf(`expected json response, got "text/html", with status: %v and response body: %q`, http.StatusForbidden, err403page) if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } } func TestClientDownstream400_ChunkedQuery(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusForbidden) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() query := Query{Chunked: true} _, err := c.Query(query) expected := fmt.Sprintf(`expected json response, got empty body, with status: %v`, http.StatusForbidden) if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } } func TestClient_BoundParameters(t *testing.T) { var parameterString string ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data Response r.ParseForm() parameterString = r.FormValue("params") w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() expectedParameters := map[string]interface{}{ "testStringParameter": "testStringValue", "testNumberParameter": 12.3, } query := Query{ Parameters: expectedParameters, } _, err := c.Query(query) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } var actualParameters map[string]interface{} err = json.Unmarshal([]byte(parameterString), &actualParameters) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } if !reflect.DeepEqual(expectedParameters, actualParameters) { t.Errorf("unexpected parameters. expected %v, actual %v", expectedParameters, actualParameters) } } func TestClient_BasicAuth(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { u, p, ok := r.BasicAuth() if !ok { t.Errorf("basic auth error") } if u != "username" { t.Errorf("unexpected username, expected %q, actual %q", "username", u) } if p != "password" { t.Errorf("unexpected password, expected %q, actual %q", "password", p) } var data Response w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"} c, _ := NewHTTPClient(config) defer c.Close() query := Query{} _, err := c.Query(query) if err != nil { t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) } } func TestClient_Ping(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data Response w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusNoContent) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() _, _, err := c.Ping(0) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_Concurrent_Use(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write([]byte(`{}`)) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() var wg sync.WaitGroup wg.Add(3) n := 1000 errC := make(chan error) go func() { defer wg.Done() bp, err := NewBatchPoints(BatchPointsConfig{}) if err != nil { errC <- fmt.Errorf("got error %v", err) return } for i := 0; i < n; i++ { if err = c.Write(bp); err != nil { errC <- fmt.Errorf("got error %v", err) return } } }() go func() { defer wg.Done() var q Query for i := 0; i < n; i++ { if _, err := c.Query(q); err != nil { errC <- fmt.Errorf("got error %v", err) return } } }() go func() { defer wg.Done() for i := 0; i < n; i++ { c.Ping(time.Second) } }() go func() { wg.Wait() close(errC) }() for err := range errC { if err != nil { t.Error(err) } } } func TestClient_Write(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { in, err := ioutil.ReadAll(r.Body) if err != nil { t.Fatalf("unexpected error: %s", err) } else if have, want := strings.TrimSpace(string(in)), `m0,host=server01 v1=2,v2=2i,v3=2u,v4="foobar",v5=true 0`; have != want { t.Errorf("unexpected write protocol: %s != %s", have, want) } var data Response w.WriteHeader(http.StatusNoContent) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, _ := NewHTTPClient(config) defer c.Close() bp, err := NewBatchPoints(BatchPointsConfig{}) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } pt, err := NewPoint( "m0", map[string]string{ "host": "server01", }, map[string]interface{}{ "v1": float64(2), "v2": int64(2), "v3": uint64(2), "v4": "foobar", "v5": true, }, time.Unix(0, 0).UTC(), ) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } bp.AddPoint(pt) err = c.Write(bp) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_UserAgent(t *testing.T) { receivedUserAgent := "" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { receivedUserAgent = r.UserAgent() var data Response w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() _, err := http.Get(ts.URL) if err != nil { t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) } tests := []struct { name string userAgent string expected string }{ { name: "Empty user agent", userAgent: "", expected: "InfluxDBClient", }, { name: "Custom user agent", userAgent: "Test Influx Client", expected: "Test Influx Client", }, } for _, test := range tests { config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent} c, _ := NewHTTPClient(config) defer c.Close() receivedUserAgent = "" query := Query{} _, err = c.Query(query) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } if !strings.HasPrefix(receivedUserAgent, test.expected) { t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) } receivedUserAgent = "" bp, _ := NewBatchPoints(BatchPointsConfig{}) err = c.Write(bp) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } if !strings.HasPrefix(receivedUserAgent, test.expected) { t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) } receivedUserAgent = "" _, err := c.Query(query) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } if receivedUserAgent != test.expected { t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) } } } func TestClient_PointString(t *testing.T) { const shortForm = "2006-Jan-02" time1, _ := time.Parse(shortForm, "2013-Feb-03") tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} p, _ := NewPoint("cpu_usage", tags, fields, time1) s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000" if p.String() != s { t.Errorf("Point String Error, got %s, expected %s", p.String(), s) } s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000" if p.PrecisionString("ms") != s { t.Errorf("Point String Error, got %s, expected %s", p.PrecisionString("ms"), s) } } func TestClient_PointWithoutTimeString(t *testing.T) { tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} p, _ := NewPoint("cpu_usage", tags, fields) s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39" if p.String() != s { t.Errorf("Point String Error, got %s, expected %s", p.String(), s) } if p.PrecisionString("ms") != s { t.Errorf("Point String Error, got %s, expected %s", p.PrecisionString("ms"), s) } } func TestClient_PointName(t *testing.T) { tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} p, _ := NewPoint("cpu_usage", tags, fields) exp := "cpu_usage" if p.Name() != exp { t.Errorf("Error, got %s, expected %s", p.Name(), exp) } } func TestClient_PointTags(t *testing.T) { tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} p, _ := NewPoint("cpu_usage", tags, fields) if !reflect.DeepEqual(tags, p.Tags()) { t.Errorf("Error, got %v, expected %v", p.Tags(), tags) } } func TestClient_PointUnixNano(t *testing.T) { const shortForm = "2006-Jan-02" time1, _ := time.Parse(shortForm, "2013-Feb-03") tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} p, _ := NewPoint("cpu_usage", tags, fields, time1) exp := int64(1359849600000000000) if p.UnixNano() != exp { t.Errorf("Error, got %d, expected %d", p.UnixNano(), exp) } } func TestClient_PointFields(t *testing.T) { tags := 
map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} p, _ := NewPoint("cpu_usage", tags, fields) pfields, err := p.Fields() if err != nil { t.Fatal(err) } if !reflect.DeepEqual(fields, pfields) { t.Errorf("Error, got %v, expected %v", pfields, fields) } } func TestBatchPoints_PrecisionError(t *testing.T) { _, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"}) if err == nil { t.Errorf("Precision: foobar should have errored") } bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"}) err = bp.SetPrecision("foobar") if err == nil { t.Errorf("Precision: foobar should have errored") } } func TestBatchPoints_SettersGetters(t *testing.T) { bp, _ := NewBatchPoints(BatchPointsConfig{ Precision: "ns", Database: "db", RetentionPolicy: "rp", WriteConsistency: "wc", }) if bp.Precision() != "ns" { t.Errorf("Expected: %s, got %s", bp.Precision(), "ns") } if bp.Database() != "db" { t.Errorf("Expected: %s, got %s", bp.Database(), "db") } if bp.RetentionPolicy() != "rp" { t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp") } if bp.WriteConsistency() != "wc" { t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc") } bp.SetDatabase("db2") bp.SetRetentionPolicy("rp2") bp.SetWriteConsistency("wc2") err := bp.SetPrecision("s") if err != nil { t.Errorf("Did not expect error: %s", err.Error()) } if bp.Precision() != "s" { t.Errorf("Expected: %s, got %s", bp.Precision(), "s") } if bp.Database() != "db2" { t.Errorf("Expected: %s, got %s", bp.Database(), "db2") } if bp.RetentionPolicy() != "rp2" { t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2") } if bp.WriteConsistency() != "wc2" { t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2") } } func TestClientConcatURLPath(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if !strings.Contains(r.URL.String(), "/influxdbproxy/ping") || strings.Contains(r.URL.String(), "/ping/ping") { t.Errorf("unexpected error. expected %v contains in %v", "/influxdbproxy/ping", r.URL) } var data Response w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusNoContent) _ = json.NewEncoder(w).Encode(data) })) defer ts.Close() url, _ := url.Parse(ts.URL) url.Path = path.Join(url.Path, "influxdbproxy") fmt.Println("TestClientConcatURLPath: concat with path 'influxdbproxy' result ", url.String()) c, _ := NewHTTPClient(HTTPConfig{Addr: url.String()}) defer c.Close() _, _, err := c.Ping(0) if err != nil { t.Errorf("unexpected error. expected %v, actual %v", nil, err) } _, _, err = c.Ping(0) if err != nil { t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) } } func TestClientProxy(t *testing.T) { pinged := false ts := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { if got, want := req.URL.String(), "http://example.com:8086/ping"; got != want { t.Errorf("invalid url in request: got=%s want=%s", got, want) } resp.WriteHeader(http.StatusNoContent) pinged = true })) defer ts.Close() proxyURL, _ := url.Parse(ts.URL) c, _ := NewHTTPClient(HTTPConfig{ Addr: "http://example.com:8086", Proxy: http.ProxyURL(proxyURL), }) if _, _, err := c.Ping(0); err != nil { t.Fatalf("could not ping server: %s", err) } if !pinged { t.Fatalf("no http request was received") } } func TestClient_QueryAsChunk(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var data Response w.Header().Set("Content-Type", "application/json") w.Header().Set("X-Influxdb-Version", "1.3.1") w.WriteHeader(http.StatusOK) enc := json.NewEncoder(w) _ = enc.Encode(data) _ = enc.Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, err := NewHTTPClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := Query{Chunked: true} resp, err := c.QueryAsChunk(query) defer resp.Close() if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } } func TestClient_ReadStatementId(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { data := Response{ Results: []Result{{ StatementId: 1, Series: nil, Messages: nil, Err: "", }}, Err: "", } w.Header().Set("Content-Type", "application/json") w.Header().Set("X-Influxdb-Version", "1.3.1") w.WriteHeader(http.StatusOK) enc := json.NewEncoder(w) _ = enc.Encode(data) _ = enc.Encode(data) })) defer ts.Close() config := HTTPConfig{Addr: ts.URL} c, err := NewHTTPClient(config) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } query := Query{Chunked: true} resp, err := c.QueryAsChunk(query) defer resp.Close() if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } r, err := resp.NextResponse() if err != nil { t.Fatalf("expected success, got %s", err) } if r.Results[0].StatementId != 1 { t.Fatalf("expected statement_id = 1, got %d", r.Results[0].StatementId) } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/v2/example_test.go000066400000000000000000000150521433650256200275560ustar00rootroot00000000000000package client_test import ( "fmt" "math/rand" "os" "time" "github.com/influxdata/influxdb1-client/v2" ) // Create a new client func ExampleClient() { // NOTE: this assumes you've setup a user and have setup shell env variables, // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. 
_, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", Username: os.Getenv("INFLUX_USER"), Password: os.Getenv("INFLUX_PWD"), }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } } // Write a point using the UDP client func ExampleClient_uDP() { // Make client config := client.UDPConfig{Addr: "localhost:8089"} c, err := client.NewUDPClient(config) if err != nil { fmt.Println("Error: ", err.Error()) } defer c.Close() // Create a new point batch bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ Precision: "s", }) // Create a point and add to batch tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{ "idle": 10.1, "system": 53.3, "user": 46.6, } pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) if err != nil { fmt.Println("Error: ", err.Error()) } bp.AddPoint(pt) // Write the batch c.Write(bp) } // Ping the cluster using the HTTP client func ExampleClient_Ping() { // Make client c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } defer c.Close() _, _, err = c.Ping(0) if err != nil { fmt.Println("Error pinging InfluxDB Cluster: ", err.Error()) } } // Write a point using the HTTP client func ExampleClient_write() { // Make client c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } defer c.Close() // Create a new point batch bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ Database: "BumbleBeeTuna", Precision: "s", }) // Create a point and add to batch tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{ "idle": 10.1, "system": 53.3, "user": 46.6, } pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) if err != nil { fmt.Println("Error: ", err.Error()) } bp.AddPoint(pt) // Write the batch c.Write(bp) } // Create a batch and add a point func ExampleBatchPoints() { // Create a new point batch bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ Database: "BumbleBeeTuna", Precision: "s", }) // Create a point and add to batch tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{ "idle": 10.1, "system": 53.3, "user": 46.6, } pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) if err != nil { fmt.Println("Error: ", err.Error()) } bp.AddPoint(pt) } // Using the BatchPoints setter functions func ExampleBatchPoints_setters() { // Create a new point batch bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) bp.SetDatabase("BumbleBeeTuna") bp.SetPrecision("ms") // Create a point and add to batch tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{ "idle": 10.1, "system": 53.3, "user": 46.6, } pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) if err != nil { fmt.Println("Error: ", err.Error()) } bp.AddPoint(pt) } // Create a new point with a timestamp func ExamplePoint() { tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{ "idle": 10.1, "system": 53.3, "user": 46.6, } pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) if err == nil { fmt.Println("We created a point: ", pt.String()) } } // Create a new point without a timestamp func ExamplePoint_withoutTime() { tags := map[string]string{"cpu": "cpu-total"} fields := map[string]interface{}{ "idle": 10.1, "system": 53.3, "user": 46.6, } pt, err := 
client.NewPoint("cpu_usage", tags, fields) if err == nil { fmt.Println("We created a point w/o time: ", pt.String()) } } // Write 1000 points func ExampleClient_write1000() { sampleSize := 1000 // Make client c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } defer c.Close() rand.Seed(42) bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ Database: "systemstats", Precision: "us", }) for i := 0; i < sampleSize; i++ { regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} tags := map[string]string{ "cpu": "cpu-total", "host": fmt.Sprintf("host%d", rand.Intn(1000)), "region": regions[rand.Intn(len(regions))], } idle := rand.Float64() * 100.0 fields := map[string]interface{}{ "idle": idle, "busy": 100.0 - idle, } pt, err := client.NewPoint( "cpu_usage", tags, fields, time.Now(), ) if err != nil { println("Error:", err.Error()) continue } bp.AddPoint(pt) } err = c.Write(bp) if err != nil { fmt.Println("Error: ", err.Error()) } } // Make a Query func ExampleClient_query() { // Make client c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } defer c.Close() q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns") if response, err := c.Query(q); err == nil && response.Error() == nil { fmt.Println(response.Results) } } // Create a Database with a query func ExampleClient_createDatabase() { // Make client c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } defer c.Close() q := client.NewQuery("CREATE DATABASE telegraf", "", "") if response, err := c.Query(q); err == nil && response.Error() == nil { fmt.Println(response.Results) } } func ExampleClient_queryWithParams() { // Make client c, err := client.NewHTTPClient(client.HTTPConfig{ Addr: "http://localhost:8086", }) if err != nil { fmt.Println("Error creating InfluxDB Client: ", err.Error()) } defer c.Close() q := client.NewQueryWithParameters("SELECT $fn($value) FROM $m", "square_holes", "ns", client.Params{ "fn": client.Identifier("count"), "value": client.Identifier("value"), "m": client.Identifier("shapes"), }) if response, err := c.Query(q); err == nil && response.Error() == nil { fmt.Println(response.Results) } } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/v2/params.go000066400000000000000000000030501433650256200263420ustar00rootroot00000000000000package client import ( "encoding/json" "time" ) type ( // Identifier is an identifier value. Identifier string // StringValue is a string literal. StringValue string // RegexValue is a regexp literal. RegexValue string // NumberValue is a number literal. NumberValue float64 // IntegerValue is an integer literal. IntegerValue int64 // BooleanValue is a boolean literal. BooleanValue bool // TimeValue is a time literal. TimeValue time.Time // DurationValue is a duration literal. 
DurationValue time.Duration ) func (v Identifier) MarshalJSON() ([]byte, error) { m := map[string]string{"identifier": string(v)} return json.Marshal(m) } func (v StringValue) MarshalJSON() ([]byte, error) { m := map[string]string{"string": string(v)} return json.Marshal(m) } func (v RegexValue) MarshalJSON() ([]byte, error) { m := map[string]string{"regex": string(v)} return json.Marshal(m) } func (v NumberValue) MarshalJSON() ([]byte, error) { m := map[string]float64{"number": float64(v)} return json.Marshal(m) } func (v IntegerValue) MarshalJSON() ([]byte, error) { m := map[string]int64{"integer": int64(v)} return json.Marshal(m) } func (v BooleanValue) MarshalJSON() ([]byte, error) { m := map[string]bool{"boolean": bool(v)} return json.Marshal(m) } func (v TimeValue) MarshalJSON() ([]byte, error) { t := time.Time(v) m := map[string]string{"string": t.Format(time.RFC3339Nano)} return json.Marshal(m) } func (v DurationValue) MarshalJSON() ([]byte, error) { m := map[string]int64{"duration": int64(v)} return json.Marshal(m) } golang-github-influxdata-influxdb1-client-0.0~git20220302.a9ab567/v2/udp.go000066400000000000000000000050771433650256200256620ustar00rootroot00000000000000package client import ( "fmt" "io" "net" "time" ) const ( // UDPPayloadSize is a reasonable default payload size for UDP packets that // could be travelling over the internet. UDPPayloadSize = 512 ) // UDPConfig is the config data needed to create a UDP Client. type UDPConfig struct { // Addr should be of the form "host:port" // or "[ipv6-host%zone]:port". Addr string // PayloadSize is the maximum size of a UDP client message, optional // Tune this based on your network. Defaults to UDPPayloadSize. PayloadSize int } // NewUDPClient returns a client interface for writing to an InfluxDB UDP // service from the given config. func NewUDPClient(conf UDPConfig) (Client, error) { var udpAddr *net.UDPAddr udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) if err != nil { return nil, err } conn, err := net.DialUDP("udp", nil, udpAddr) if err != nil { return nil, err } payloadSize := conf.PayloadSize if payloadSize == 0 { payloadSize = UDPPayloadSize } return &udpclient{ conn: conn, payloadSize: payloadSize, }, nil } // Close releases the udpclient's resources. 
func (uc *udpclient) Close() error { return uc.conn.Close() } type udpclient struct { conn io.WriteCloser payloadSize int } func (uc *udpclient) Write(bp BatchPoints) error { var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed var d, _ = time.ParseDuration("1" + bp.Precision()) var delayedError error var checkBuffer = func(n int) { if len(b) > 0 && len(b)+n > uc.payloadSize { if _, err := uc.conn.Write(b); err != nil { delayedError = err } b = b[:0] } } for _, p := range bp.Points() { p.pt.Round(d) pointSize := p.pt.StringSize() + 1 // include newline in size //point := p.pt.RoundedString(d) + "\n" checkBuffer(pointSize) if p.Time().IsZero() || pointSize <= uc.payloadSize { b = p.pt.AppendString(b) b = append(b, '\n') continue } points := p.pt.Split(uc.payloadSize - 1) // account for newline character for _, sp := range points { checkBuffer(sp.StringSize() + 1) b = sp.AppendString(b) b = append(b, '\n') } } if len(b) > 0 { if _, err := uc.conn.Write(b); err != nil { return err } } return delayedError } func (uc *udpclient) Query(q Query) (*Response, error) { return nil, fmt.Errorf("Querying via UDP is not supported") } func (uc *udpclient) QueryAsChunk(q Query) (*ChunkedResponse, error) { return nil, fmt.Errorf("Querying via UDP is not supported") } func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { return 0, "", nil }
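// Note that Ping and Query are not supported over UDP (see above); the UDP
// client is effectively write-only.
//
// Illustrative sketch of how the UDP client is typically wired together
// (the address and batch settings are placeholders; ExampleClient_uDP in
// v2/example_test.go shows the same flow in runnable form):
//
//	c, err := NewUDPClient(UDPConfig{Addr: "localhost:8089"})
//	if err != nil {
//		// handle the error
//	}
//	defer c.Close()
//
//	bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "s"})
//	// add points with bp.AddPoint(...), then flush the batch:
//	if err := c.Write(bp); err != nil {
//		// handle the error
//	}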