diff --git a/.gitignore b/.gitignore index dc6a97725f1..37584197671 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ .DS_Store /tempodb/encoding/benchmark_block /cmd/tempo-serverless/vendor/ +/pkg/traceql/y.output \ No newline at end of file diff --git a/Makefile b/Makefile index 97d2c887192..40eb74d691b 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,12 @@ GOARCH ?= $(shell go env GOARCH) GOPATH := $(shell go env GOPATH) GORELEASER := $(GOPATH)/bin/goreleaser +# Build Images +DOCKER_PROTOBUF_IMAGE ?= otel/build-protobuf:0.2.1 +FLATBUFFERS_IMAGE ?= neomantra/flatbuffers +LOKI_BUILD_IMAGE ?= grafana/loki-build-image:0.15.0 +DOCS_IMAGE ?= grafana/docs-base:latest + # More exclusions can be added similarly with: -not -path './testbed/*' ALL_SRC := $(shell find . -name '*.go' \ -not -path './vendor*/*' \ @@ -128,9 +134,10 @@ ifndef COMPONENT $(error COMPONENT variable was not defined) endif -### Dependencies -DOCKER_PROTOBUF ?= otel/build-protobuf:0.2.1 -PROTOC = docker run --rm -u ${shell id -u} -v${PWD}:${PWD} -w${PWD} ${DOCKER_PROTOBUF} --proto_path=${PWD} +# ######### +# Gen Proto +# ######### +PROTOC = docker run --rm -u ${shell id -u} -v${PWD}:${PWD} -w${PWD} ${DOCKER_PROTOBUF_IMAGE} --proto_path=${PWD} PROTO_INTERMEDIATE_DIR = pkg/.patched-proto PROTO_INCLUDES = -I$(PROTO_INTERMEDIATE_DIR) PROTO_GEN = $(PROTOC) $(PROTO_INCLUDES) --gogofaster_out=plugins=grpc,paths=source_relative:$(2) $(1) @@ -181,15 +188,29 @@ gen-proto: rm -rf $(PROTO_INTERMEDIATE_DIR) +# ############## +# Gen Flatbuffer +# ############## .PHONY: gen-flat gen-flat: # -o /pkg generates into same folder as tempo.fbs for simpler imports. - docker run --rm -v${PWD}:/opt/src neomantra/flatbuffers flatc --go -o /opt/src/pkg /opt/src/pkg/tempofb/tempo.fbs + docker run --rm -v${PWD}:/opt/src ${FLATBUFFERS_IMAGE} flatc --go -o /opt/src/pkg /opt/src/pkg/tempofb/tempo.fbs + +# ############## +# Gen Traceql +# ############## +.PHONY: gen-traceql +gen-traceql: + docker run --rm -v${PWD}:/src/loki ${LOKI_BUILD_IMAGE} gen-traceql-local -### Check vendored files and generated proto +.PHONY: gen-traceql-local +gen-traceql-local: + goyacc -o pkg/traceql/expr.y.go pkg/traceql/expr.y && rm y.output + +### Check vendored and generated files are up to date .PHONY: vendor-check -vendor-check: gen-proto gen-flat update-mod - git diff --exit-code -- **/go.sum **/go.mod vendor/ pkg/tempopb/ pkg/tempofb/ +vendor-check: gen-proto gen-flat update-mod gen-traceql + git diff --exit-code -- **/go.sum **/go.mod vendor/ pkg/tempopb/ pkg/tempofb/ pkg/traceql/ ### Tidy dependencies for tempo and tempo-serverless modules .PHONY: update-mod @@ -213,8 +234,6 @@ release-snapshot: $(GORELEASER) $(GORELEASER) build --skip-validate --rm-dist --snapshot ### Docs -DOCS_IMAGE = grafana/docs-base:latest - .PHONY: docs docs: docker pull ${DOCS_IMAGE} @@ -257,7 +276,7 @@ drone: # piggyback on Loki's build image, this image contains a newer version of drone-cli than is # released currently (1.4.0). The newer version of drone-cli keeps drone.yml human-readable.
# This will run 'make drone-jsonnet' from within the container - docker run --rm -v $(shell pwd):/src/loki grafana/loki-build-image:0.15.0 drone-jsonnet + docker run --rm -v $(shell pwd):/src/loki ${LOKI_BUILD_IMAGE} drone-jsonnet drone lint .drone/drone.yml @make drone-signature diff --git a/go.mod b/go.mod index 33a48f28b50..1d5747132e0 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash/v2 v2.1.2 github.com/cristalhq/hedgedhttp v0.7.0 + github.com/davecgh/go-spew v1.1.1 github.com/drone/envsubst v1.0.3 github.com/dustin/go-humanize v1.0.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb @@ -116,7 +117,6 @@ require ( github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect - github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dimchansky/utfbom v1.1.1 // indirect diff --git a/pkg/traceql/ast.go b/pkg/traceql/ast.go new file mode 100644 index 00000000000..2c0ed6f4230 --- /dev/null +++ b/pkg/traceql/ast.go @@ -0,0 +1,426 @@ +package traceql + +import ( + "fmt" + "time" +) + +type element interface { + fmt.Stringer + validate() error +} + +type typedExpression interface { + impliedType() StaticType +} + +type RootExpr struct { + p Pipeline +} + +func newRootExpr(e element) *RootExpr { + p, ok := e.(Pipeline) + if !ok { + p = newPipeline(e) + } + + return &RootExpr{ + p: p, + } +} + +// ********************** +// Pipeline +// ********************** + +type Pipeline struct { + p []element +} + +// nolint: revive +func (Pipeline) __scalarExpression() {} + +// nolint: revive +func (Pipeline) __spansetExpression() {} + +func newPipeline(i ...element) Pipeline { + return Pipeline{ + p: i, + } +} + +func (p Pipeline) addItem(i element) Pipeline { + p.p = append(p.p, i) + return p +} + +func (p Pipeline) impliedType() StaticType { + if len(p.p) == 0 { + return typeSpanset + } + + finalItem := p.p[len(p.p)-1] + aggregate, ok := finalItem.(Aggregate) + if ok { + return aggregate.impliedType() + } + + return typeSpanset +} + +type GroupOperation struct { + e FieldExpression +} + +func newGroupOperation(e FieldExpression) GroupOperation { + return GroupOperation{ + e: e, + } +} + +type CoalesceOperation struct { +} + +func newCoalesceOperation() CoalesceOperation { + return CoalesceOperation{} +} + +// ********************** +// Scalars +// ********************** +type ScalarExpression interface { + element + typedExpression + __scalarExpression() +} + +type ScalarOperation struct { + op Operator + lhs ScalarExpression + rhs ScalarExpression +} + +func newScalarOperation(op Operator, lhs ScalarExpression, rhs ScalarExpression) ScalarOperation { + return ScalarOperation{ + op: op, + lhs: lhs, + rhs: rhs, + } +} + +// nolint: revive +func (ScalarOperation) __scalarExpression() {} + +func (o ScalarOperation) impliedType() StaticType { + if o.op.isBoolean() { + return typeBoolean + } + + // remaining operators will be based on the operands + // opAdd, opSub, opDiv, opMod, opMult + t := o.lhs.impliedType() + if t != typeAttribute { + return t + } + + return o.rhs.impliedType() +} + +type Aggregate struct { + agg AggregateOp + e FieldExpression +} + +func newAggregate(agg AggregateOp, e FieldExpression) Aggregate { + return Aggregate{ + agg: agg, + e: e, + } +} + 
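The constructors above and below assemble the AST bottom-up, one node per grammar production. A minimal sketch (not part of this diff) of the tree the parser produces for { .foo = `bar` } | count() > 2 — it uses only unexported constructors from ast.go plus the opEqual/opGreater/aggregateCount enums introduced later in this PR, so it could only live in a _test.go file inside package traceql:

func exampleAST() *RootExpr {
	// { .foo = `bar` } parses to a SpansetFilter wrapping a BinaryOperation
	filter := newSpansetFilter(newBinaryOperation(opEqual, newAttribute("foo"), newStaticString("bar")))
	// count() > 2 parses to a ScalarFilter comparing an Aggregate against a Static
	countFilter := newScalarFilter(opGreater, newAggregate(aggregateCount, nil), newStaticInt(2))
	// the pipe chains both stages into one Pipeline under the RootExpr
	return newRootExpr(newPipeline(filter).addItem(countFilter))
}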
+// nolint: revive +func (Aggregate) __scalarExpression() {} + +func (a Aggregate) impliedType() StaticType { + if a.agg == aggregateCount || a.e == nil { + return typeInt + } + + return a.e.impliedType() +} + +// ********************** +// Spansets +// ********************** +type SpansetExpression interface { + element + __spansetExpression() +} + +type SpansetOperation struct { + op Operator + lhs SpansetExpression + rhs SpansetExpression +} + +func newSpansetOperation(op Operator, lhs SpansetExpression, rhs SpansetExpression) SpansetOperation { + return SpansetOperation{ + op: op, + lhs: lhs, + rhs: rhs, + } +} + +// nolint: revive +func (SpansetOperation) __spansetExpression() {} + +type SpansetFilter struct { + e FieldExpression +} + +func newSpansetFilter(e FieldExpression) SpansetFilter { + return SpansetFilter{ + e: e, + } +} + +// nolint: revive +func (SpansetFilter) __spansetExpression() {} + +type ScalarFilter struct { + op Operator + lhs ScalarExpression + rhs ScalarExpression +} + +func newScalarFilter(op Operator, lhs ScalarExpression, rhs ScalarExpression) ScalarFilter { + return ScalarFilter{ + op: op, + lhs: lhs, + rhs: rhs, + } +} + +// nolint: revive +func (ScalarFilter) __spansetExpression() {} + +// ********************** +// Expressions +// ********************** +type FieldExpression interface { + element + typedExpression + + // referencesSpan returns true if this field expression has any attributes or intrinsics. i.e. it references the span itself + referencesSpan() bool + __fieldExpression() +} + +type BinaryOperation struct { + op Operator + lhs FieldExpression + rhs FieldExpression +} + +func newBinaryOperation(op Operator, lhs FieldExpression, rhs FieldExpression) BinaryOperation { + return BinaryOperation{ + op: op, + lhs: lhs, + rhs: rhs, + } +} + +// nolint: revive +func (BinaryOperation) __fieldExpression() {} + +func (o BinaryOperation) impliedType() StaticType { + if o.op.isBoolean() { + return typeBoolean + } + + // remaining operators will be based on the operands + // opAdd, opSub, opDiv, opMod, opMult + t := o.lhs.impliedType() + if t != typeAttribute { + return t + } + + return o.rhs.impliedType() +} + +func (o BinaryOperation) referencesSpan() bool { + return o.lhs.referencesSpan() || o.rhs.referencesSpan() +} + +type UnaryOperation struct { + op Operator + e FieldExpression +} + +func newUnaryOperation(op Operator, e FieldExpression) UnaryOperation { + return UnaryOperation{ + op: op, + e: e, + } +} + +// nolint: revive +func (UnaryOperation) __fieldExpression() {} + +func (o UnaryOperation) impliedType() StaticType { + // both operators (opPower and opNot) will just be based on the operand type + return o.e.impliedType() +} + +func (o UnaryOperation) referencesSpan() bool { + return o.e.referencesSpan() +} + +// ********************** +// Statics +// ********************** +type Static struct { + staticType StaticType + n int + f float64 + s string + b bool + d time.Duration + status Status +} + +// nolint: revive +func (Static) __fieldExpression() {} + +// nolint: revive +func (Static) __scalarExpression() {} + +func (Static) referencesSpan() bool { + return false +} + +func (s Static) impliedType() StaticType { + return s.staticType +} + +func newStaticInt(n int) Static { + return Static{ + staticType: typeInt, + n: n, + } +} + +func newStaticFloat(f float64) Static { + return Static{ + staticType: typeFloat, + f: f, + } +} + +func newStaticString(s string) Static { + return Static{ + staticType: typeString, + s: s, + } +} + +func 
newStaticBool(b bool) Static { + return Static{ + staticType: typeBoolean, + b: b, + } +} + +func newStaticNil() Static { + return Static{ + staticType: typeNil, + } +} + +func newStaticDuration(d time.Duration) Static { + return Static{ + staticType: typeDuration, + d: d, + } +} + +func newStaticStatus(s Status) Static { + return Static{ + staticType: typeStatus, + status: s, + } +} + +// ********************** +// Attributes +// ********************** + +type Attribute struct { + scope AttributeScope + parent bool + name string + intrinsic Intrinsic +} + +// newAttribute creates a new attribute with the given identifier string. If the identifier +// string matches an intrinsic, use that. +func newAttribute(att string) Attribute { + intrinsic := intrinsicFromString(att) + + return Attribute{ + scope: attributeScopeNone, + parent: false, + name: att, + intrinsic: intrinsic, + } +} + +// nolint: revive +func (Attribute) __fieldExpression() {} + +func (a Attribute) impliedType() StaticType { + switch a.intrinsic { + case intrinsicDuration: + return typeDuration + case intrinsicChildCount: + return typeInt + case intrinsicName: + return typeString + case intrinsicStatus: + return typeStatus + case intrinsicParent: + return typeNil + } + + return typeAttribute +} + +func (Attribute) referencesSpan() bool { + return true +} + +// newScopedAttribute creates a new scoped attribute with the given identifier string. +// This handles parent, span, and resource scopes. +func newScopedAttribute(scope AttributeScope, parent bool, att string) Attribute { + intrinsic := intrinsicNone + // if we are explicitly passed a resource or span scope then we shouldn't parse for intrinsic + if scope != attributeScopeResource && scope != attributeScopeSpan { + intrinsic = intrinsicFromString(att) + } + + return Attribute{ + scope: scope, + parent: parent, + name: att, + intrinsic: intrinsic, + } +} + +func newIntrinsic(n Intrinsic) Attribute { + return Attribute{ + scope: attributeScopeNone, + parent: false, + name: n.String(), + intrinsic: n, + } +} diff --git a/pkg/traceql/ast_stringer.go b/pkg/traceql/ast_stringer.go new file mode 100644 index 00000000000..322f13686d9 --- /dev/null +++ b/pkg/traceql/ast_stringer.go @@ -0,0 +1,125 @@ +package traceql + +import ( + "fmt" + "strconv" + "strings" +) + +func (r RootExpr) String() string { + return r.p.String() +} + +func (p Pipeline) String() string { + s := make([]string, 0, len(p.p)) + for _, p := range p.p { + s = append(s, p.String()) + } + return strings.Join(s, "|") +} + +func (o GroupOperation) String() string { + return "by(" + o.e.String() + ")" +} + +func (o CoalesceOperation) String() string { + return "coalesce()" +} + +func (o ScalarOperation) String() string { + return binaryOp(o.op, o.lhs, o.rhs) +} + +func (a Aggregate) String() string { + if a.e == nil { + return a.agg.String() + "()" + } + + return a.agg.String() + "(" + a.e.String() + ")" +} + +func (o SpansetOperation) String() string { + return binaryOp(o.op, o.lhs, o.rhs) +} + +func (f SpansetFilter) String() string { + return "{ " + f.e.String() + " }" +} + +func (f ScalarFilter) String() string { + return binaryOp(f.op, f.lhs, f.rhs) +} + +func (o BinaryOperation) String() string { + return binaryOp(o.op, o.lhs, o.rhs) +} + +func (o UnaryOperation) String() string { + return unaryOp(o.op, o.e) +} + +func (n Static) String() string { + switch n.staticType { + case typeInt: + return strconv.Itoa(n.n) + case typeFloat: + return strconv.FormatFloat(n.f, 'f', 5, 64) + case typeString: + return "`" +
n.s + "`" + case typeBoolean: + return strconv.FormatBool(n.b) + case typeNil: + return "nil" + case typeDuration: + return n.d.String() + case typeStatus: + return n.status.String() + } + + return fmt.Sprintf("static(%d)", n.staticType) +} + +func (a Attribute) String() string { + scopes := []string{} + if a.parent { + scopes = append(scopes, "parent") + } + + if a.scope != attributeScopeNone { + attributeScope := a.scope.String() + scopes = append(scopes, attributeScope) + } + + att := a.name + if a.intrinsic != intrinsicNone { + att = a.intrinsic.String() + } + + scope := "" + if len(scopes) > 0 { + scope = strings.Join(scopes, ".") + } + scope += "." + + return scope + att +} + +func binaryOp(op Operator, lhs element, rhs element) string { + return wrapElement(lhs) + " " + op.String() + " " + wrapElement(rhs) +} + +func unaryOp(op Operator, e element) string { + return op.String() + wrapElement(e) +} + +func wrapElement(e element) string { + static, ok := e.(Static) + if ok { + return static.String() + } + att, ok := e.(Attribute) + if ok { + return att.String() + } + return "(" + e.String() + ")" +} diff --git a/pkg/traceql/ast_stringer_test.go b/pkg/traceql/ast_stringer_test.go new file mode 100644 index 00000000000..d9048e82d2e --- /dev/null +++ b/pkg/traceql/ast_stringer_test.go @@ -0,0 +1,39 @@ +package traceql + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +const testExamplesFile = "./test_examples.yaml" + +func TestStringer(t *testing.T) { + b, err := os.ReadFile(testExamplesFile) + require.NoError(t, err) + + queries := &TestQueries{} + err = yaml.Unmarshal(b, queries) + require.NoError(t, err) + + for _, q := range queries.Valid { + t.Run(q, func(t *testing.T) { + pass1, err := Parse(q) + require.NoError(t, err) + + // now parse it a second time and confirm that it parses the same way twice + pass2, err := Parse(pass1.String()) + ok := assert.NoError(t, err) + if !ok { + t.Logf("\n\t1: %s", pass1.String()) + return + } + + assert.Equal(t, pass1, pass2) + t.Logf("\n\t1: %s\n\t2: %s", pass1.String(), pass2.String()) + }) + } +} diff --git a/pkg/traceql/ast_validate.go b/pkg/traceql/ast_validate.go new file mode 100644 index 00000000000..457c40a1284 --- /dev/null +++ b/pkg/traceql/ast_validate.go @@ -0,0 +1,159 @@ +package traceql + +import "fmt" + +func (r RootExpr) validate() error { + return r.p.validate() +} + +func (p Pipeline) validate() error { + for _, p := range p.p { + err := p.validate() + if err != nil { + return err + } + } + return nil +} + +func (o GroupOperation) validate() error { + if !o.e.referencesSpan() { + return fmt.Errorf("grouping field expressions must reference the span: %s", o.String()) + } + + return o.e.validate() +} + +func (o CoalesceOperation) validate() error { + return nil +} + +func (o ScalarOperation) validate() error { + if err := o.lhs.validate(); err != nil { + return err + } + if err := o.rhs.validate(); err != nil { + return err + } + + lhsT := o.lhs.impliedType() + rhsT := o.rhs.impliedType() + if !lhsT.isMatchingOperand(rhsT) { + return fmt.Errorf("binary operations must operate on the same type: %s", o.String()) + } + + if !o.op.binaryTypesValid(lhsT, rhsT) { + return fmt.Errorf("illegal operation for the given types: %s", o.String()) + } + + return nil +} + +func (a Aggregate) validate() error { + if a.e == nil { + return nil + } + + if err := a.e.validate(); err != nil { + return err + } + + // aggregate field expressions require a type of a 
number or attribute + t := a.e.impliedType() + if t != typeAttribute && !t.isNumeric() { + return fmt.Errorf("aggregate field expressions must resolve to a number type: %s", a.String()) + } + + if !a.e.referencesSpan() { + return fmt.Errorf("aggregate field expressions must reference the span: %s", a.String()) + } + + return nil +} + +func (o SpansetOperation) validate() error { + if err := o.lhs.validate(); err != nil { + return err + } + return o.rhs.validate() +} + +func (f SpansetFilter) validate() error { + if err := f.e.validate(); err != nil { + return err + } + + t := f.e.impliedType() + if t != typeAttribute && t != typeBoolean { + return fmt.Errorf("span filter field expressions must resolve to a boolean: %s", f.String()) + } + + return nil +} + +func (f ScalarFilter) validate() error { + if err := f.lhs.validate(); err != nil { + return err + } + if err := f.rhs.validate(); err != nil { + return err + } + + lhsT := f.lhs.impliedType() + rhsT := f.rhs.impliedType() + if !lhsT.isMatchingOperand(rhsT) { + return fmt.Errorf("binary operations must operate on the same type: %s", f.String()) + } + + if !f.op.binaryTypesValid(lhsT, rhsT) { + return fmt.Errorf("illegal operation for the given types: %s", f.String()) + } + + return nil +} + +func (o BinaryOperation) validate() error { + if err := o.lhs.validate(); err != nil { + return err + } + if err := o.rhs.validate(); err != nil { + return err + } + + lhsT := o.lhs.impliedType() + rhsT := o.rhs.impliedType() + if !lhsT.isMatchingOperand(rhsT) { + return fmt.Errorf("binary operations must operate on the same type: %s", o.String()) + } + + if !o.op.binaryTypesValid(lhsT, rhsT) { + return fmt.Errorf("illegal operation for the given types: %s", o.String()) + } + + return nil +} + +func (o UnaryOperation) validate() error { + if err := o.e.validate(); err != nil { + return err + } + + t := o.e.impliedType() + if t == typeAttribute { + return nil + } + + if !o.op.unaryTypesValid(t) { + return fmt.Errorf("illegal operation for the given type: %s", o.String()) + } + + return nil +} + +func (n Static) validate() error { + return nil +} + +func (a Attribute) validate() error { + return nil +} diff --git a/pkg/traceql/ast_validate_test.go b/pkg/traceql/ast_validate_test.go new file mode 100644 index 00000000000..9ad9bbcbca4 --- /dev/null +++ b/pkg/traceql/ast_validate_test.go @@ -0,0 +1,62 @@ +package traceql + +import ( + "os" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +type TestQueries struct { + Valid []string `yaml:"valid"` + ParseFails []string `yaml:"parse_fails"` + ValidateFails []string `yaml:"validate_fails"` + Dump []string `yaml:"dump"` +} + +func TestExamples(t *testing.T) { + b, err := os.ReadFile(testExamplesFile) + require.NoError(t, err) + + queries := &TestQueries{} + err = yaml.Unmarshal(b, queries) + require.NoError(t, err) + + for _, q := range queries.Valid { + t.Run("valid - "+q, func(t *testing.T) { + p, err := Parse(q) + require.NoError(t, err) + err = p.validate() + require.NoError(t, err) + }) + } + + for _, q := range queries.ParseFails { + t.Run("parse fails - "+q, func(t *testing.T) { + _, err := Parse(q) + require.Error(t, err) + }) + } + + for _, q := range queries.ValidateFails { + t.Run("validate fails - "+q, func(t *testing.T) { + p, err := Parse(q) + require.NoError(t, err) + err = p.validate() + require.Error(t, err) + }) + } + + scs := spew.ConfigState{DisableMethods: true, Indent: " "} + for _, q := range queries.Dump { + t.Run("dump 
- "+q, func(t *testing.T) { + yyDebug = 3 + p, err := Parse(q) + yyDebug = 0 + require.NoError(t, err) + scs.Dump(p) + }) + } +} diff --git a/pkg/traceql/enum_aggregates.go b/pkg/traceql/enum_aggregates.go new file mode 100644 index 00000000000..0ae95617352 --- /dev/null +++ b/pkg/traceql/enum_aggregates.go @@ -0,0 +1,31 @@ +package traceql + +import "fmt" + +type AggregateOp int + +const ( + aggregateCount AggregateOp = iota + aggregateMax + aggregateMin + aggregateSum + aggregateAvg +) + +func (a AggregateOp) String() string { + + switch a { + case aggregateCount: + return "count" + case aggregateMax: + return "max" + case aggregateMin: + return "min" + case aggregateSum: + return "sum" + case aggregateAvg: + return "avg" + } + + return fmt.Sprintf("aggregate(%d)", a) +} diff --git a/pkg/traceql/enum_attributes.go b/pkg/traceql/enum_attributes.go new file mode 100644 index 00000000000..31ef756bf11 --- /dev/null +++ b/pkg/traceql/enum_attributes.go @@ -0,0 +1,72 @@ +package traceql + +import "fmt" + +type AttributeScope int + +const ( + attributeScopeNone AttributeScope = iota + attributeScopeResource + attributeScopeSpan +) + +func (s AttributeScope) String() string { + switch s { + case attributeScopeNone: + return "none" + case attributeScopeSpan: + return "span" + case attributeScopeResource: + return "resource" + } + + return fmt.Sprintf("att(%d).", s) +} + +type Intrinsic int + +const ( + intrinsicNone Intrinsic = iota + intrinsicDuration + intrinsicChildCount + intrinsicName + intrinsicStatus + intrinsicParent +) + +func (i Intrinsic) String() string { + switch i { + case intrinsicNone: + return "none" + case intrinsicDuration: + return "duration" + case intrinsicName: + return "name" + case intrinsicStatus: + return "status" + case intrinsicChildCount: + return "childCount" + case intrinsicParent: + return "parent" + } + + return fmt.Sprintf("intrinsic(%d)", i) +} + +// intrinsicFromString returns the matching intrinsic for the given string or -1 if there is none +func intrinsicFromString(s string) Intrinsic { + switch s { + case "duration": + return intrinsicDuration + case "name": + return intrinsicName + case "status": + return intrinsicStatus + case "childCount": + return intrinsicChildCount + case "parent": + return intrinsicParent + } + + return intrinsicNone +} diff --git a/pkg/traceql/enum_operators.go b/pkg/traceql/enum_operators.go new file mode 100644 index 00000000000..b8dfdf12e96 --- /dev/null +++ b/pkg/traceql/enum_operators.go @@ -0,0 +1,157 @@ +package traceql + +import "fmt" + +type Operator int + +const ( + opAdd Operator = iota + opSub + opDiv + opMod + opMult + opEqual + opNotEqual + opRegex + opNotRegex + opGreater + opGreaterEqual + opLess + opLessEqual + opPower + opAnd + opOr + opNot + opSpansetChild + opSpansetDescendant + opSpansetAnd + opSpansetUnion + opSpansetSibling +) + +func (op Operator) isBoolean() bool { + return op == opOr || + op == opAnd || + op == opEqual || + op == opNotEqual || + op == opRegex || + op == opNotRegex || + op == opGreater || + op == opGreaterEqual || + op == opLess || + op == opLessEqual || + op == opNot +} + +func (op Operator) binaryTypesValid(lhsT StaticType, rhsT StaticType) bool { + return binaryTypeValid(op, lhsT) && binaryTypeValid(op, rhsT) +} + +func binaryTypeValid(op Operator, t StaticType) bool { + if t == typeAttribute { + return true + } + + switch t { + case typeBoolean: + return op == opAnd || + op == opOr || + op == opEqual || + op == opNotEqual + case typeFloat: + fallthrough + case typeInt: + fallthrough + 
case typeDuration: + return op == opAdd || + op == opSub || + op == opMult || + op == opDiv || + op == opMod || + op == opPower || + op == opEqual || + op == opNotEqual || + op == opGreater || + op == opGreaterEqual || + op == opLess || + op == opLessEqual + case typeString: + return op == opEqual || + op == opNotEqual || + op == opRegex || + op == opNotRegex + case typeNil: + fallthrough + case typeStatus: + return op == opEqual || op == opNotEqual + } + + return false +} + +func (op Operator) unaryTypesValid(t StaticType) bool { + if t == typeAttribute { + return true + } + + switch op { + case opSub: + return t.isNumeric() + case opNot: + return t == typeBoolean + } + + return false +} + +func (op Operator) String() string { + + switch op { + case opAdd: + return "+" + case opSub: + return "-" + case opDiv: + return "/" + case opMod: + return "%" + case opMult: + return "*" + case opEqual: + return "=" + case opNotEqual: + return "!=" + case opRegex: + return "=~" + case opNotRegex: + return "!~" + case opGreater: + return ">" + case opGreaterEqual: + return ">=" + case opLess: + return "<" + case opLessEqual: + return "<=" + case opPower: + return "^" + case opAnd: + return "&&" + case opOr: + return "||" + case opNot: + return "!" + case opSpansetChild: + return ">" + case opSpansetDescendant: + return ">>" + case opSpansetAnd: + return "&&" + case opSpansetSibling: + return "~" + case opSpansetUnion: + return "||" + } + + return fmt.Sprintf("operator(%d)", op) +} diff --git a/pkg/traceql/enum_operators_test.go b/pkg/traceql/enum_operators_test.go new file mode 100644 index 00000000000..865c74133c4 --- /dev/null +++ b/pkg/traceql/enum_operators_test.go @@ -0,0 +1,163 @@ +package traceql + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOperatorIsBoolean(t *testing.T) { + tt := []struct { + op Operator + expected bool + }{ + {opAdd, false}, + {opSub, false}, + {opDiv, false}, + {opMod, false}, + {opMult, false}, + {opEqual, true}, + {opNotEqual, true}, + {opRegex, true}, + {opNotRegex, true}, + {opGreater, true}, + {opGreaterEqual, true}, + {opLess, true}, + {opLessEqual, true}, + {opPower, false}, + {opAnd, true}, + {opOr, true}, + {opNot, true}, + {opSpansetChild, false}, + {opSpansetDescendant, false}, + {opSpansetAnd, false}, + {opSpansetUnion, false}, + {opSpansetSibling, false}, + } + + for _, tc := range tt { + t.Run(tc.op.String(), func(t *testing.T) { + actual := tc.op.isBoolean() + assert.Equal(t, tc.expected, actual) + }) + } +} + +func TestOperatorBinaryTypesValid(t *testing.T) { + tt := []struct { + op Operator + t StaticType + expected bool + }{ + // numeric + {opAdd, typeInt, true}, + {opDiv, typeDuration, true}, + {opMod, typeFloat, true}, + {opMult, typeInt, true}, + {opPower, typeDuration, true}, + {opSub, typeAttribute, true}, + + {opAdd, typeString, false}, + {opDiv, typeSpanset, false}, + {opMod, typeStatus, false}, + {opMult, typeNil, false}, + {opPower, typeBoolean, false}, + // equality + {opEqual, typeDuration, true}, + {opNotEqual, typeStatus, true}, + {opEqual, typeString, true}, + {opNotEqual, typeInt, true}, + {opEqual, typeNil, true}, + {opNotEqual, typeAttribute, true}, + {opEqual, typeBoolean, true}, + {opNotEqual, typeFloat, true}, + + {opEqual, typeSpanset, false}, + // range comparison + {opGreater, typeInt, true}, + {opGreaterEqual, typeFloat, true}, + {opLess, typeFloat, true}, + {opLessEqual, typeDuration, true}, + + {opGreater, typeStatus, false}, + {opGreaterEqual, typeNil, false}, + {opLess, typeString, false}, + 
{opLessEqual, typeBoolean, false}, + // string comparison + {opRegex, typeString, true}, + {opNotRegex, typeAttribute, true}, + {opRegex, typeString, true}, + + {opRegex, typeInt, false}, + {opNotRegex, typeInt, false}, + // boolean + {opAnd, typeBoolean, true}, + {opOr, typeAttribute, true}, + {opAnd, typeAttribute, true}, + + {opAnd, typeDuration, false}, + {opOr, typeDuration, false}, + // not + {opNot, typeBoolean, false}, + } + + for _, tc := range tt { + t.Run(tc.op.String(), func(t *testing.T) { + actual := tc.op.binaryTypesValid(tc.t, typeAttribute) + assert.Equal(t, tc.expected, actual) + actual = tc.op.binaryTypesValid(typeAttribute, tc.t) + assert.Equal(t, tc.expected, actual) + actual = tc.op.binaryTypesValid(tc.t, tc.t) + assert.Equal(t, tc.expected, actual) + }) + } +} + +func TestOperatorUnaryTypesValid(t *testing.T) { + tt := []struct { + op Operator + t StaticType + expected bool + }{ + {opAdd, typeInt, false}, + {opDiv, typeInt, false}, + {opMod, typeInt, false}, + {opMult, typeInt, false}, + {opEqual, typeInt, false}, + {opNotEqual, typeInt, false}, + {opRegex, typeInt, false}, + {opNotRegex, typeInt, false}, + {opGreater, typeInt, false}, + {opGreaterEqual, typeInt, false}, + {opLess, typeInt, false}, + {opLessEqual, typeInt, false}, + {opPower, typeInt, false}, + {opAnd, typeInt, false}, + {opOr, typeInt, false}, + {opSpansetChild, typeInt, false}, + {opSpansetDescendant, typeInt, false}, + {opSpansetAnd, typeInt, false}, + {opSpansetUnion, typeInt, false}, + {opSpansetSibling, typeInt, false}, + // not + {opNot, typeBoolean, true}, + {opNot, typeInt, false}, + {opNot, typeNil, false}, + {opNot, typeString, false}, + // sub + {opSub, typeInt, true}, + {opSub, typeFloat, true}, + {opSub, typeDuration, true}, + {opSub, typeBoolean, false}, + {opSub, typeStatus, false}, + {opSub, typeNil, false}, + {opSub, typeSpanset, false}, + } + + for _, tc := range tt { + t.Run(tc.op.String(), func(t *testing.T) { + actual := tc.op.unaryTypesValid(tc.t) + assert.Equal(t, tc.expected, actual) + }) + } +} diff --git a/pkg/traceql/enum_statics.go b/pkg/traceql/enum_statics.go new file mode 100644 index 00000000000..e9d3387cc09 --- /dev/null +++ b/pkg/traceql/enum_statics.go @@ -0,0 +1,62 @@ +package traceql + +import "fmt" + +type StaticType int + +const ( + typeSpanset StaticType = iota // type used by spanset pipelines + typeAttribute // a special constant that indicates the type is determined at query time by the attribute + typeInt + typeFloat + typeString + typeBoolean + typeNil + typeDuration + typeStatus +) + +// isMatchingOperand returns whether two types can be combined with a binary operator. the kind of operator is +// immaterial. see Operator.binaryTypesValid() for code that determines if the passed types are valid for the given +// operator.
+func (t StaticType) isMatchingOperand(otherT StaticType) bool { + if t == typeAttribute || otherT == typeAttribute { + return true + } + + if t == otherT { + return true + } + + if t.isNumeric() && otherT.isNumeric() { + return true + } + + return false +} + +func (t StaticType) isNumeric() bool { + return t == typeInt || t == typeFloat || t == typeDuration +} + +// Status represents valid static values of typeStatus +type Status int + +const ( + statusError Status = iota + statusOk + statusUnset +) + +func (s Status) String() string { + switch s { + case statusError: + return "error" + case statusOk: + return "ok" + case statusUnset: + return "unset" + } + + return fmt.Sprintf("status(%d)", s) +} diff --git a/pkg/traceql/expr.y b/pkg/traceql/expr.y new file mode 100644 index 00000000000..da6766c17d8 --- /dev/null +++ b/pkg/traceql/expr.y @@ -0,0 +1,265 @@ +%{ +package traceql + +import ( + "time" +) +%} + +%start root + +%union { + root RootExpr + groupOperation GroupOperation + coalesceOperation CoalesceOperation + + spansetExpression SpansetExpression + spansetPipelineExpression SpansetExpression + wrappedSpansetPipeline Pipeline + spansetPipeline Pipeline + spansetFilter SpansetFilter + scalarFilter ScalarFilter + scalarFilterOperation Operator + + scalarPipelineExpressionFilter ScalarFilter + scalarPipelineExpression ScalarExpression + scalarExpression ScalarExpression + wrappedScalarPipeline Pipeline + scalarPipeline Pipeline + aggregate Aggregate + + fieldExpression FieldExpression + static Static + intrinsicField Attribute + attributeField Attribute + + binOp Operator + staticInt int + staticStr string + staticFloat float64 + staticDuration time.Duration +} + +%type <root> root +%type <groupOperation> groupOperation +%type <coalesceOperation> coalesceOperation + +%type <spansetExpression> spansetExpression +%type <spansetPipelineExpression> spansetPipelineExpression +%type <wrappedSpansetPipeline> wrappedSpansetPipeline +%type <spansetPipeline> spansetPipeline +%type <spansetFilter> spansetFilter +%type <scalarFilter> scalarFilter +%type <scalarFilterOperation> scalarFilterOperation + +%type <scalarPipelineExpressionFilter> scalarPipelineExpressionFilter +%type <scalarPipelineExpression> scalarPipelineExpression +%type <scalarExpression> scalarExpression +%type <wrappedScalarPipeline> wrappedScalarPipeline +%type <scalarPipeline> scalarPipeline +%type <aggregate> aggregate + +%type <fieldExpression> fieldExpression +%type <static> static +%type <intrinsicField> intrinsicField +%type <attributeField> attributeField + +%token <staticStr> IDENTIFIER STRING +%token <staticInt> INTEGER +%token <staticFloat> FLOAT +%token <staticDuration> DURATION +%token DOT OPEN_BRACE CLOSE_BRACE OPEN_PARENS CLOSE_PARENS + NIL TRUE FALSE STATUS_ERROR STATUS_OK STATUS_UNSET + IDURATION CHILDCOUNT NAME STATUS PARENT + PARENT_DOT RESOURCE_DOT SPAN_DOT + COUNT AVG MAX MIN SUM + BY COALESCE + END_ATTRIBUTE + +// Operators are listed with increasing precedence. +%left PIPE +%left AND OR +%left EQ NEQ LT LTE GT GTE NRE RE DESC TILDE +%left ADD SUB +%left NOT +%left MUL DIV MOD +%right POW +%% + +// ********************** +// Pipeline +// ********************** +root: + spansetPipeline { yylex.(*lexer).expr = newRootExpr($1) } + | spansetPipelineExpression { yylex.(*lexer).expr = newRootExpr($1) } + | scalarPipelineExpressionFilter { yylex.(*lexer).expr = newRootExpr($1) } + ; + +// ********************** +// Spanset Expressions +// ********************** +spansetPipelineExpression: // shares the same operators as spansetExpression.
split out for readability + OPEN_PARENS spansetPipelineExpression CLOSE_PARENS { $$ = $2 } + | spansetPipelineExpression AND spansetPipelineExpression { $$ = newSpansetOperation(opSpansetAnd, $1, $3) } + | spansetPipelineExpression GT spansetPipelineExpression { $$ = newSpansetOperation(opSpansetChild, $1, $3) } + | spansetPipelineExpression DESC spansetPipelineExpression { $$ = newSpansetOperation(opSpansetDescendant, $1, $3) } + | spansetPipelineExpression OR spansetPipelineExpression { $$ = newSpansetOperation(opSpansetUnion, $1, $3) } + | spansetPipelineExpression TILDE spansetPipelineExpression { $$ = newSpansetOperation(opSpansetSibling, $1, $3) } + | wrappedSpansetPipeline { $$ = $1 } + ; + +wrappedSpansetPipeline: + OPEN_PARENS spansetPipeline CLOSE_PARENS { $$ = $2 } + +spansetPipeline: + spansetExpression { $$ = newPipeline($1) } + | scalarFilter { $$ = newPipeline($1) } + | groupOperation { $$ = newPipeline($1) } + | spansetPipeline PIPE scalarFilter { $$ = $1.addItem($3) } + | spansetPipeline PIPE spansetExpression { $$ = $1.addItem($3) } + | spansetPipeline PIPE groupOperation { $$ = $1.addItem($3) } + | spansetPipeline PIPE coalesceOperation { $$ = $1.addItem($3) } + ; + +groupOperation: + BY OPEN_PARENS fieldExpression CLOSE_PARENS { $$ = newGroupOperation($3) } + ; + +coalesceOperation: + COALESCE OPEN_PARENS CLOSE_PARENS { $$ = newCoalesceOperation() } + ; + +spansetExpression: // shares the same operators as spansetPipelineExpression. split out for readability + OPEN_PARENS spansetExpression CLOSE_PARENS { $$ = $2 } + | spansetExpression AND spansetExpression { $$ = newSpansetOperation(opSpansetAnd, $1, $3) } + | spansetExpression GT spansetExpression { $$ = newSpansetOperation(opSpansetChild, $1, $3) } + | spansetExpression DESC spansetExpression { $$ = newSpansetOperation(opSpansetDescendant, $1, $3) } + | spansetExpression OR spansetExpression { $$ = newSpansetOperation(opSpansetUnion, $1, $3) } + | spansetExpression TILDE spansetExpression { $$ = newSpansetOperation(opSpansetSibling, $1, $3) } + | spansetFilter { $$ = $1 } + ; + +spansetFilter: + OPEN_BRACE fieldExpression CLOSE_BRACE { $$ = newSpansetFilter($2) } + ; + +scalarFilter: + scalarExpression scalarFilterOperation scalarExpression { $$ = newScalarFilter($2, $1, $3) } + ; + +scalarFilterOperation: + EQ { $$ = opEqual } + | NEQ { $$ = opNotEqual } + | LT { $$ = opLess } + | LTE { $$ = opLessEqual } + | GT { $$ = opGreater } + | GTE { $$ = opGreaterEqual } + ; + +// ********************** +// Scalar Expressions +// ********************** +scalarPipelineExpressionFilter: + scalarPipelineExpression scalarFilterOperation scalarPipelineExpression { $$ = newScalarFilter($2, $1, $3) } + | scalarPipelineExpression scalarFilterOperation static { $$ = newScalarFilter($2, $1, $3) } + ; + +scalarPipelineExpression: // shares the same operators as scalarExpression.
split out for readability + OPEN_PARENS scalarPipelineExpression CLOSE_PARENS { $$ = $2 } + | scalarPipelineExpression ADD scalarPipelineExpression { $$ = newScalarOperation(opAdd, $1, $3) } + | scalarPipelineExpression SUB scalarPipelineExpression { $$ = newScalarOperation(opSub, $1, $3) } + | scalarPipelineExpression MUL scalarPipelineExpression { $$ = newScalarOperation(opMult, $1, $3) } + | scalarPipelineExpression DIV scalarPipelineExpression { $$ = newScalarOperation(opDiv, $1, $3) } + | scalarPipelineExpression MOD scalarPipelineExpression { $$ = newScalarOperation(opMod, $1, $3) } + | scalarPipelineExpression POW scalarPipelineExpression { $$ = newScalarOperation(opPower, $1, $3) } + | wrappedScalarPipeline { $$ = $1 } + ; + +wrappedScalarPipeline: + OPEN_PARENS scalarPipeline CLOSE_PARENS { $$ = $2 } + ; + +scalarPipeline: + spansetPipeline PIPE scalarExpression { $$ = $1.addItem($3) } + ; + +scalarExpression: // shares the same operators as scalarPipelineExpression. split out for readability + OPEN_PARENS scalarExpression CLOSE_PARENS { $$ = $2 } + | scalarExpression ADD scalarExpression { $$ = newScalarOperation(opAdd, $1, $3) } + | scalarExpression SUB scalarExpression { $$ = newScalarOperation(opSub, $1, $3) } + | scalarExpression MUL scalarExpression { $$ = newScalarOperation(opMult, $1, $3) } + | scalarExpression DIV scalarExpression { $$ = newScalarOperation(opDiv, $1, $3) } + | scalarExpression MOD scalarExpression { $$ = newScalarOperation(opMod, $1, $3) } + | scalarExpression POW scalarExpression { $$ = newScalarOperation(opPower, $1, $3) } + | aggregate { $$ = $1 } + | static { $$ = $1 } + ; + +aggregate: + COUNT OPEN_PARENS CLOSE_PARENS { $$ = newAggregate(aggregateCount, nil) } + | MAX OPEN_PARENS fieldExpression CLOSE_PARENS { $$ = newAggregate(aggregateMax, $3) } + | MIN OPEN_PARENS fieldExpression CLOSE_PARENS { $$ = newAggregate(aggregateMin, $3) } + | AVG OPEN_PARENS fieldExpression CLOSE_PARENS { $$ = newAggregate(aggregateAvg, $3) } + | SUM OPEN_PARENS fieldExpression CLOSE_PARENS { $$ = newAggregate(aggregateSum, $3) } + ; + +// ********************** +// FieldExpressions +// ********************** +fieldExpression: + OPEN_PARENS fieldExpression CLOSE_PARENS { $$ = $2 } + | fieldExpression ADD fieldExpression { $$ = newBinaryOperation(opAdd, $1, $3) } + | fieldExpression SUB fieldExpression { $$ = newBinaryOperation(opSub, $1, $3) } + | fieldExpression MUL fieldExpression { $$ = newBinaryOperation(opMult, $1, $3) } + | fieldExpression DIV fieldExpression { $$ = newBinaryOperation(opDiv, $1, $3) } + | fieldExpression MOD fieldExpression { $$ = newBinaryOperation(opMod, $1, $3) } + | fieldExpression EQ fieldExpression { $$ = newBinaryOperation(opEqual, $1, $3) } + | fieldExpression NEQ fieldExpression { $$ = newBinaryOperation(opNotEqual, $1, $3) } + | fieldExpression LT fieldExpression { $$ = newBinaryOperation(opLess, $1, $3) } + | fieldExpression LTE fieldExpression { $$ = newBinaryOperation(opLessEqual, $1, $3) } + | fieldExpression GT fieldExpression { $$ = newBinaryOperation(opGreater, $1, $3) } + | fieldExpression GTE fieldExpression { $$ = newBinaryOperation(opGreaterEqual, $1, $3) } + | fieldExpression RE fieldExpression { $$ = newBinaryOperation(opRegex, $1, $3) } + | fieldExpression NRE fieldExpression { $$ = newBinaryOperation(opNotRegex, $1, $3) } + | fieldExpression POW fieldExpression { $$ = newBinaryOperation(opPower, $1, $3) } + | fieldExpression AND fieldExpression { $$ = newBinaryOperation(opAnd, $1, $3) } + | fieldExpression OR 
fieldExpression { $$ = newBinaryOperation(opOr, $1, $3) } + | SUB fieldExpression { $$ = newUnaryOperation(opSub, $2) } + | NOT fieldExpression { $$ = newUnaryOperation(opNot, $2) } + | static { $$ = $1 } + | intrinsicField { $$ = $1 } + | attributeField { $$ = $1 } + ; + +// ********************** +// Statics +// ********************** +static: + STRING { $$ = newStaticString($1) } + | INTEGER { $$ = newStaticInt($1) } + | FLOAT { $$ = newStaticFloat($1) } + | TRUE { $$ = newStaticBool(true) } + | FALSE { $$ = newStaticBool(false) } + | NIL { $$ = newStaticNil() } + | DURATION { $$ = newStaticDuration($1) } + | STATUS_OK { $$ = newStaticStatus(statusOk) } + | STATUS_ERROR { $$ = newStaticStatus(statusError) } + | STATUS_UNSET { $$ = newStaticStatus(statusUnset) } + ; + +intrinsicField: + IDURATION { $$ = newIntrinsic(intrinsicDuration) } + | CHILDCOUNT { $$ = newIntrinsic(intrinsicChildCount) } + | NAME { $$ = newIntrinsic(intrinsicName) } + | STATUS { $$ = newIntrinsic(intrinsicStatus) } + | PARENT { $$ = newIntrinsic(intrinsicParent) } + ; + +attributeField: + DOT IDENTIFIER END_ATTRIBUTE { $$ = newAttribute($2) } + | RESOURCE_DOT IDENTIFIER END_ATTRIBUTE { $$ = newScopedAttribute(attributeScopeResource, false, $2) } + | SPAN_DOT IDENTIFIER END_ATTRIBUTE { $$ = newScopedAttribute(attributeScopeSpan, false, $2) } + | PARENT_DOT IDENTIFIER END_ATTRIBUTE { $$ = newScopedAttribute(attributeScopeNone, true, $2) } + | PARENT_DOT RESOURCE_DOT IDENTIFIER END_ATTRIBUTE { $$ = newScopedAttribute(attributeScopeResource, true, $3) } + | PARENT_DOT SPAN_DOT IDENTIFIER END_ATTRIBUTE { $$ = newScopedAttribute(attributeScopeSpan, true, $3) } + ; \ No newline at end of file diff --git a/pkg/traceql/expr.y.go b/pkg/traceql/expr.y.go new file mode 100644 index 00000000000..94a9f761eb2 --- /dev/null +++ b/pkg/traceql/expr.y.go @@ -0,0 +1,1328 @@ +// Code generated by goyacc -o pkg/traceql/expr.y.go pkg/traceql/expr.y. DO NOT EDIT. 
+ +//line pkg/traceql/expr.y:2 +package traceql + +import __yyfmt__ "fmt" + +//line pkg/traceql/expr.y:2 + +import ( + "time" +) + +//line pkg/traceql/expr.y:11 +type yySymType struct { + yys int + root RootExpr + groupOperation GroupOperation + coalesceOperation CoalesceOperation + + spansetExpression SpansetExpression + spansetPipelineExpression SpansetExpression + wrappedSpansetPipeline Pipeline + spansetPipeline Pipeline + spansetFilter SpansetFilter + scalarFilter ScalarFilter + scalarFilterOperation Operator + + scalarPipelineExpressionFilter ScalarFilter + scalarPipelineExpression ScalarExpression + scalarExpression ScalarExpression + wrappedScalarPipeline Pipeline + scalarPipeline Pipeline + aggregate Aggregate + + fieldExpression FieldExpression + static Static + intrinsicField Attribute + attributeField Attribute + + binOp Operator + staticInt int + staticStr string + staticFloat float64 + staticDuration time.Duration +} + +const IDENTIFIER = 57346 +const STRING = 57347 +const INTEGER = 57348 +const FLOAT = 57349 +const DURATION = 57350 +const DOT = 57351 +const OPEN_BRACE = 57352 +const CLOSE_BRACE = 57353 +const OPEN_PARENS = 57354 +const CLOSE_PARENS = 57355 +const NIL = 57356 +const TRUE = 57357 +const FALSE = 57358 +const STATUS_ERROR = 57359 +const STATUS_OK = 57360 +const STATUS_UNSET = 57361 +const IDURATION = 57362 +const CHILDCOUNT = 57363 +const NAME = 57364 +const STATUS = 57365 +const PARENT = 57366 +const PARENT_DOT = 57367 +const RESOURCE_DOT = 57368 +const SPAN_DOT = 57369 +const COUNT = 57370 +const AVG = 57371 +const MAX = 57372 +const MIN = 57373 +const SUM = 57374 +const BY = 57375 +const COALESCE = 57376 +const END_ATTRIBUTE = 57377 +const PIPE = 57378 +const AND = 57379 +const OR = 57380 +const EQ = 57381 +const NEQ = 57382 +const LT = 57383 +const LTE = 57384 +const GT = 57385 +const GTE = 57386 +const NRE = 57387 +const RE = 57388 +const DESC = 57389 +const TILDE = 57390 +const ADD = 57391 +const SUB = 57392 +const NOT = 57393 +const MUL = 57394 +const DIV = 57395 +const MOD = 57396 +const POW = 57397 + +var yyToknames = [...]string{ + "$end", + "error", + "$unk", + "IDENTIFIER", + "STRING", + "INTEGER", + "FLOAT", + "DURATION", + "DOT", + "OPEN_BRACE", + "CLOSE_BRACE", + "OPEN_PARENS", + "CLOSE_PARENS", + "NIL", + "TRUE", + "FALSE", + "STATUS_ERROR", + "STATUS_OK", + "STATUS_UNSET", + "IDURATION", + "CHILDCOUNT", + "NAME", + "STATUS", + "PARENT", + "PARENT_DOT", + "RESOURCE_DOT", + "SPAN_DOT", + "COUNT", + "AVG", + "MAX", + "MIN", + "SUM", + "BY", + "COALESCE", + "END_ATTRIBUTE", + "PIPE", + "AND", + "OR", + "EQ", + "NEQ", + "LT", + "LTE", + "GT", + "GTE", + "NRE", + "RE", + "DESC", + "TILDE", + "ADD", + "SUB", + "NOT", + "MUL", + "DIV", + "MOD", + "POW", +} +var yyStatenames = [...]string{} + +const yyEofCode = 1 +const yyErrCode = 2 +const yyInitialStackSize = 16 + +//line yacctab:1 +var yyExca = [...]int{ + -1, 1, + 1, -1, + -2, 0, +} + +const yyPrivate = 57344 + +const yyLast = 637 + +var yyAct = [...]int{ + + 75, 17, 6, 7, 5, 169, 2, 149, 12, 17, + 69, 56, 111, 46, 45, 112, 33, 49, 116, 136, + 137, 204, 138, 139, 140, 149, 64, 65, 203, 66, + 67, 68, 69, 195, 17, 33, 92, 94, 93, 138, + 139, 140, 149, 194, 104, 106, 107, 108, 109, 193, + 192, 118, 202, 168, 64, 65, 71, 66, 67, 68, + 69, 15, 161, 105, 17, 17, 17, 17, 17, 17, + 17, 115, 126, 128, 129, 130, 131, 132, 133, 141, + 142, 143, 144, 145, 146, 148, 147, 47, 10, 136, + 137, 119, 138, 139, 140, 149, 99, 17, 197, 111, + 17, 166, 91, 51, 52, 167, 53, 54, 55, 56, + 166, 90, 89, 17, 40, 92, 94, 93, 
41, 43, + 17, 171, 112, 88, 87, 173, 201, 134, 17, 152, + 153, 154, 66, 67, 68, 69, 167, 70, 117, 120, + 121, 122, 123, 124, 125, 162, 163, 164, 165, 114, + 150, 151, 141, 142, 143, 144, 145, 146, 148, 147, + 158, 63, 136, 137, 196, 138, 139, 140, 149, 157, + 156, 17, 50, 17, 155, 46, 77, 46, 173, 49, + 76, 49, 159, 160, 16, 51, 52, 48, 53, 54, + 55, 56, 14, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 23, + 24, 25, 29, 83, 4, 11, 72, 9, 28, 26, + 27, 31, 30, 32, 78, 79, 80, 81, 82, 86, + 84, 85, 200, 57, 58, 59, 60, 61, 62, 53, + 54, 55, 56, 64, 65, 95, 66, 67, 68, 69, + 1, 199, 0, 0, 73, 74, 150, 151, 141, 142, + 143, 144, 145, 146, 148, 147, 0, 0, 136, 137, + 198, 138, 139, 140, 149, 150, 151, 141, 142, 143, + 144, 145, 146, 148, 147, 0, 0, 136, 137, 191, + 138, 139, 140, 149, 150, 151, 141, 142, 143, 144, + 145, 146, 148, 147, 0, 0, 136, 137, 174, 138, + 139, 140, 149, 150, 151, 141, 142, 143, 144, 145, + 146, 148, 147, 0, 0, 136, 137, 135, 138, 139, + 140, 149, 150, 151, 141, 142, 143, 144, 145, 146, + 148, 147, 0, 0, 136, 137, 116, 138, 139, 140, + 149, 0, 0, 150, 151, 141, 142, 143, 144, 145, + 146, 148, 147, 0, 0, 136, 137, 0, 138, 139, + 140, 149, 57, 58, 59, 60, 61, 62, 0, 0, + 0, 0, 64, 65, 113, 66, 67, 68, 69, 57, + 58, 59, 60, 61, 62, 0, 110, 0, 0, 51, + 52, 0, 53, 54, 55, 56, 39, 42, 39, 42, + 0, 0, 40, 0, 40, 0, 41, 43, 41, 43, + 34, 37, 0, 35, 0, 0, 35, 36, 38, 0, + 36, 38, 23, 24, 25, 29, 0, 15, 0, 96, + 0, 28, 26, 27, 31, 30, 32, 0, 0, 0, + 0, 0, 0, 0, 0, 18, 21, 19, 20, 22, + 13, 97, 34, 37, 0, 0, 0, 0, 35, 0, + 0, 0, 36, 38, 23, 24, 25, 29, 0, 15, + 0, 172, 0, 28, 26, 27, 31, 30, 32, 0, + 0, 0, 0, 0, 0, 0, 0, 18, 21, 19, + 20, 22, 13, 23, 24, 25, 29, 0, 15, 0, + 170, 0, 28, 26, 27, 31, 30, 32, 0, 0, + 0, 0, 0, 0, 0, 0, 18, 21, 19, 20, + 22, 13, 23, 24, 25, 29, 0, 15, 0, 8, + 0, 28, 26, 27, 31, 30, 32, 0, 0, 0, + 0, 0, 0, 0, 0, 18, 21, 19, 20, 22, + 13, 23, 24, 25, 29, 0, 15, 0, 96, 0, + 28, 26, 27, 31, 30, 32, 0, 0, 0, 0, + 0, 0, 44, 3, 18, 21, 19, 20, 22, 23, + 24, 25, 29, 0, 0, 0, 127, 0, 28, 26, + 27, 31, 30, 32, 0, 0, 0, 0, 0, 0, + 0, 0, 18, 21, 19, 20, 22, 98, 100, 101, + 102, 103, 23, 24, 25, 29, 0, 0, 0, 119, + 0, 28, 26, 27, 31, 30, 32, +} +var yyPact = [...]int{ + + 527, -1000, -20, 425, -1000, 369, -1000, -1000, 527, -1000, + 350, -1000, 194, 125, -1000, 204, -1000, -1000, 112, 111, + 100, 99, 90, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 427, 84, 84, 84, 84, 84, 51, + 51, 51, 51, 51, 383, 86, 371, 136, 58, 333, + 617, 79, 79, 79, 79, 79, 79, -1000, -1000, -1000, + -1000, -1000, -1000, 584, 584, 584, 584, 584, 584, 584, + 204, 316, 204, 204, 204, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 170, 166, 165, 156, 49, 204, 204, + 204, 204, -1000, 369, -1000, -1000, 556, 41, 380, 498, + -1000, -1000, 380, -1000, 71, 51, -1000, -1000, 71, -1000, + -1000, -1000, 427, -1000, -1000, -1000, -1000, 54, -1000, 469, + 187, 187, -44, -44, -44, -44, -23, 584, 80, 80, + -45, -45, -45, -45, 295, -1000, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 276, -13, -13, 15, 14, 8, -2, 160, + 94, -1000, 257, 238, 219, 113, 371, 5, 39, -1, + 498, 194, 469, -21, -1000, -13, -13, -48, -48, -48, + -30, -30, -30, -30, -30, -30, -30, -30, -48, 40, + 40, -1000, -1000, -1000, -1000, -1000, -7, -14, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, +} +var yyPgo = [...]int{ + + 0, 250, 3, 245, 4, 582, 217, 5, 215, 2, + 161, 214, 87, 8, 192, 187, 184, 56, 0, 180, + 176, +} +var 
yyR1 = [...]int{ + + 0, 1, 1, 1, 5, 5, 5, 5, 5, 5, + 5, 6, 7, 7, 7, 7, 7, 7, 7, 2, + 3, 4, 4, 4, 4, 4, 4, 4, 8, 9, + 10, 10, 10, 10, 10, 10, 11, 11, 12, 12, + 12, 12, 12, 12, 12, 12, 14, 15, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 16, 16, 16, + 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, + 18, 18, 18, 18, 19, 19, 19, 19, 19, 20, + 20, 20, 20, 20, 20, +} +var yyR2 = [...]int{ + + 0, 1, 1, 1, 3, 3, 3, 3, 3, 3, + 1, 3, 1, 1, 1, 3, 3, 3, 3, 4, + 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, + 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 1, 1, 3, 4, 4, + 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, + 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, + 3, 3, 3, 4, 4, +} +var yyChk = [...]int{ + + -1000, -1, -7, -5, -11, -4, -9, -2, 12, -6, + -12, -8, -13, 33, -14, 10, -16, -18, 28, 30, + 31, 29, 32, 5, 6, 7, 15, 16, 14, 8, + 18, 17, 19, 36, 37, 43, 47, 38, 48, 37, + 43, 47, 38, 48, -5, -7, -4, -12, -15, -13, + -10, 49, 50, 52, 53, 54, 55, 39, 40, 41, + 42, 43, 44, -10, 49, 50, 52, 53, 54, 55, + 12, -17, 12, 50, 51, -18, -19, -20, 20, 21, + 22, 23, 24, 9, 26, 27, 25, 12, 12, 12, + 12, 12, -9, -4, -2, -3, 12, 34, -5, 12, + -5, -5, -5, -5, -4, 12, -4, -4, -4, -4, + 13, 13, 36, 13, 13, 13, 13, -12, -18, 12, + -12, -12, -12, -12, -12, -12, -13, 12, -13, -13, + -13, -13, -13, -13, -17, 11, 49, 50, 52, 53, + 54, 39, 40, 41, 42, 43, 44, 46, 45, 55, + 37, 38, -17, -17, -17, 4, 4, 4, 4, 26, + 27, 13, -17, -17, -17, -17, -4, -13, 12, -7, + 12, -13, 12, -7, 13, -17, -17, -17, -17, -17, + -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, + -17, 13, 35, 35, 35, 35, 4, 4, 13, 13, + 13, 13, 13, 35, 35, +} +var yyDef = [...]int{ + + 0, -2, 1, 2, 3, 12, 13, 14, 0, 10, + 0, 27, 0, 0, 45, 0, 55, 56, 0, 0, + 0, 0, 0, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 30, 31, 32, + 33, 34, 35, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 81, 82, 83, 94, 95, + 96, 97, 98, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 15, 16, 17, 18, 0, 0, 5, 0, + 6, 7, 8, 9, 22, 0, 23, 24, 25, 26, + 4, 11, 0, 21, 38, 46, 48, 36, 37, 0, + 39, 40, 41, 42, 43, 44, 29, 0, 49, 50, + 51, 52, 53, 54, 0, 28, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 79, 80, 0, 0, 0, 0, 0, + 0, 57, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 47, 0, 0, 19, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 62, 99, 100, 101, 102, 0, 0, 58, 59, + 60, 61, 20, 103, 104, +} +var yyTok1 = [...]int{ + + 1, +} +var yyTok2 = [...]int{ + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, +} +var yyTok3 = [...]int{ + 0, +} + +var yyErrorMessages = [...]struct { + state int + token int + msg string +}{} + +//line yaccpar:1 + +/* parser for yacc output */ + +var ( + yyDebug = 0 + yyErrorVerbose = false +) + +type yyLexer interface { + Lex(lval *yySymType) int + Error(s string) +} + +type yyParser interface { + Parse(yyLexer) int + Lookahead() int +} + +type yyParserImpl struct { + lval yySymType + stack [yyInitialStackSize]yySymType + char int +} + +func (p *yyParserImpl) Lookahead() int { + return p.char +} + +func yyNewParser() yyParser { + return &yyParserImpl{} +} + +const yyFlag = -1000 + +func yyTokname(c int) string { + if c >= 1 && c-1 < len(yyToknames) { + if yyToknames[c-1] != "" { + return yyToknames[c-1] + } + } + 
return __yyfmt__.Sprintf("tok-%v", c) +} + +func yyStatname(s int) string { + if s >= 0 && s < len(yyStatenames) { + if yyStatenames[s] != "" { + return yyStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func yyErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !yyErrorVerbose { + return "syntax error" + } + + for _, e := range yyErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + yyTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := yyPact[state] + for tok := TOKSTART; tok-1 < len(yyToknames); tok++ { + if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if yyDef[state] == -2 { + i := 0 + for yyExca[i] != -1 || yyExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; yyExca[i] >= 0; i += 2 { + tok := yyExca[i] + if tok < TOKSTART || yyExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if yyExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += yyTokname(tok) + } + return res +} + +func yylex1(lex yyLexer, lval *yySymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = yyTok1[0] + goto out + } + if char < len(yyTok1) { + token = yyTok1[char] + goto out + } + if char >= yyPrivate { + if char < yyPrivate+len(yyTok2) { + token = yyTok2[char-yyPrivate] + goto out + } + } + for i := 0; i < len(yyTok3); i += 2 { + token = yyTok3[i+0] + if token == char { + token = yyTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = yyTok2[1] /* unknown char */ + } + if yyDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char)) + } + return char, token +} + +func yyParse(yylex yyLexer) int { + return yyNewParser().Parse(yylex) +} + +func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int { + var yyn int + var yyVAL yySymType + var yyDollar []yySymType + _ = yyDollar // silence set and not used + yyS := yyrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + yystate := 0 + yyrcvr.char = -1 + yytoken := -1 // yyrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ yystate = -1 + yyrcvr.char = -1 + yytoken = -1 + }() + yyp := -1 + goto yystack + +ret0: + return 0 + +ret1: + return 1 + +yystack: + /* put a state and value onto the stack */ + if yyDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate)) + } + + yyp++ + if yyp >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyS[yyp] = yyVAL + yyS[yyp].yys = yystate + +yynewstate: + yyn = yyPact[yystate] + if yyn <= yyFlag { + goto yydefault /* simple state */ + } + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + yyn += yytoken + if yyn < 0 || yyn >= yyLast { + goto yydefault + } + yyn = yyAct[yyn] + if yyChk[yyn] == yytoken { /* valid shift */ + yyrcvr.char = -1 + yytoken = -1 + yyVAL = yyrcvr.lval + yystate = yyn + if Errflag > 0 { + Errflag-- + } + goto yystack + } + +yydefault: + /* default state action */ + yyn = yyDef[yystate] + if yyn == -2 { + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + yyn = yyExca[xi+0] + if yyn < 0 || yyn == yytoken { + break + } + } + yyn = yyExca[xi+1] + if yyn < 0 { + goto ret0 + } + } + if yyn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + yylex.Error(yyErrorMessage(yystate, yytoken)) + Nerrs++ + if yyDebug >= 1 { + __yyfmt__.Printf("%s", yyStatname(yystate)) + __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for yyp >= 0 { + yyn = yyPact[yyS[yyp].yys] + yyErrCode + if yyn >= 0 && yyn < yyLast { + yystate = yyAct[yyn] /* simulate a shift of "error" */ + if yyChk[yystate] == yyErrCode { + goto yystack + } + } + + /* the current p has no shift on "error", pop stack */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys) + } + yyp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken)) + } + if yytoken == yyEofCode { + goto ret1 + } + yyrcvr.char = -1 + yytoken = -1 + goto yynewstate /* try again in the same state */ + } + } + + /* reduction by production yyn */ + if yyDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate)) + } + + yynt := yyn + yypt := yyp + _ = yypt // guard against "declared and not used" + + yyp -= yyR2[yyn] + // yyp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if yyp+1 >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyVAL = yyS[yyp+1] + + /* consult goto table to find next state */ + yyn = yyR1[yyn] + yyg := yyPgo[yyn] + yyj := yyg + yyS[yyp].yys + 1 + + if yyj >= yyLast { + yystate = yyAct[yyg] + } else { + yystate = yyAct[yyj] + if yyChk[yystate] != -yyn { + yystate = yyAct[yyg] + } + } + // dummy call; replaced with literal code + switch yynt { + + case 1: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:93 + { + yylex.(*lexer).expr = newRootExpr(yyDollar[1].spansetPipeline) + } + case 2: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:94 + { + yylex.(*lexer).expr = newRootExpr(yyDollar[1].spansetPipelineExpression) + } + case 3: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:95 + { + yylex.(*lexer).expr = newRootExpr(yyDollar[1].scalarPipelineExpressionFilter) + } + case 4: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:102 + { + yyVAL.spansetPipelineExpression = yyDollar[2].spansetPipelineExpression + } + case 5: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:103 + { + yyVAL.spansetPipelineExpression = newSpansetOperation(opSpansetAnd, yyDollar[1].spansetPipelineExpression, yyDollar[3].spansetPipelineExpression) + } + case 6: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:104 + { + yyVAL.spansetPipelineExpression = newSpansetOperation(opSpansetChild, yyDollar[1].spansetPipelineExpression, yyDollar[3].spansetPipelineExpression) + } + case 7: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:105 + { + yyVAL.spansetPipelineExpression = newSpansetOperation(opSpansetDescendant, yyDollar[1].spansetPipelineExpression, yyDollar[3].spansetPipelineExpression) + } + case 8: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:106 + { + yyVAL.spansetPipelineExpression = newSpansetOperation(opSpansetUnion, yyDollar[1].spansetPipelineExpression, yyDollar[3].spansetPipelineExpression) + } + case 9: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:107 + { + yyVAL.spansetPipelineExpression = newSpansetOperation(opSpansetSibling, yyDollar[1].spansetPipelineExpression, yyDollar[3].spansetPipelineExpression) + } + case 10: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:108 + { + yyVAL.spansetPipelineExpression = yyDollar[1].wrappedSpansetPipeline + } + case 11: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:112 + { + yyVAL.wrappedSpansetPipeline = yyDollar[2].spansetPipeline + } + case 12: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:115 + { + yyVAL.spansetPipeline = newPipeline(yyDollar[1].spansetExpression) + } + case 13: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:116 + { + yyVAL.spansetPipeline = newPipeline(yyDollar[1].scalarFilter) + } + case 14: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:117 + { + yyVAL.spansetPipeline = newPipeline(yyDollar[1].groupOperation) + } + case 15: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:118 + { + yyVAL.spansetPipeline = yyDollar[1].spansetPipeline.addItem(yyDollar[3].scalarFilter) + } + case 16: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:119 + { + yyVAL.spansetPipeline = yyDollar[1].spansetPipeline.addItem(yyDollar[3].spansetExpression) + } + case 17: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:120 + { + yyVAL.spansetPipeline = yyDollar[1].spansetPipeline.addItem(yyDollar[3].groupOperation) + } + case 18: + yyDollar = yyS[yypt-3 : yypt+1] 
+//line pkg/traceql/expr.y:121 + { + yyVAL.spansetPipeline = yyDollar[1].spansetPipeline.addItem(yyDollar[3].coalesceOperation) + } + case 19: + yyDollar = yyS[yypt-4 : yypt+1] +//line pkg/traceql/expr.y:125 + { + yyVAL.groupOperation = newGroupOperation(yyDollar[3].fieldExpression) + } + case 20: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:129 + { + yyVAL.coalesceOperation = newCoalesceOperation() + } + case 21: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:133 + { + yyVAL.spansetExpression = yyDollar[2].spansetExpression + } + case 22: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:134 + { + yyVAL.spansetExpression = newSpansetOperation(opSpansetAnd, yyDollar[1].spansetExpression, yyDollar[3].spansetExpression) + } + case 23: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:135 + { + yyVAL.spansetExpression = newSpansetOperation(opSpansetChild, yyDollar[1].spansetExpression, yyDollar[3].spansetExpression) + } + case 24: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:136 + { + yyVAL.spansetExpression = newSpansetOperation(opSpansetDescendant, yyDollar[1].spansetExpression, yyDollar[3].spansetExpression) + } + case 25: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:137 + { + yyVAL.spansetExpression = newSpansetOperation(opSpansetUnion, yyDollar[1].spansetExpression, yyDollar[3].spansetExpression) + } + case 26: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:138 + { + yyVAL.spansetExpression = newSpansetOperation(opSpansetSibling, yyDollar[1].spansetExpression, yyDollar[3].spansetExpression) + } + case 27: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:139 + { + yyVAL.spansetExpression = yyDollar[1].spansetFilter + } + case 28: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:143 + { + yyVAL.spansetFilter = newSpansetFilter(yyDollar[2].fieldExpression) + } + case 29: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:147 + { + yyVAL.scalarFilter = newScalarFilter(yyDollar[2].scalarFilterOperation, yyDollar[1].scalarExpression, yyDollar[3].scalarExpression) + } + case 30: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:151 + { + yyVAL.scalarFilterOperation = opEqual + } + case 31: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:152 + { + yyVAL.scalarFilterOperation = opNotEqual + } + case 32: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:153 + { + yyVAL.scalarFilterOperation = opLess + } + case 33: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:154 + { + yyVAL.scalarFilterOperation = opLessEqual + } + case 34: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:155 + { + yyVAL.scalarFilterOperation = opGreater + } + case 35: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:156 + { + yyVAL.scalarFilterOperation = opGreaterEqual + } + case 36: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:163 + { + yyVAL.scalarPipelineExpressionFilter = newScalarFilter(yyDollar[2].scalarFilterOperation, yyDollar[1].scalarPipelineExpression, yyDollar[3].scalarPipelineExpression) + } + case 37: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:164 + { + yyVAL.scalarPipelineExpressionFilter = newScalarFilter(yyDollar[2].scalarFilterOperation, yyDollar[1].scalarPipelineExpression, yyDollar[3].static) + } + case 38: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:168 + { + yyVAL.scalarPipelineExpression = yyDollar[2].scalarPipelineExpression + } + case 39: + yyDollar = 
yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:169 + { + yyVAL.scalarPipelineExpression = newScalarOperation(opAdd, yyDollar[1].scalarPipelineExpression, yyDollar[3].scalarPipelineExpression) + } + case 40: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:170 + { + yyVAL.scalarPipelineExpression = newScalarOperation(opSub, yyDollar[1].scalarPipelineExpression, yyDollar[3].scalarPipelineExpression) + } + case 41: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:171 + { + yyVAL.scalarPipelineExpression = newScalarOperation(opMult, yyDollar[1].scalarPipelineExpression, yyDollar[3].scalarPipelineExpression) + } + case 42: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:172 + { + yyVAL.scalarPipelineExpression = newScalarOperation(opDiv, yyDollar[1].scalarPipelineExpression, yyDollar[3].scalarPipelineExpression) + } + case 43: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:173 + { + yyVAL.scalarPipelineExpression = newScalarOperation(opMod, yyDollar[1].scalarPipelineExpression, yyDollar[3].scalarPipelineExpression) + } + case 44: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:174 + { + yyVAL.scalarPipelineExpression = newScalarOperation(opPower, yyDollar[1].scalarPipelineExpression, yyDollar[3].scalarPipelineExpression) + } + case 45: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:175 + { + yyVAL.scalarPipelineExpression = yyDollar[1].wrappedScalarPipeline + } + case 46: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:179 + { + yyVAL.wrappedScalarPipeline = yyDollar[2].scalarPipeline + } + case 47: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:183 + { + yyVAL.scalarPipeline = yyDollar[1].spansetPipeline.addItem(yyDollar[3].scalarExpression) + } + case 48: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:187 + { + yyVAL.scalarExpression = yyDollar[2].scalarExpression + } + case 49: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:188 + { + yyVAL.scalarExpression = newScalarOperation(opAdd, yyDollar[1].scalarExpression, yyDollar[3].scalarExpression) + } + case 50: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:189 + { + yyVAL.scalarExpression = newScalarOperation(opSub, yyDollar[1].scalarExpression, yyDollar[3].scalarExpression) + } + case 51: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:190 + { + yyVAL.scalarExpression = newScalarOperation(opMult, yyDollar[1].scalarExpression, yyDollar[3].scalarExpression) + } + case 52: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:191 + { + yyVAL.scalarExpression = newScalarOperation(opDiv, yyDollar[1].scalarExpression, yyDollar[3].scalarExpression) + } + case 53: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:192 + { + yyVAL.scalarExpression = newScalarOperation(opMod, yyDollar[1].scalarExpression, yyDollar[3].scalarExpression) + } + case 54: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:193 + { + yyVAL.scalarExpression = newScalarOperation(opPower, yyDollar[1].scalarExpression, yyDollar[3].scalarExpression) + } + case 55: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:194 + { + yyVAL.scalarExpression = yyDollar[1].aggregate + } + case 56: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:195 + { + yyVAL.scalarExpression = yyDollar[1].static + } + case 57: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:199 + { + yyVAL.aggregate = newAggregate(aggregateCount, nil) + } + case 58: + yyDollar = yyS[yypt-4 : yypt+1] +//line 
pkg/traceql/expr.y:200 + { + yyVAL.aggregate = newAggregate(aggregateMax, yyDollar[3].fieldExpression) + } + case 59: + yyDollar = yyS[yypt-4 : yypt+1] +//line pkg/traceql/expr.y:201 + { + yyVAL.aggregate = newAggregate(aggregateMin, yyDollar[3].fieldExpression) + } + case 60: + yyDollar = yyS[yypt-4 : yypt+1] +//line pkg/traceql/expr.y:202 + { + yyVAL.aggregate = newAggregate(aggregateAvg, yyDollar[3].fieldExpression) + } + case 61: + yyDollar = yyS[yypt-4 : yypt+1] +//line pkg/traceql/expr.y:203 + { + yyVAL.aggregate = newAggregate(aggregateSum, yyDollar[3].fieldExpression) + } + case 62: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:210 + { + yyVAL.fieldExpression = yyDollar[2].fieldExpression + } + case 63: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:211 + { + yyVAL.fieldExpression = newBinaryOperation(opAdd, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 64: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:212 + { + yyVAL.fieldExpression = newBinaryOperation(opSub, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 65: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:213 + { + yyVAL.fieldExpression = newBinaryOperation(opMult, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 66: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:214 + { + yyVAL.fieldExpression = newBinaryOperation(opDiv, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 67: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:215 + { + yyVAL.fieldExpression = newBinaryOperation(opMod, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 68: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:216 + { + yyVAL.fieldExpression = newBinaryOperation(opEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 69: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:217 + { + yyVAL.fieldExpression = newBinaryOperation(opNotEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 70: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:218 + { + yyVAL.fieldExpression = newBinaryOperation(opLess, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 71: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:219 + { + yyVAL.fieldExpression = newBinaryOperation(opLessEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 72: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:220 + { + yyVAL.fieldExpression = newBinaryOperation(opGreater, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 73: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:221 + { + yyVAL.fieldExpression = newBinaryOperation(opGreaterEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 74: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:222 + { + yyVAL.fieldExpression = newBinaryOperation(opRegex, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 75: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:223 + { + yyVAL.fieldExpression = newBinaryOperation(opNotRegex, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 76: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:224 + { + yyVAL.fieldExpression = newBinaryOperation(opPower, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 77: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:225 
+ { + yyVAL.fieldExpression = newBinaryOperation(opAnd, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 78: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:226 + { + yyVAL.fieldExpression = newBinaryOperation(opOr, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + } + case 79: + yyDollar = yyS[yypt-2 : yypt+1] +//line pkg/traceql/expr.y:227 + { + yyVAL.fieldExpression = newUnaryOperation(opSub, yyDollar[2].fieldExpression) + } + case 80: + yyDollar = yyS[yypt-2 : yypt+1] +//line pkg/traceql/expr.y:228 + { + yyVAL.fieldExpression = newUnaryOperation(opNot, yyDollar[2].fieldExpression) + } + case 81: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:229 + { + yyVAL.fieldExpression = yyDollar[1].static + } + case 82: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:230 + { + yyVAL.fieldExpression = yyDollar[1].intrinsicField + } + case 83: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:231 + { + yyVAL.fieldExpression = yyDollar[1].attributeField + } + case 84: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:238 + { + yyVAL.static = newStaticString(yyDollar[1].staticStr) + } + case 85: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:239 + { + yyVAL.static = newStaticInt(yyDollar[1].staticInt) + } + case 86: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:240 + { + yyVAL.static = newStaticFloat(yyDollar[1].staticFloat) + } + case 87: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:241 + { + yyVAL.static = newStaticBool(true) + } + case 88: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:242 + { + yyVAL.static = newStaticBool(false) + } + case 89: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:243 + { + yyVAL.static = newStaticNil() + } + case 90: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:244 + { + yyVAL.static = newStaticDuration(yyDollar[1].staticDuration) + } + case 91: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:245 + { + yyVAL.static = newStaticStatus(statusOk) + } + case 92: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:246 + { + yyVAL.static = newStaticStatus(statusError) + } + case 93: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:247 + { + yyVAL.static = newStaticStatus(statusUnset) + } + case 94: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:251 + { + yyVAL.intrinsicField = newIntrinsic(intrinsicDuration) + } + case 95: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:252 + { + yyVAL.intrinsicField = newIntrinsic(intrinsicChildCount) + } + case 96: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:253 + { + yyVAL.intrinsicField = newIntrinsic(intrinsicName) + } + case 97: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:254 + { + yyVAL.intrinsicField = newIntrinsic(intrinsicStatus) + } + case 98: + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:255 + { + yyVAL.intrinsicField = newIntrinsic(intrinsicParent) + } + case 99: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:259 + { + yyVAL.attributeField = newAttribute(yyDollar[2].staticStr) + } + case 100: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:260 + { + yyVAL.attributeField = newScopedAttribute(attributeScopeResource, false, yyDollar[2].staticStr) + } + case 101: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:261 + { + yyVAL.attributeField = newScopedAttribute(attributeScopeSpan, false, yyDollar[2].staticStr) 
+ } + case 102: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:262 + { + yyVAL.attributeField = newScopedAttribute(attributeScopeNone, true, yyDollar[2].staticStr) + } + case 103: + yyDollar = yyS[yypt-4 : yypt+1] +//line pkg/traceql/expr.y:263 + { + yyVAL.attributeField = newScopedAttribute(attributeScopeResource, true, yyDollar[3].staticStr) + } + case 104: + yyDollar = yyS[yypt-4 : yypt+1] +//line pkg/traceql/expr.y:264 + { + yyVAL.attributeField = newScopedAttribute(attributeScopeSpan, true, yyDollar[3].staticStr) + } + } + goto yystack /* stack new state and value */ +} diff --git a/pkg/traceql/lexer.go b/pkg/traceql/lexer.go new file mode 100644 index 00000000000..313692c7350 --- /dev/null +++ b/pkg/traceql/lexer.go @@ -0,0 +1,240 @@ +package traceql + +import ( + "strconv" + "strings" + "text/scanner" + "time" + "unicode" + + "github.com/prometheus/common/model" +) + +var tokens = map[string]int{ + ".": DOT, + "{": OPEN_BRACE, + "}": CLOSE_BRACE, + "(": OPEN_PARENS, + ")": CLOSE_PARENS, + "=": EQ, + "!=": NEQ, + "=~": RE, + "!~": NRE, + ">": GT, + ">=": GTE, + "<": LT, + "<=": LTE, + "+": ADD, + "-": SUB, + "/": DIV, + "%": MOD, + "*": MUL, + "^": POW, + "true": TRUE, + "false": FALSE, + "nil": NIL, + "ok": STATUS_OK, + "error": STATUS_ERROR, + "unset": STATUS_UNSET, + "&&": AND, + "||": OR, + "!": NOT, + "|": PIPE, + ">>": DESC, + "~": TILDE, + "duration": IDURATION, + "childCount": CHILDCOUNT, + "name": NAME, + "status": STATUS, + "parent": PARENT, + "parent.": PARENT_DOT, + "resource.": RESOURCE_DOT, + "span.": SPAN_DOT, + "count": COUNT, + "avg": AVG, + "max": MAX, + "min": MIN, + "sum": SUM, + "by": BY, + "coalesce": COALESCE, +} + +type lexer struct { + scanner.Scanner + expr *RootExpr + parser *yyParserImpl + errs []ParseError + + parsingAttribute bool +} + +func (l *lexer) Lex(lval *yySymType) int { + // if we are currently parsing an attribute and the next rune suggests that + // this attribute will end, then return a special token indicating that the attribute is + // done parsing + if l.parsingAttribute && !isAttributeRune(l.Peek()) { + l.parsingAttribute = false + return END_ATTRIBUTE + } + + r := l.Scan() + + // if we are currently parsing an attribute then just grab everything until we find a character that ends the attribute. 
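+ // e.g. "span.http.status" lexes as SPAN_DOT, IDENTIFIER("http.status") and finally END_ATTRIBUTE.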
+ // we will handle parsing this out in ast.go + if l.parsingAttribute { + str := l.TokenText() + // parse out any scopes here + tok := tokens[str+string(l.Peek())] + if tok == RESOURCE_DOT || tok == SPAN_DOT { + l.Next() + return tok + } + + // go forward until we find the end of the attribute + r := l.Peek() + for isAttributeRune(r) { + str += string(l.Next()) + r = l.Peek() + } + + lval.staticStr = str + return IDENTIFIER + } + + // now that we know we're not parsing an attribute, let's look for everything else + switch r { + case scanner.EOF: + return 0 + + case scanner.String, scanner.RawString: + var err error + lval.staticStr, err = strconv.Unquote(l.TokenText()) + if err != nil { + l.Error(err.Error()) + return 0 + } + return STRING + + case scanner.Int: + numberText := l.TokenText() + + // first try to parse as duration + duration, ok := tryScanDuration(numberText, &l.Scanner) + if ok { + lval.staticDuration = duration + return DURATION + } + + // if we can't then just try an int + var err error + lval.staticInt, err = strconv.Atoi(numberText) + if err != nil { + l.Error(err.Error()) + return 0 + } + return INTEGER + + case scanner.Float: + var err error + lval.staticFloat, err = strconv.ParseFloat(l.TokenText(), 64) + if err != nil { + l.Error(err.Error()) + return 0 + } + return FLOAT + } + + tokStrNext := l.TokenText() + string(l.Peek()) + if tok, ok := tokens[tokStrNext]; ok { + l.Next() + l.parsingAttribute = startsAttribute(tok) + return tok + } + + if tok, ok := tokens[l.TokenText()]; ok { + l.parsingAttribute = startsAttribute(tok) + return tok + } + + lval.staticStr = l.TokenText() + return IDENTIFIER +} + +func (l *lexer) Error(msg string) { + l.errs = append(l.errs, newParseError(msg, l.Line, l.Column)) +} + +func tryScanDuration(number string, l *scanner.Scanner) (time.Duration, bool) { + var sb strings.Builder + sb.WriteString(number) + //copy the scanner to avoid advancing it in case it's not a duration. + s := *l + consumed := 0 + for r := s.Peek(); r != scanner.EOF && !unicode.IsSpace(r); r = s.Peek() { + if !unicode.IsNumber(r) && !isDurationRune(r) && r != '.' { + break + } + _, _ = sb.WriteRune(r) + _ = s.Next() + consumed++ + } + + if consumed == 0 { + return 0, false + } + // we've found more characters before a whitespace or the end + d, err := parseDuration(sb.String()) + if err != nil { + return 0, false + } + // we need to consume the scanner, now that we know this is a duration. + for i := 0; i < consumed; i++ { + _ = l.Next() + } + return d, true +} + +func parseDuration(d string) (time.Duration, error) { + var duration time.Duration + // Try to parse promql style durations first, to ensure that we support the same duration + // units as promql + prometheusDuration, err := model.ParseDuration(d) + if err != nil { + // Fall back to standard library's time.ParseDuration if a promql style + // duration couldn't be parsed. + duration, err = time.ParseDuration(d) + if err != nil { + return 0, err + } + } else { + duration = time.Duration(prometheusDuration) + } + + return duration, nil +} + +func isDurationRune(r rune) bool { + // "ns", "us" (or "µs"), "ms", "s", "m", "h". 
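+ // plus the promql-style "d", "w" and "y" units that parseDuration above also accepts.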
+ switch r { + case 'n', 's', 'u', 'm', 'h', 'µ', 'd', 'w', 'y': + return true + default: + return false + } +} + +func isAttributeRune(r rune) bool { + return !unicode.IsSpace(r) && + r != scanner.EOF && + r != '(' && + r != ')' && + r != '}' && + r != '{' +} + +func startsAttribute(tok int) bool { + return tok == DOT || + tok == RESOURCE_DOT || + tok == SPAN_DOT || + tok == PARENT_DOT +} diff --git a/pkg/traceql/lexer_test.go b/pkg/traceql/lexer_test.go new file mode 100644 index 00000000000..87506335505 --- /dev/null +++ b/pkg/traceql/lexer_test.go @@ -0,0 +1,136 @@ +package traceql + +import ( + "strings" + "testing" + "text/scanner" + "time" + + "github.com/stretchr/testify/require" +) + +type lexerTestCase struct { + input string + expected []int +} + +func TestLexerAttributes(t *testing.T) { + testLexer(t, ([]lexerTestCase{ + // attributes + {`.foo`, []int{DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`.count`, []int{DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`.foo3`, []int{DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`.foo+bar`, []int{DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`.foo-bar`, []int{DOT, IDENTIFIER, END_ATTRIBUTE}}, + // parent attributes + {`parent.foo`, []int{PARENT_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.count`, []int{PARENT_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.foo3`, []int{PARENT_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.foo+bar`, []int{PARENT_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.foo-bar`, []int{PARENT_DOT, IDENTIFIER, END_ATTRIBUTE}}, + // span attributes + {`span.foo`, []int{SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`span.count`, []int{SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`span.foo3`, []int{SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`span.foo+bar`, []int{SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`span.foo-bar`, []int{SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + // resource attributes + {`resource.foo`, []int{RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`resource.count`, []int{RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`resource.foo3`, []int{RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`resource.foo+bar`, []int{RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`resource.foo-bar`, []int{RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + // parent span attributes + {`parent.span.foo`, []int{PARENT_DOT, SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.span.count`, []int{PARENT_DOT, SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.span.foo3`, []int{PARENT_DOT, SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.span.foo+bar`, []int{PARENT_DOT, SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.span.foo-bar`, []int{PARENT_DOT, SPAN_DOT, IDENTIFIER, END_ATTRIBUTE}}, + // parent resource attributes + {`parent.resource.foo`, []int{PARENT_DOT, RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.resource.count`, []int{PARENT_DOT, RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.resource.foo3`, []int{PARENT_DOT, RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.resource.foo+bar`, []int{PARENT_DOT, RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`parent.resource.foo-bar`, []int{PARENT_DOT, RESOURCE_DOT, IDENTIFIER, END_ATTRIBUTE}}, + // attribute enders: , {, }, (, ) all force end an attribute + {`.foo .bar`, []int{DOT, IDENTIFIER, END_ATTRIBUTE, DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`.foo}.bar`, []int{DOT, IDENTIFIER, END_ATTRIBUTE, CLOSE_BRACE, DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`.foo{.bar`, []int{DOT, IDENTIFIER, END_ATTRIBUTE, OPEN_BRACE, DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`.foo).bar`, []int{DOT, IDENTIFIER, END_ATTRIBUTE, CLOSE_PARENS, DOT, IDENTIFIER, END_ATTRIBUTE}}, + 
{`.foo(.bar`, []int{DOT, IDENTIFIER, END_ATTRIBUTE, OPEN_PARENS, DOT, IDENTIFIER, END_ATTRIBUTE}}, + {`. foo`, []int{DOT, END_ATTRIBUTE, IDENTIFIER}}, + // not attributes + {`.3`, []int{FLOAT}}, + {`.24h`, []int{FLOAT, IDENTIFIER}}, + })) +} + +func TestLexerDuration(t *testing.T) { + testLexer(t, ([]lexerTestCase{ + // duration + {"1ns", []int{DURATION}}, + {"1s", []int{DURATION}}, + {"1us", []int{DURATION}}, + {"1m", []int{DURATION}}, + {"1h", []int{DURATION}}, + {"1µs", []int{DURATION}}, + {"1y", []int{DURATION}}, + {"1w", []int{DURATION}}, + {"1d", []int{DURATION}}, + {"1h15m30.918273645s", []int{DURATION}}, + // not duration + {"1t", []int{INTEGER, IDENTIFIER}}, + {"1", []int{INTEGER}}, + })) +} + +func TestLexerParseDuration(t *testing.T) { + const MICROSECOND = 1000 * time.Nanosecond + const DAY = 24 * time.Hour + const WEEK = 7 * DAY + const YEAR = 365 * DAY + + for _, tc := range []struct { + input string + expected time.Duration + }{ + {"1ns", time.Nanosecond}, + {"1s", time.Second}, + {"1us", MICROSECOND}, + {"1m", time.Minute}, + {"1h", time.Hour}, + {"1µs", MICROSECOND}, + {"1y", YEAR}, + {"1w", WEEK}, + {"1d", DAY}, + {"1h15m30.918273645s", time.Hour + 15*time.Minute + 30*time.Second + 918273645*time.Nanosecond}, + } { + actual, err := parseDuration(tc.input) + + require.NoError(t, err) + require.Equal(t, tc.expected, actual) + } +} + +func testLexer(t *testing.T, tcs []lexerTestCase) { + for _, tc := range tcs { + t.Run(tc.input, func(t *testing.T) { + actual := []int{} + l := lexer{ + Scanner: scanner.Scanner{ + Mode: scanner.SkipComments | scanner.ScanStrings, + }, + } + l.Init(strings.NewReader(tc.input)) + var lval yySymType + for { + tok := l.Lex(&lval) + if tok == 0 { + break + } + actual = append(actual, tok) + } + require.Equal(t, tc.expected, actual) + }) + } +} diff --git a/pkg/traceql/parse.go b/pkg/traceql/parse.go new file mode 100644 index 00000000000..31fc7ded102 --- /dev/null +++ b/pkg/traceql/parse.go @@ -0,0 +1,68 @@ +package traceql + +import ( + "errors" + "fmt" + "strings" + "text/scanner" +) + +func init() { + yyErrorVerbose = true + // yyDebug = 3 + // replaces constants with actual identifiers in error messages + // i.e. "expecting OPEN_BRACE" => "expecting {" + for str, tok := range tokens { + yyToknames[tok-yyPrivate+1] = str + } +} + +func Parse(s string) (expr *RootExpr, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(error); ok { + if errors.Is(err, ParseError{}) { + return + } + err = newParseError(err.Error(), 0, 0) + } + } + }() + l := lexer{ + parser: yyNewParser().(*yyParserImpl), + } + l.Init(strings.NewReader(s)) + l.Scanner.Error = func(_ *scanner.Scanner, msg string) { + l.Error(msg) + } + e := l.parser.Parse(&l) + if len(l.errs) > 0 { + return nil, l.errs[0] + } + if e != 0 { + return nil, fmt.Errorf("unknown parse error: %d", e) + } + return l.expr, nil +} + +// ParseError is what is returned when we failed to parse.
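+// e.g. Parse("{ .a } | { .b") returns a ParseError whose Error() reads "parse error at line 1, col 14: syntax error: unexpected $end".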
+type ParseError struct { + msg string + line, col int +} + +func (p ParseError) Error() string { + if p.col == 0 && p.line == 0 { + return fmt.Sprintf("parse error : %s", p.msg) + } + return fmt.Sprintf("parse error at line %d, col %d: %s", p.line, p.col, p.msg) +} + +func newParseError(msg string, line, col int) ParseError { + return ParseError{ + msg: msg, + line: line, + col: col, + } +} diff --git a/pkg/traceql/parse_test.go b/pkg/traceql/parse_test.go new file mode 100644 index 00000000000..eeaa8c61da7 --- /dev/null +++ b/pkg/traceql/parse_test.go @@ -0,0 +1,879 @@ +package traceql + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestPipelineErrors(t *testing.T) { + tests := []struct { + in string + err error + }{ + {in: "{ .a } | { .b", err: newParseError("syntax error: unexpected $end", 1, 14)}, + {in: "{ .a | .b }", err: newParseError("syntax error: unexpected |", 1, 6)}, + {in: "({ .a } | { .b }", err: newParseError("syntax error: unexpected $end, expecting ) or |", 1, 17)}, + {in: "({ .a } | { .b }) + ({ .a } | { .b })", err: newParseError("syntax error: unexpected +", 1, 19)}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + _, err := Parse(tc.in) + + assert.Equal(t, tc.err, err) + }) + } +} + +func TestPipelineOperatorPrecedence(t *testing.T) { + tests := []struct { + in string + expected SpansetOperation + }{ + { + in: "({ .a } | { .b }) > ({ .a } | { .b }) && ({ .a } | { .b })", + expected: newSpansetOperation(opSpansetAnd, + newSpansetOperation(opSpansetChild, + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + ), + }, + { + in: "({ .a } | { .b }) > (({ .a } | { .b }) && ({ .a } | { .b }))", + expected: newSpansetOperation(opSpansetChild, + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + newSpansetOperation(opSpansetAnd, + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + ), + ), + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(tc.expected)}, actual) + }) + } +} + +func TestPipelineSpansetOperators(t *testing.T) { + tests := []struct { + in string + expected SpansetOperation + }{ + { + in: "({ .a } | { .b }) > ({ .a } | { .b })", + expected: newSpansetOperation(opSpansetChild, + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + ), + }, + { + in: "({ .a } | { .b }) && ({ .a } | { .b })", + expected: newSpansetOperation(opSpansetAnd, + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + ), + }, + { + in: "({ .a } | { .b }) >> ({ .a } | { .b })", + expected: newSpansetOperation(opSpansetDescendant, + newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + 
newSpansetFilter(newAttribute("b")), + ), + ), + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(tc.expected)}, actual) + }) + } +} + +func TestPipelineScalarOperators(t *testing.T) { + tests := []struct { + in string + expected ScalarFilter + }{ + { + in: "({ .a } | count()) = ({ .a } | count())", + expected: newScalarFilter(opEqual, + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + ), + }, + { + in: "({ .a } | count()) != ({ .a } | count())", + expected: newScalarFilter(opNotEqual, + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + ), + }, + { + in: "({ .a } | count()) < ({ .a } | count())", + expected: newScalarFilter(opLess, + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + ), + }, + { + in: "({ .a } | count()) <= ({ .a } | count())", + expected: newScalarFilter(opLessEqual, + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + ), + }, + { + in: "({ .a } | count()) >= ({ .a } | count())", + expected: newScalarFilter(opGreaterEqual, + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + newPipeline( + newSpansetFilter(newAttribute("a")), + newAggregate(aggregateCount, nil), + ), + ), + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(tc.expected)}, actual) + }) + } +} + +func TestPipelines(t *testing.T) { + tests := []struct { + in string + expected Pipeline + }{ + { + in: "{ .a } | { .b }", + expected: newPipeline( + newSpansetFilter(newAttribute("a")), + newSpansetFilter(newAttribute("b")), + ), + }, + { + in: "{ .a } | count() > 1", + expected: newPipeline( + newSpansetFilter(newAttribute("a")), + newScalarFilter(opGreater, newAggregate(aggregateCount, nil), newStaticInt(1)), + ), + }, + { + in: "{ .a } | by(.namespace) | coalesce() | avg(duration) = 1s ", + expected: newPipeline( + newSpansetFilter(newAttribute("a")), + newGroupOperation(newAttribute("namespace")), + newCoalesceOperation(), + newScalarFilter(opEqual, newAggregate(aggregateAvg, newIntrinsic(intrinsicDuration)), newStaticDuration(time.Second)), + ), + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{tc.expected}, actual) + }) + } +} + +func TestGroupCoalesceErrors(t *testing.T) { + tests := []struct { + in string + err error + }{ + {in: "by(.a) && { .b }", err: newParseError("syntax error: unexpected &&", 0, 8)}, + {in: "by()", err: newParseError("syntax error: unexpected )", 1, 4)}, + {in: "coalesce()", err: newParseError("syntax error: unexpected coalesce", 1, 1)}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + _, err := Parse(tc.in) + + assert.Equal(t, tc.err, err) + }) + } +} + +func TestGroupCoalesceOperation(t *testing.T) { + tests := 
[]struct { + in string + expected Pipeline + }{ + {in: "by(.a) | coalesce()", expected: newPipeline(newGroupOperation(newAttribute("a")), newCoalesceOperation())}, + {in: "by(.a + .b)", expected: newPipeline(newGroupOperation(newBinaryOperation(opAdd, newAttribute("a"), newAttribute("b"))))}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{tc.expected}, actual) + }) + } +} + +func TestSpansetExpressionErrors(t *testing.T) { + tests := []struct { + in string + err error + }{ + {in: "{ true } &&", err: newParseError("syntax error: unexpected $end, expecting { or (", 1, 12)}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + _, err := Parse(tc.in) + + assert.Equal(t, tc.err, err) + }) + } +} + +func TestSpansetExpressionPrecedence(t *testing.T) { + tests := []struct { + in string + expected SpansetOperation + }{ + { + in: "{ true } && { false } >> { `a` }", + expected: newSpansetOperation(opSpansetAnd, + newSpansetFilter(newStaticBool(true)), + newSpansetOperation(opSpansetDescendant, newSpansetFilter(newStaticBool(false)), newSpansetFilter(newStaticString("a"))), + ), + }, + { + in: "{ true } >> { false } && { `a` }", + expected: newSpansetOperation(opSpansetAnd, + newSpansetOperation(opSpansetDescendant, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false))), + newSpansetFilter(newStaticString("a")), + ), + }, + { + in: "({ true } >> { false }) && { `a` }", + expected: newSpansetOperation(opSpansetAnd, + newSpansetOperation(opSpansetDescendant, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false))), + newSpansetFilter(newStaticString("a")), + ), + }, + { + in: "{ true } >> { false } ~ { `a` }", + expected: newSpansetOperation(opSpansetSibling, + newSpansetOperation(opSpansetDescendant, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false))), + newSpansetFilter(newStaticString("a")), + ), + }, + { + in: "{ true } ~ { false } >> { `a` }", + expected: newSpansetOperation(opSpansetDescendant, + newSpansetOperation(opSpansetSibling, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false))), + newSpansetFilter(newStaticString("a")), + ), + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(tc.expected)}, actual) + }) + } +} + +func TestSpansetExpressionOperators(t *testing.T) { + tests := []struct { + in string + expected SpansetOperation + }{ + {in: "{ true } && { false }", expected: newSpansetOperation(opSpansetAnd, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false)))}, + {in: "{ true } > { false }", expected: newSpansetOperation(opSpansetChild, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false)))}, + {in: "{ true } >> { false }", expected: newSpansetOperation(opSpansetDescendant, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false)))}, + {in: "{ true } || { false }", expected: newSpansetOperation(opSpansetUnion, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false)))}, + {in: "{ true } ~ { false }", expected: newSpansetOperation(opSpansetSibling, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false)))}, + // this test was added to highlight the one shift/reduce conflict in the grammar. 
this could also be parsed as two spanset pipelines &&ed together. + {in: "({ true }) && ({ false })", expected: newSpansetOperation(opSpansetAnd, newSpansetFilter(newStaticBool(true)), newSpansetFilter(newStaticBool(false)))}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(tc.expected)}, actual) + }) + } +} + +func TestScalarExpressionErrors(t *testing.T) { + tests := []struct { + in string + err error + }{ + {in: "(avg(.foo) > count()) + sum(.bar)", err: newParseError("syntax error: unexpected +", 1, 23)}, + {in: "count(", err: newParseError("syntax error: unexpected $end, expecting )", 1, 7)}, + {in: "count(avg)", err: newParseError("syntax error: unexpected avg, expecting )", 1, 7)}, + {in: "count(.thing)", err: newParseError("syntax error: unexpected ., expecting )", 1, 7)}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + _, err := Parse(tc.in) + + assert.Equal(t, tc.err, err) + }) + } +} + +func TestScalarExpressionPrecedence(t *testing.T) { + tests := []struct { + in string + expected ScalarFilter + }{ + { + in: "avg(.foo) > count() + sum(.bar)", + expected: newScalarFilter(opGreater, + newAggregate(aggregateAvg, newAttribute("foo")), + newScalarOperation(opAdd, + newAggregate(aggregateCount, nil), + newAggregate(aggregateSum, newAttribute("bar")), + ), + ), + }, + { + in: "avg(.foo) + count() > sum(.bar)", + expected: newScalarFilter(opGreater, + newScalarOperation(opAdd, + newAggregate(aggregateAvg, newAttribute("foo")), + newAggregate(aggregateCount, nil), + ), + newAggregate(aggregateSum, newAttribute("bar")), + ), + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(tc.expected)}, actual) + }) + } +} + +func TestScalarExpressionOperators(t *testing.T) { + tests := []struct { + in string + expected ScalarFilter + }{ + {in: "count() > 1", expected: newScalarFilter(opGreater, newAggregate(aggregateCount, nil), newStaticInt(1))}, + {in: "max(.a) > 1", expected: newScalarFilter(opGreater, newAggregate(aggregateMax, newAttribute("a")), newStaticInt(1))}, + {in: "min(1) > 1", expected: newScalarFilter(opGreater, newAggregate(aggregateMin, newStaticInt(1)), newStaticInt(1))}, + {in: "sum(true) > 1", expected: newScalarFilter(opGreater, newAggregate(aggregateSum, newStaticBool(true)), newStaticInt(1))}, + {in: "avg(`c`) > 1", expected: newScalarFilter(opGreater, newAggregate(aggregateAvg, newStaticString("c")), newStaticInt(1))}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(tc.expected)}, actual) + }) + } +} + +func TestSpansetFilterErrors(t *testing.T) { + tests := []struct { + in string + err error + }{ + {in: "wharblgarbl", err: newParseError("syntax error: unexpected IDENTIFIER", 1, 1)}, + {in: "{ 2 <> 3}", err: newParseError("syntax error: unexpected >", 1, 6)}, + {in: "{ 2 = .b ", err: newParseError("syntax error: unexpected $end", 1, 10)}, + {in: "{ + }", err: newParseError("syntax error: unexpected +", 1, 3)}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + _, err := Parse(tc.in) + + assert.Equal(t, tc.err, err) + }) + } +} + +func TestSpansetFilterOperatorPrecedence(t *testing.T) { + tests := []struct { + in string + expected FieldExpression + }{ + { + in: "{ .a * .b + .c 
}", + expected: newBinaryOperation(opAdd, + newBinaryOperation(opMult, newAttribute("a"), newAttribute("b")), + newAttribute("c")), + }, + { + in: "{ .a + .b * .c }", + expected: newBinaryOperation(opAdd, + newAttribute("a"), + newBinaryOperation(opMult, newAttribute("b"), newAttribute("c"))), + }, + { + in: "{ ( .a + .b ) * .c }", + expected: newBinaryOperation(opMult, + newBinaryOperation(opAdd, newAttribute("a"), newAttribute("b")), + newAttribute("c")), + }, + { + in: "{ .a + .b ^ .c }", + expected: newBinaryOperation(opAdd, + newAttribute("a"), + newBinaryOperation(opPower, newAttribute("b"), newAttribute("c"))), + }, + { + in: "{ .a = .b + .c }", + expected: newBinaryOperation(opEqual, + newAttribute("a"), + newBinaryOperation(opAdd, newAttribute("b"), newAttribute("c"))), + }, + { + in: "{ .a + .b = .c }", + expected: newBinaryOperation(opEqual, + newBinaryOperation(opAdd, newAttribute("a"), newAttribute("b")), + newAttribute("c")), + }, + { + in: "{ .c - -.a + .b }", + expected: newBinaryOperation(opAdd, + newBinaryOperation(opSub, newAttribute("c"), newUnaryOperation(opSub, newAttribute("a"))), + newAttribute("b")), + }, + { + in: "{ .c - -( .a + .b ) }", + expected: newBinaryOperation(opSub, + newAttribute("c"), + newUnaryOperation(opSub, newBinaryOperation(opAdd, newAttribute("a"), newAttribute("b")))), + }, + { + in: "{ .a && .b = .c }", + expected: newBinaryOperation(opAnd, + newAttribute("a"), + newBinaryOperation(opEqual, newAttribute("b"), newAttribute("c"))), + }, + { + in: "{ .a = .b && .c }", + expected: newBinaryOperation(opAnd, + newBinaryOperation(opEqual, newAttribute("a"), newAttribute("b")), + newAttribute("c")), + }, + { + in: "{ .a = !.b && .c }", + expected: newBinaryOperation(opAnd, + newBinaryOperation(opEqual, newAttribute("a"), newUnaryOperation(opNot, newAttribute("b"))), + newAttribute("c")), + }, + { + in: "{ .a = !( .b && .c ) }", + expected: newBinaryOperation(opEqual, + newAttribute("a"), + newUnaryOperation(opNot, newBinaryOperation(opAnd, newAttribute("b"), newAttribute("c")))), + }, + { + in: "{ .a = .b || .c = .d}", + expected: newBinaryOperation(opOr, + newBinaryOperation(opEqual, newAttribute("a"), newAttribute("b")), + newBinaryOperation(opEqual, newAttribute("c"), newAttribute("d"))), + }, + { + in: "{ !.a = .b }", + expected: newBinaryOperation(opEqual, + newUnaryOperation(opNot, newAttribute("a")), + newAttribute("b")), + }, + { + in: "{ !(.a = .b) }", + expected: newUnaryOperation(opNot, newBinaryOperation(opEqual, + newAttribute("a"), + newAttribute("b"))), + }, + { + in: "{ -.a = .b }", + expected: newBinaryOperation(opEqual, + newUnaryOperation(opSub, newAttribute("a")), + newAttribute("b")), + }, + { + in: "{ -(.a = .b) }", + expected: newUnaryOperation(opSub, newBinaryOperation(opEqual, + newAttribute("a"), + newAttribute("b"))), + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(newSpansetFilter(tc.expected))}, actual) + }) + } +} + +func TestSpansetFilterStatics(t *testing.T) { + tests := []struct { + in string + expected FieldExpression + }{ + {in: "{ true }", expected: newStaticBool(true)}, + {in: "{ false }", expected: newStaticBool(false)}, + {in: `{ "true" }`, expected: newStaticString("true")}, + {in: `{ "true\"" }`, expected: newStaticString("true\"")}, + {in: "{ `foo` }", expected: newStaticString("foo")}, + {in: "{ .foo }", expected: newAttribute("foo")}, + {in: "{ duration }", expected: 
newIntrinsic(intrinsicDuration)}, + {in: "{ childCount }", expected: newIntrinsic(intrinsicChildCount)}, + {in: "{ name }", expected: newIntrinsic(intrinsicName)}, + {in: "{ parent }", expected: newIntrinsic(intrinsicParent)}, + {in: "{ status }", expected: newIntrinsic(intrinsicStatus)}, + {in: "{ 4321 }", expected: newStaticInt(4321)}, + {in: "{ 1.234 }", expected: newStaticFloat(1.234)}, + {in: "{ nil }", expected: newStaticNil()}, + {in: "{ 3h }", expected: newStaticDuration(3 * time.Hour)}, + {in: "{ error }", expected: newStaticStatus(statusError)}, + {in: "{ ok }", expected: newStaticStatus(statusOk)}, + {in: "{ unset }", expected: newStaticStatus(statusUnset)}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(newSpansetFilter(tc.expected))}, actual) + }) + } +} + +func TestSpansetFilterOperators(t *testing.T) { + tests := []struct { + in string + err error + expected FieldExpression + }{ + {in: "{ .a + .b }", expected: newBinaryOperation(opAdd, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a - .b }", expected: newBinaryOperation(opSub, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a / .b }", expected: newBinaryOperation(opDiv, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a % .b }", expected: newBinaryOperation(opMod, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a * .b }", expected: newBinaryOperation(opMult, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a = .b }", expected: newBinaryOperation(opEqual, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a != .b }", expected: newBinaryOperation(opNotEqual, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a =~ .b }", expected: newBinaryOperation(opRegex, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a !~ .b }", expected: newBinaryOperation(opNotRegex, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a > .b }", expected: newBinaryOperation(opGreater, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a >= .b }", expected: newBinaryOperation(opGreaterEqual, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a < .b }", expected: newBinaryOperation(opLess, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a <= .b }", expected: newBinaryOperation(opLessEqual, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a ^ .b }", expected: newBinaryOperation(opPower, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a && .b }", expected: newBinaryOperation(opAnd, newAttribute("a"), newAttribute("b"))}, + {in: "{ .a || .b }", expected: newBinaryOperation(opOr, newAttribute("a"), newAttribute("b"))}, + {in: "{ !.b }", expected: newUnaryOperation(opNot, newAttribute("b"))}, + {in: "{ -.b }", expected: newUnaryOperation(opSub, newAttribute("b"))}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + actual, err := Parse(tc.in) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(newSpansetFilter(tc.expected))}, actual) + }) + } +} + +func TestAttributeNameErrors(t *testing.T) { + tests := []struct { + in string + err error + }{ + {in: "{ . foo }", err: newParseError("syntax error: unexpected END_ATTRIBUTE, expecting IDENTIFIER", 1, 3)}, + {in: "{ .foo .bar }", err: newParseError("syntax error: unexpected .", 1, 8)}, + {in: "{ parent. }", err: newParseError("syntax error: unexpected END_ATTRIBUTE, expecting IDENTIFIER or resource. 
or span.", 0, 3)}, + {in: ".3foo", err: newParseError("syntax error: unexpected IDENTIFIER", 1, 3)}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + _, err := Parse(tc.in) + + assert.Equal(t, tc.err, err) + }) + } +} + +func TestAttributes(t *testing.T) { + tests := []struct { + in string + expected FieldExpression + }{ + {in: "duration", expected: newIntrinsic(intrinsicDuration)}, + {in: ".foo", expected: newAttribute("foo")}, + {in: ".max", expected: newAttribute("max")}, + {in: ".status", expected: newAttribute("status")}, + {in: ".foo.bar", expected: newAttribute("foo.bar")}, + {in: ".foo.bar.baz", expected: newAttribute("foo.bar.baz")}, + {in: ".foo.3", expected: newAttribute("foo.3")}, + {in: ".foo3", expected: newAttribute("foo3")}, + {in: ".http_status", expected: newAttribute("http_status")}, + {in: ".http-status", expected: newAttribute("http-status")}, + {in: ".http+", expected: newAttribute("http+")}, + {in: ".😝", expected: newAttribute("😝")}, + {in: ".http-other", expected: newAttribute("http-other")}, + {in: "parent.duration", expected: newScopedAttribute(attributeScopeNone, true, "duration")}, + {in: "parent.foo.bar.baz", expected: newScopedAttribute(attributeScopeNone, true, "foo.bar.baz")}, + {in: "resource.foo.bar.baz", expected: newScopedAttribute(attributeScopeResource, false, "foo.bar.baz")}, + {in: "span.foo.bar", expected: newScopedAttribute(attributeScopeSpan, false, "foo.bar")}, + {in: "parent.resource.foo", expected: newScopedAttribute(attributeScopeResource, true, "foo")}, + {in: "parent.span.foo", expected: newScopedAttribute(attributeScopeSpan, true, "foo")}, + {in: "parent.resource.foo.bar.baz", expected: newScopedAttribute(attributeScopeResource, true, "foo.bar.baz")}, + {in: "parent.span.foo.bar", expected: newScopedAttribute(attributeScopeSpan, true, "foo.bar")}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + s := "{ " + tc.in + " }" + actual, err := Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(newSpansetFilter(tc.expected))}, actual) + + s = "{" + tc.in + "}" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(newSpansetFilter(tc.expected))}, actual) + + s = "{ (" + tc.in + ") }" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(newSpansetFilter(tc.expected))}, actual) + + s = "{ " + tc.in + " + " + tc.in + " }" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline(newSpansetFilter(newBinaryOperation(opAdd, tc.expected, tc.expected)))}, actual) + }) + } +} + +func TestIntrinsics(t *testing.T) { + tests := []struct { + in string + expected Intrinsic + }{ + {in: "duration", expected: intrinsicDuration}, + {in: "childCount", expected: intrinsicChildCount}, + {in: "name", expected: intrinsicName}, + {in: "status", expected: intrinsicStatus}, + {in: "parent", expected: intrinsicParent}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + // as intrinsic e.g. duration + s := "{ " + tc.in + " }" + actual, err := Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeNone, + parent: false, + name: tc.in, + intrinsic: tc.expected, + }))}, actual) + + // as attribute e.g .duration + s = "{ ." 
+ tc.in + "}" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeNone, + parent: false, + name: tc.in, + intrinsic: tc.expected, + }))}, actual) + + // as span scoped attribute e.g. span.duration + s = "{ span." + tc.in + "}" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeSpan, + parent: false, + name: tc.in, + intrinsic: intrinsicNone, + }))}, actual) + + // as resource scoped attribute e.g. resource.duration + s = "{ resource." + tc.in + "}" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeResource, + parent: false, + name: tc.in, + intrinsic: intrinsicNone, + }))}, actual) + + // as parent scoped intrinsic e.g. parent.duration + s = "{ parent." + tc.in + "}" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeNone, + parent: true, + name: tc.in, + intrinsic: tc.expected, + }))}, actual) + + // as nested parent scoped intrinsic e.g. parent.duration.foo + s = "{ parent." + tc.in + ".foo }" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeNone, + parent: true, + name: tc.in + ".foo", + intrinsic: intrinsicNone, + }))}, actual) + + // as parent resource scoped attribute e.g. parent.resource.duration + s = "{ parent.resource." + tc.in + "}" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeResource, + parent: true, + name: tc.in, + intrinsic: intrinsicNone, + }))}, actual) + + // as parent span scoped attribute e.g. parent.span.duration + s = "{ parent.span."
+ tc.in + "}" + actual, err = Parse(s) + + assert.NoError(t, err) + assert.Equal(t, &RootExpr{newPipeline( + newSpansetFilter(Attribute{ + scope: attributeScopeSpan, + parent: true, + name: tc.in, + intrinsic: intrinsicNone, + }))}, actual) + }) + } +} diff --git a/pkg/traceql/test_examples.yaml b/pkg/traceql/test_examples.yaml new file mode 100644 index 00000000000..dfd629e98bd --- /dev/null +++ b/pkg/traceql/test_examples.yaml @@ -0,0 +1,235 @@ +# valid queries parse successfully and return nil when calling .validate() +valid: + # spanset filters + - '{ true }' + - '{ !true }' + - '{ true && false }' + - '{ true || false }' + - '{ 1 = 2 }' + - '{ 1 != 2 }' + - '{ 1 > 2 }' + - '{ 1 >= 2 }' + - '{ 1 < 2 }' + - '{ 1 <= 2 }' + - '{ 1 + 1 = 2 }' + - '{ 1 - 1 = 2 }' + - '{ 1 * 1 = 2 }' + - '{ 1 / 1 = 2 }' + - '{ 1 ^ 1 = 2 }' + - '{ -1 = 2 }' + - '{ "test" =~ "test" }' + - '{ "test" !~ "test" }' + - '{ "test" = "test" }' + - '{ "test" != "test" }' + - '{ .a }' + - '{ !.a }' + - '{ .a && false }' + - '{ .a || true }' + - '{ .a = 2 }' + - '{ .a != 2 }' + - '{ .a > 2 }' + - '{ .a >= 2 }' + - '{ .a < 2 }' + - '{ .a <= 2 }' + - '{ .a + 1 = 2 }' + - '{ .a - 1 = 2 }' + - '{ .a * 1 = 2 }' + - '{ .a / 1 = 2 }' + - '{ .a ^ 1 = 2 }' + - '{ -.a = 2 }' + - '{ .a =~ "test" }' + - '{ .a !~ "test" }' + - '{ .a = "test" }' + - '{ .a != "test" }' + - '{ parent.a != 3 }' + - '{ parent.resource.a && true }' + - '{ parent.span.a > 3 }' + - '{ parent.duration = 1h }' + - '{ resource.a != 3 }' + - '{ span.a != 3 }' + - '{ !("test" != .c || ((true && .b) || 3 < .a)) }' + - '{ parent = nil }' + - '{ status = ok }' + - '{ status = unset }' + - '{ status = error }' + - '{ status != error }' + - '{ duration > 1s }' + - '{ duration > 1s * 2s }' + - '{ .foo = nil }' + - '{ 1 = childCount }' + - '{ 1 * 1h = 1 }' # combining float, int and duration can make sense, but can also be weird. we just accept it all + - '{ 1 / 1.1 = 1 }' + - '{ 1 < 1h }' + - '{ 1 <= 1.1 }' + # spanset expressions + - '{ true } && { true }' + - '{ true } || { true }' + - '{ true } >> { true }' + - '{ true } > { true }' + - '{ true } ~ { true }' + # scalar filters + - 'avg(.field) > 1' + - 'min(childCount) < 2' + - 'max(duration) >= 1s' + - 'min(.field) < max(duration)' + - 'sum(.field) = min(.field)' + - 'max(duration) > 1' # same note as above for int, float and duration + - 'min(.field) + max(.field) > 1' + - 'min(.field) + max(childCount) > max(duration) - min(.field)' + - 'avg(.field) > 1 - 3' # scalar expressions in scalar filters are currently not allowed. 
+  - 'min(childCount) < 2 / 6'
+  - 'max(1 - (2 + .field)) < avg(3 * duration ^ 2)'
+  - '3 = 2' # naked scalar filter, technically allowed
+  # pipelines
+  - '{ true } | { .a }'
+  - '{ true } | count() = 1'
+  - '{ true } | max(duration) = 1h'
+  - '{ true } | min(duration) = 1h'
+  - '{ true } | avg(duration) = 1h'
+  - '{ true } | sum(duration) = 1h'
+  - '{ true } | count() + count() = 1'
+  - 'count() = 1 | { true }'
+  - '{ true } | max(.a) = 1'
+  - '{ true } | max(parent.a) = 1'
+  - '{ true } | max(span.a) = 1'
+  - '{ true } | max(resource.a) = 1'
+  - '{ true } | max(1 + .a) = 1'
+  - '{ true } | max((1 + .a) * 2) = 1'
+  - '{ true } | coalesce()'
+  - '{ true } | by(.a)'
+  - '{ true } | by(1 + .a)'
+  - 'by(.a) | { true }'
+  - '{ true } | by(1 + .a) | coalesce()'
+  - '{ true } | by(name) | count() > 2'
+  - '{ true } | by(.field) | avg(.b) = 2'
+  - '{ true } | by(3 * .field - 2) | max(duration) < 1s'
+  - '{ true } | count() = 1 | { true }'
+  # pipeline expressions
+  - '({ true } | count()) + ({ true } | count()) = 1'
+  - '({ true } | count()) - ({ true } | count()) <= 1'
+  - '({ true } | count()) / ({ true } | count()) > ({ true } | count()) / ({ true } | count())'
+  - '({ true } | count()) * ({ true } | count()) < ({ true } | count()) / ({ true } | count())'
+  - '({ true } | count() > 1 | { false }) && ({ true } | count() > 1 | { false })'
+  - '({ true } | count() > 1 | { false }) || ({ true } | count() > 1 | { false })'
+  - '({ true } | count() > 1 | { false }) >> ({ true } | count() > 1 | { false })'
+  - '({ true } | count() > 1 | { false }) > ({ true } | count() > 1 | { false })'
+  - '({ true } | count() > 1 | { false }) ~ ({ true } | count() > 1 | { false })'
+  # random
+  - 'max(duration) > 3s | { status = error || .http.status = 500 }'
+  - '{ .http.status = 200 } | max(.field) - min(.field) > 3'
+  - '({ .http.status = 200 } | count()) + ({ name = `foo` } | avg(duration)) = 2'
+  - '{ (-(3 / 2) * .test - parent.blerg + .other)^3 = 2 }'
+  - '({ .a } | count()) > ({ .b } | count())'
+
+# parse_fails queries throw an error when parsing
+parse_fails:
+  - 'true'
+  - '[ true ]'
+  - '( true )'
+  # spanset filters
+  - '{ }' # possibly allow this? same as { true }?
+  - '{ . }'
+  - '{ < }'
+  - '{ .a < }'
+  - '{ .a < 3'
+  - '{ (.a < 3 }'
+  - '{ attribute = 4 }' # custom attribute not prefixed with ., span., resource. or parent.
+  - '{ .attribute == 4 }' # invalid operator
+  - '{ span. }'
+  # spanset expressions
+  - '{ true } + { true }'
+  - '{ true } - { true }'
+  - '{ true } * { true }'
+  - '{ true } / { true }'
+  - '{ true } ^ { true }'
+  - '{ true } = { true }' # an interesting operator. possible future addition
+  - '{ true } <= { true }'
+  - '{ true } >= { true }'
+  - '{ true } < { true }'
+  # scalar filters
+  - 'avg(.field) + 1' # scalar filters must resolve to a boolean
+  - 'sum(3) - 2'
+  - 'min(childCount) && 2'
+  # pipelines
+  - 'coalesce() | { true }' # pipelines can't start with coalesce
+  - 'count() > 3 && { true }' # scalar filters have to be in a pipeline
+  - '{ true } | count()' # naked scalar pipelines not allowed
+  - '{ true } | notAnAggregate() = 1'
+  - '{ true } | count = 1'
+  - '{ true } | max() = 1'
+  - '{ true } | by()'
+  # pipeline expressions
+  - '({ true }) + (count()) = 1'
+  - '({ true }) && (count())'
+  - '({ true } | count()) && ({ true } | count()) = 1'
+  - '({ true }) + ({ true }) = 1'
+  - '({ true } | count()) + ({ true } | count())'
+  # todo: improve the following
+  - '(by(namespace) | count()) > 2 * 2' # scalar expressions are currently not allowed in scalar pipelines
+  - '(by(namespace) | count()) * 2 > 2'
+  - '2 < (by(namespace) | count())' # static value needs to be on the RHS to remove conflicts with scalar expressions

+# validate_fails queries parse correctly and return an error when calling .validate()
+validate_fails:
+  # span expressions must evaluate to a boolean
+  - '{ 1 + 1 }'
+  - '{ parent }'
+  - '{ status }'
+  - '{ ok }'
+  - '{ 1.1 }'
+  - '{ 1h }'
+  - '{ "foo" }'
+  # binary operators - incorrect types
+  - '{ 1 + "foo" = 1 }'
+  - '{ 1 - true = 1 }'
+  - '{ 1 / ok = 1 }'
+  - '{ 1 % parent = 1 }'
+  - '{ 1 ^ name = 1 }'
+  - '{ 1 = "foo" }'
+  - '{ 1 != true }'
+  - '{ 1 > ok }'
+  - '{ 1 >= parent }'
+  - '{ 1 = name }'
+  - '{ 1 =~ 2 }'
+  - '{ 1 && "foo" }'
+  - '{ 1 || ok }'
+  - '{ true || 1.1 }'
+  - '{ "foo" = childCount }'
+  - '{ status > ok }'
+  # unary operators - incorrect types
+  - '{ -true }'
+  - '{ -"foo" = "bar" }'
+  - '{ -ok = status }'
+  - '{ -parent = nil }'
+  - '{ -name = "foo" }'
+  - '{ !"foo" = "bar" }'
+  - '{ !ok = status }'
+  - '{ !parent = nil }'
+  - '{ !name = "foo" }'
+  - '{ !1 = 1 }'
+  - '{ !1h = 1 }'
+  - '{ !1.1 = 1.1 }'
+  # scalar expressions must evaluate to a number
+  - 'max(name) = "foo"'
+  - 'min(parent) = nil'
+  - 'avg("foo") = "bar"'
+  - 'max(status) = ok'
+  - 'min(1 = 3) = 1'
+  # scalar expressions must reference the span
+  - 'sum(3) = 2'
+  - 'sum(3) = min(14)'
+  - 'min(2h) < max(duration)'
+  - 'max(1h + 2h) > 1'
+  - 'min(1.1 - 3) > 1'
+  - 'min(3) = max(duration)'
+  - 'min(1) = max(2) + 3'
+  # group expressions must reference the span
+  - '{ true } | by(1)'
+  - '{ true } | by("foo")'
+  # scalar filters have to match types
+  - 'min(1) = "foo"'
+  - 'avg(childCount) > "foo"'
+  - 'max(duration) < ok'

+# dump queries are parsed and the ast is dumped to stdout. this is a debugging tool
+dump:
\ No newline at end of file
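
Reviewer note (not part of the diff): the four top-level keys in test_examples.yaml describe a simple contract — valid queries must parse and validate, parse_fails must be rejected by the parser, validate_fails must parse but fail semantic checks, and dump is a debugging aid. A minimal sketch of a harness driving that contract is below. It assumes Parse returns (*RootExpr, error), that RootExpr exposes the unexported validate() error method declared on the element interface, and that gopkg.in/yaml.v2 and github.com/davecgh/go-spew are available in the module; the file and test names are hypothetical, and the actual harness in this change may differ.

// sketch_test.go — illustrative only, names are hypothetical
package traceql

import (
	"os"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// examplesFile mirrors the four top-level keys of test_examples.yaml.
type examplesFile struct {
	Valid         []string `yaml:"valid"`
	ParseFails    []string `yaml:"parse_fails"`
	ValidateFails []string `yaml:"validate_fails"`
	Dump          []string `yaml:"dump"`
}

func TestExamplesSketch(t *testing.T) {
	b, err := os.ReadFile("test_examples.yaml")
	require.NoError(t, err)

	ex := examplesFile{}
	require.NoError(t, yaml.Unmarshal(b, &ex))

	// valid: must parse and pass semantic validation
	for _, q := range ex.Valid {
		t.Run("valid/"+q, func(t *testing.T) {
			ast, err := Parse(q)
			require.NoError(t, err)
			assert.NoError(t, ast.validate())
		})
	}

	// parse_fails: must be rejected by the goyacc-generated parser
	for _, q := range ex.ParseFails {
		t.Run("parse_fails/"+q, func(t *testing.T) {
			_, err := Parse(q)
			assert.Error(t, err)
		})
	}

	// validate_fails: parse fine, then fail type/semantic checks
	for _, q := range ex.ValidateFails {
		t.Run("validate_fails/"+q, func(t *testing.T) {
			ast, err := Parse(q)
			require.NoError(t, err)
			assert.Error(t, ast.validate())
		})
	}

	// dump: pretty-print the AST to stdout for debugging
	for _, q := range ex.Dump {
		ast, err := Parse(q)
		require.NoError(t, err)
		spew.Dump(ast)
	}
}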