diff --git a/go.mod b/go.mod
index 014186841..e4e6310c8 100644
--- a/go.mod
+++ b/go.mod
@@ -32,6 +32,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
+ github.com/grafana/loki/operator/apis/loki v0.0.0-20241021105923-5e970e50b166
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
@@ -63,6 +64,7 @@ require (
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
+ sigs.k8s.io/controller-runtime v0.20.4 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
diff --git a/go.sum b/go.sum
index 3c11d65e3..67a9b0a9c 100644
--- a/go.sum
+++ b/go.sum
@@ -53,6 +53,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/grafana/loki/operator/apis/loki v0.0.0-20241021105923-5e970e50b166 h1:cmG5fwmF+0PsyerLecb7CU4bzNRg5+tDgO3PiNxskKo=
+github.com/grafana/loki/operator/apis/loki v0.0.0-20241021105923-5e970e50b166/go.mod h1:QggEReYyQzjnwTlj9hMeRaI2M/w3UPAwrMOXYzIyonc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
@@ -233,6 +235,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
+sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
diff --git a/pkg/config/loki.go b/pkg/config/loki.go
index 54151359b..58ad478ec 100644
--- a/pkg/config/loki.go
+++ b/pkg/config/loki.go
@@ -4,26 +4,30 @@ import (
"fmt"
"strings"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/netobserv/network-observability-console-plugin/pkg/utils"
)
type Loki struct {
- URL string `yaml:"url" json:"url"`
- Labels []string `yaml:"labels" json:"labels"`
- FieldsType map[string]string `yaml:"fieldsType" json:"fieldsType"`
- FieldsFormat map[string]string `yaml:"fieldsFormat" json:"fieldsFormat"`
- StatusURL string `yaml:"statusUrl,omitempty" json:"statusUrl,omitempty"`
- Timeout Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
- TenantID string `yaml:"tenantID,omitempty" json:"tenantID,omitempty"`
- TokenPath string `yaml:"tokenPath,omitempty" json:"tokenPath,omitempty"`
- SkipTLS bool `yaml:"skipTls,omitempty" json:"skipTls,omitempty"`
- CAPath string `yaml:"caPath,omitempty" json:"caPath,omitempty"`
- StatusSkipTLS bool `yaml:"statusSkipTls,omitempty" json:"statusSkipTls,omitempty"`
- StatusCAPath string `yaml:"statusCaPath,omitempty" json:"statusCaPath,omitempty"`
- StatusUserCertPath string `yaml:"statusUserCertPath,omitempty" json:"statusUserCertPath,omitempty"`
- StatusUserKeyPath string `yaml:"statusUserKeyPath,omitempty" json:"statusUserKeyPath,omitempty"`
- UseMocks bool `yaml:"useMocks,omitempty" json:"useMocks,omitempty"`
- ForwardUserToken bool `yaml:"forwardUserToken,omitempty" json:"forwardUserToken,omitempty"`
+ URL string `yaml:"url" json:"url"`
+ Labels []string `yaml:"labels" json:"labels"`
+ FieldsType map[string]string `yaml:"fieldsType" json:"fieldsType"`
+ FieldsFormat map[string]string `yaml:"fieldsFormat" json:"fieldsFormat"`
+ StatusURL string `yaml:"statusUrl,omitempty" json:"statusUrl,omitempty"`
+ Timeout Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
+ TenantID string `yaml:"tenantID,omitempty" json:"tenantID,omitempty"`
+ TokenPath string `yaml:"tokenPath,omitempty" json:"tokenPath,omitempty"`
+ SkipTLS bool `yaml:"skipTls,omitempty" json:"skipTls,omitempty"`
+ CAPath string `yaml:"caPath,omitempty" json:"caPath,omitempty"`
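+	// Status holds the LokiStack status as provided by the Loki operator. When set,
+	// status queries are answered from its conditions instead of Loki's status URL.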
+ Status *lokiv1.LokiStackStatus `yaml:"status,omitempty" json:"status,omitempty"`
+ StatusSkipTLS bool `yaml:"statusSkipTls,omitempty" json:"statusSkipTls,omitempty"`
+ StatusCAPath string `yaml:"statusCaPath,omitempty" json:"statusCaPath,omitempty"`
+ StatusUserCertPath string `yaml:"statusUserCertPath,omitempty" json:"statusUserCertPath,omitempty"`
+ StatusUserKeyPath string `yaml:"statusUserKeyPath,omitempty" json:"statusUserKeyPath,omitempty"`
+ UseMocks bool `yaml:"useMocks,omitempty" json:"useMocks,omitempty"`
+ ForwardUserToken bool `yaml:"forwardUserToken,omitempty" json:"forwardUserToken,omitempty"`
labelsMap map[string]struct{}
}
diff --git a/pkg/handler/loki.go b/pkg/handler/loki.go
index d36adaca7..f0f4ec8a7 100644
--- a/pkg/handler/loki.go
+++ b/pkg/handler/loki.go
@@ -198,6 +198,19 @@ func getLokiNamesForPrefix(cfg *config.Loki, lokiClient httpclient.Caller, filts
}
func (h *Handlers) getLokiStatus(r *http.Request) ([]byte, int, error) {
+ // Check if the status was provided by the operator
+ if h.Cfg.Loki.Status != nil {
+		for _, condition := range h.Cfg.Loki.Status.Conditions {
+			if condition.Reason == "ReadyComponents" {
+				if condition.Status == "True" {
+					return []byte("ready"), http.StatusOK, nil
+ }
+ break
+ }
+ }
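+		// The ReadyComponents condition is absent or not "True": report the stack as pending.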
+		return []byte("pending"), http.StatusBadRequest, nil
+ }
lokiClient := newLokiClient(&h.Cfg.Loki, r.Header, true)
baseURL := strings.TrimRight(h.Cfg.Loki.GetStatusURL(), "/")
return executeLokiQuery(fmt.Sprintf("%s/%s", baseURL, "ready"), lokiClient)
@@ -231,6 +244,10 @@ func (h *Handlers) LokiMetrics() func(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusBadRequest, "Loki is disabled")
return
}
+ if h.Cfg.Loki.Status != nil {
+			writeError(w, http.StatusBadRequest, "Loki status URL is not usable with the Loki operator")
+ return
+ }
lokiClient := newLokiClient(&h.Cfg.Loki, r.Header, true)
baseURL := strings.TrimRight(h.Cfg.Loki.GetStatusURL(), "/")
@@ -250,6 +267,10 @@ func (h *Handlers) LokiBuildInfos() func(w http.ResponseWriter, r *http.Request)
writeError(w, http.StatusBadRequest, "Loki is disabled")
return
}
+ if h.Cfg.Loki.Status != nil {
+			writeError(w, http.StatusBadRequest, "Loki status URL is not usable with the Loki operator")
+ return
+ }
lokiClient := newLokiClient(&h.Cfg.Loki, r.Header, true)
baseURL := strings.TrimRight(h.Cfg.Loki.GetStatusURL(), "/")
@@ -264,6 +285,11 @@ func (h *Handlers) LokiBuildInfos() func(w http.ResponseWriter, r *http.Request)
}
func (h *Handlers) fetchLokiConfig(cl httpclient.Caller, output any) error {
+ if h.Cfg.Loki.Status != nil {
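+		// The /config endpoint lives under the status URL, which is bypassed when the operator provides the status.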
+		return fmt.Errorf("loki status URL is not usable with the Loki operator")
+ }
+
baseURL := strings.TrimRight(h.Cfg.Loki.GetStatusURL(), "/")
resp, _, err := executeLokiQuery(fmt.Sprintf("%s/%s", baseURL, "config"), cl)
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/LICENSE b/vendor/github.com/grafana/loki/operator/apis/loki/LICENSE
new file mode 100644
index 000000000..be3f7b28e
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/LICENSE
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/alertingrule_types.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/alertingrule_types.go
new file mode 100644
index 000000000..cb5f4981d
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/alertingrule_types.go
@@ -0,0 +1,138 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AlertingRuleSpec defines the desired state of AlertingRule
+type AlertingRuleSpec struct {
+ // TenantID of tenant where the alerting rules are evaluated in.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID"
+ TenantID string `json:"tenantID"`
+
+ // List of groups for alerting rules.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Groups"
+ Groups []*AlertingRuleGroup `json:"groups"`
+}
+
+// AlertingRuleGroup defines a group of Loki alerting rules.
+type AlertingRuleGroup struct {
+ // Name of the alerting rule group. Must be unique within all alerting rules.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+ Name string `json:"name"`
+
+ // Interval defines the time interval between evaluation of the given
+ // alerting rule.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="1m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Evaluation Interval"
+ Interval PrometheusDuration `json:"interval"`
+
+ // Limit defines the number of alerts an alerting rule can produce. 0 is no limit.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Limit of firing alerts"
+ Limit int32 `json:"limit,omitempty"`
+
+ // Rules defines a list of alerting rules
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Rules"
+ Rules []*AlertingRuleGroupSpec `json:"rules"`
+}
+
+// AlertingRuleGroupSpec defines the spec for a Loki alerting rule.
+type AlertingRuleGroupSpec struct {
+ // The name of the alert. Must be a valid label value.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+ Alert string `json:"alert,omitempty"`
+
+ // The LogQL expression to evaluate. Every evaluation cycle this is
+ // evaluated at the current time, and all resultant time series become
+ // pending/firing alerts.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="LogQL Expression"
+ Expr string `json:"expr"`
+
+ // Alerts are considered firing once they have been returned for this long.
+ // Alerts which have not yet fired for long enough are considered pending.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Firing Threshold"
+ For PrometheusDuration `json:"for,omitempty"`
+
+ // Annotations to add to each alert.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Annotations"
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // Labels to add to each alert.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Labels"
+ Labels map[string]string `json:"labels,omitempty"`
+}
+
+// AlertingRuleStatus defines the observed state of AlertingRule
+type AlertingRuleStatus struct {
+ // Conditions of the AlertingRule generation health.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:storageversion
+//+kubebuilder:webhook:path=/validate-loki-grafana-com-v1-alertingrule,mutating=false,failurePolicy=fail,sideEffects=None,groups=loki.grafana.com,resources=alertingrules,verbs=create;update,versions=v1,name=valertingrule.loki.grafana.com,admissionReviewVersions=v1
+
+// AlertingRule is the Schema for the alertingrules API
+//
+// +operator-sdk:csv:customresourcedefinitions:displayName="AlertingRule",resources={{LokiStack,v1}}
+type AlertingRule struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AlertingRuleSpec `json:"spec,omitempty"`
+ Status AlertingRuleStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// AlertingRuleList contains a list of AlertingRule
+type AlertingRuleList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []AlertingRule `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&AlertingRule{}, &AlertingRuleList{})
+}
+
+// Hub declares the v1.AlertingRule as the hub CRD version.
+func (*AlertingRule) Hub() {}
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/doc.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/doc.go
new file mode 100644
index 000000000..357a3290f
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/doc.go
@@ -0,0 +1,4 @@
+// Package v1 contains API Schema definitions for the loki v1 API group
+// +kubebuilder:object:generate=true
+// +groupName=loki.grafana.com
+package v1
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/groupversion_info.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/groupversion_info.go
new file mode 100644
index 000000000..a7a74c47f
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/groupversion_info.go
@@ -0,0 +1,17 @@
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "loki.grafana.com", Version: "v1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/lokistack_types.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/lokistack_types.go
new file mode 100644
index 000000000..e0c55ec5d
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/lokistack_types.go
@@ -0,0 +1,1478 @@
+package v1
+
+import (
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// ManagementStateType defines the type for CR management states.
+//
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type ManagementStateType string
+
+const (
+ // ManagementStateManaged when the LokiStack custom resource should be
+ // reconciled by the operator.
+ ManagementStateManaged ManagementStateType = "Managed"
+
+ // ManagementStateUnmanaged when the LokiStack custom resource should not be
+ // reconciled by the operator.
+ ManagementStateUnmanaged ManagementStateType = "Unmanaged"
+)
+
+// LokiStackSizeType declares the type for loki cluster scale outs.
+//
+// +kubebuilder:validation:Enum="1x.demo";"1x.extra-small";"1x.small";"1x.medium"
+type LokiStackSizeType string
+
+const (
+ // SizeOneXDemo defines the size of a single Loki deployment
+ // with tiny resource requirements and without HA support.
+ // This size is intended to run in single-node clusters on laptops,
+ // it is only useful for very light testing, demonstrations, or prototypes.
+ // There are no ingestion/query performance guarantees.
+ // DO NOT USE THIS IN PRODUCTION!
+ SizeOneXDemo LokiStackSizeType = "1x.demo"
+
+ // SizeOneXExtraSmall defines the size of a single Loki deployment
+ // with extra small resources/limits requirements and without HA support.
+ // This size is ultimately dedicated for development and demo purposes.
+ // DO NOT USE THIS IN PRODUCTION!
+ //
+ // FIXME: Add clear description of ingestion/query performance expectations.
+ SizeOneXExtraSmall LokiStackSizeType = "1x.extra-small"
+
+ // SizeOneXSmall defines the size of a single Loki deployment
+ // with small resources/limits requirements and HA support for all
+ // Loki components. This size is dedicated for setup **without** the
+ // requirement for single replication factor and auto-compaction.
+ //
+ // FIXME: Add clear description of ingestion/query performance expectations.
+ SizeOneXSmall LokiStackSizeType = "1x.small"
+
+ // SizeOneXMedium defines the size of a single Loki deployment
+ // with small resources/limits requirements and HA support for all
+ // Loki components. This size is dedicated for setup **with** the
+ // requirement for single replication factor and auto-compaction.
+ //
+ // FIXME: Add clear description of ingestion/query performance expectations.
+ SizeOneXMedium LokiStackSizeType = "1x.medium"
+)
+
+// SubjectKind is a kind of LokiStack Gateway RBAC subject.
+//
+// +kubebuilder:validation:Enum=user;group
+type SubjectKind string
+
+const (
+ // User represents a subject that is a user.
+ User SubjectKind = "user"
+ // Group represents a subject that is a group.
+ Group SubjectKind = "group"
+)
+
+// Subject represents a subject that has been bound to a role.
+type Subject struct {
+ Name string `json:"name"`
+ Kind SubjectKind `json:"kind"`
+}
+
+// RoleBindingsSpec binds a set of roles to a set of subjects.
+type RoleBindingsSpec struct {
+ Name string `json:"name"`
+ Subjects []Subject `json:"subjects"`
+ Roles []string `json:"roles"`
+}
+
+// PermissionType is a LokiStack Gateway RBAC permission.
+//
+// +kubebuilder:validation:Enum=read;write
+type PermissionType string
+
+const (
+ // Write gives access to write data to a tenant.
+ Write PermissionType = "write"
+ // Read gives access to read data from a tenant.
+ Read PermissionType = "read"
+)
+
+// RoleSpec describes a set of permissions to interact with a tenant.
+type RoleSpec struct {
+ Name string `json:"name"`
+ Resources []string `json:"resources"`
+ Tenants []string `json:"tenants"`
+ Permissions []PermissionType `json:"permissions"`
+}
+
+// OPASpec defines the opa configuration spec for lokiStack Gateway component.
+type OPASpec struct {
+ // URL defines the third-party endpoint for authorization.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OpenPolicyAgent URL"
+ URL string `json:"url"`
+}
+
+// AuthorizationSpec defines the opa, role bindings and roles
+// configuration per tenant for lokiStack Gateway component.
+type AuthorizationSpec struct {
+ // OPA defines the spec for the third-party endpoint for tenant's authorization.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OPA Configuration"
+ OPA *OPASpec `json:"opa"`
+ // Roles defines a set of permissions to interact with a tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Roles"
+ Roles []RoleSpec `json:"roles"`
+ // RoleBindings defines configuration to bind a set of roles to a set of subjects.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Static Role Bindings"
+ RoleBindings []RoleBindingsSpec `json:"roleBindings"`
+}
+
+// TenantSecretSpec is a secret reference containing name only
+// for a secret living in the same namespace as the LokiStack custom resource.
+type TenantSecretSpec struct {
+ // Name of a secret in the namespace configured for tenant secrets.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Tenant Secret Name"
+ Name string `json:"name"`
+}
+
+// OIDCSpec defines the oidc configuration spec for lokiStack Gateway component.
+type OIDCSpec struct {
+ // Secret defines the spec for the clientID and clientSecret for tenant's authentication.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Secret"
+ Secret *TenantSecretSpec `json:"secret"`
+ // IssuerCA defines the spec for the issuer CA for tenant's authentication.
+ //
+ // +optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="IssuerCA ConfigMap"
+ IssuerCA *CASpec `json:"issuerCA"`
+ // IssuerURL defines the URL for issuer.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Issuer URL"
+ IssuerURL string `json:"issuerURL"`
+ // RedirectURL defines the URL for redirect.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Redirect URL"
+ RedirectURL string `json:"redirectURL,omitempty"`
+ // Group claim field from ID Token
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ GroupClaim string `json:"groupClaim,omitempty"`
+ // User claim field from ID Token
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ UsernameClaim string `json:"usernameClaim,omitempty"`
+}
+
+// MTLSSpec specifies mTLS configuration parameters.
+type MTLSSpec struct {
+ // CA defines the spec for the custom CA for tenant's authentication.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="CA ConfigMap"
+ CA *CASpec `json:"ca"`
+}
+
+// AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component.
+type AuthenticationSpec struct {
+ // TenantName defines the name of the tenant.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant Name"
+ TenantName string `json:"tenantName"`
+ // TenantID defines the id of the tenant.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID"
+ TenantID string `json:"tenantId"`
+ // OIDC defines the spec for the OIDC tenant's authentication.
+ //
+ // +optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OIDC Configuration"
+ OIDC *OIDCSpec `json:"oidc,omitempty"`
+
+ // TLSConfig defines the spec for the mTLS tenant's authentication.
+ //
+ // +optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="mTLS Configuration"
+ MTLS *MTLSSpec `json:"mTLS,omitempty"`
+}
+
+// ModeType is the authentication/authorization mode in which LokiStack Gateway will be configured.
+//
+// +kubebuilder:validation:Enum=static;dynamic;openshift-logging;openshift-network
+type ModeType string
+
+const (
+ // Static mode asserts the Authorization Spec's Roles and RoleBindings
+ // using an in-process OpenPolicyAgent Rego authorizer.
+ Static ModeType = "static"
+ // Dynamic mode delegates the authorization to a third-party OPA-compatible endpoint.
+ Dynamic ModeType = "dynamic"
+ // OpenshiftLogging mode provides fully automatic OpenShift in-cluster authentication and authorization support for application, infrastructure and audit logs.
+ OpenshiftLogging ModeType = "openshift-logging"
+ // OpenshiftNetwork mode provides fully automatic OpenShift in-cluster authentication and authorization support for network logs only.
+ OpenshiftNetwork ModeType = "openshift-network"
+)
+
+// TenantsSpec defines the mode, authentication and authorization
+// configuration of the lokiStack gateway component.
+type TenantsSpec struct {
+ // Mode defines the mode in which lokistack-gateway component will be configured.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:default:=openshift-logging
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:static","urn:alm:descriptor:com.tectonic.ui:select:dynamic","urn:alm:descriptor:com.tectonic.ui:select:openshift-logging","urn:alm:descriptor:com.tectonic.ui:select:openshift-network"},displayName="Mode"
+ Mode ModeType `json:"mode"`
+ // Authentication defines the lokistack-gateway component authentication configuration spec per tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authentication"
+ Authentication []AuthenticationSpec `json:"authentication,omitempty"`
+ // Authorization defines the lokistack-gateway component authorization configuration spec per tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Authorization"
+ Authorization *AuthorizationSpec `json:"authorization,omitempty"`
+
+ // Openshift defines the configuration specific to Openshift modes.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Openshift"
+ Openshift *OpenshiftTenantSpec `json:"openshift,omitempty"`
+}
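+
+// As an illustration only (tenant, secret and issuer names are invented), a
+// static-mode TenantsSpec could be populated like this:
+//
+//	tenants := TenantsSpec{
+//		Mode: Static,
+//		Authentication: []AuthenticationSpec{{
+//			TenantName: "team-a",
+//			TenantID:   "team-a",
+//			OIDC: &OIDCSpec{
+//				Secret:    &TenantSecretSpec{Name: "team-a-oidc"},
+//				IssuerURL: "https://issuer.example.com",
+//			},
+//		}},
+//		Authorization: &AuthorizationSpec{
+//			Roles:        []RoleSpec{ /* ... */ },
+//			RoleBindings: []RoleBindingsSpec{ /* ... */ },
+//		},
+//	}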
+
+// OpenshiftTenantSpec defines the configuration specific to Openshift modes.
+type OpenshiftTenantSpec struct {
+ // AdminGroups defines a list of groups whose members are considered to have admin privileges by the Loki Operator.
+ // Setting this to an empty array disables admin groups.
+ //
+ // By default the following groups are considered admin-groups:
+ // - system:cluster-admins
+ // - cluster-admin
+ // - dedicated-admin
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Admin Groups"
+ AdminGroups []string `json:"adminGroups"`
+}
+
+// LokiComponentSpec defines the requirements to configure scheduling
+// of each loki component individually.
+type LokiComponentSpec struct {
+ // Replicas defines the number of replica pods of the component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:hidden"
+ Replicas int32 `json:"replicas,omitempty"`
+
+ // NodeSelector defines the labels required by a node to schedule
+ // the component onto it.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // Tolerations defines the tolerations required by a node to schedule
+ // the component onto it.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+ // PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ // of a component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podAntiAffinity",displayName="PodAntiAffinity"
+ PodAntiAffinity *corev1.PodAntiAffinity `json:"podAntiAffinity,omitempty"`
+}
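+
+// For illustration (the label and taint values are invented), a component
+// pinned to dedicated infra nodes could be configured as:
+//
+//	ingester := LokiComponentSpec{
+//		Replicas:     3,
+//		NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""},
+//		Tolerations: []corev1.Toleration{{
+//			Key:      "node-role.kubernetes.io/infra",
+//			Operator: corev1.TolerationOpExists,
+//			Effect:   corev1.TaintEffectNoSchedule,
+//		}},
+//	}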
+
+// LokiTemplateSpec defines the template of all requirements to configure
+// scheduling of all Loki components to be deployed.
+type LokiTemplateSpec struct {
+ // Compactor defines the compaction component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Compactor pods"
+ Compactor *LokiComponentSpec `json:"compactor,omitempty"`
+
+ // Distributor defines the distributor component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Distributor pods"
+ Distributor *LokiComponentSpec `json:"distributor,omitempty"`
+
+ // Ingester defines the ingester component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ingester pods"
+ Ingester *LokiComponentSpec `json:"ingester,omitempty"`
+
+ // Querier defines the querier component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Querier pods"
+ Querier *LokiComponentSpec `json:"querier,omitempty"`
+
+ // QueryFrontend defines the query frontend component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Frontend pods"
+ QueryFrontend *LokiComponentSpec `json:"queryFrontend,omitempty"`
+
+ // Gateway defines the lokistack gateway component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Gateway pods"
+ Gateway *LokiComponentSpec `json:"gateway,omitempty"`
+
+ // IndexGateway defines the index gateway component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Index Gateway pods"
+ IndexGateway *LokiComponentSpec `json:"indexGateway,omitempty"`
+
+ // Ruler defines the ruler component spec.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ruler pods"
+ Ruler *LokiComponentSpec `json:"ruler,omitempty"`
+}
+
+// ClusterProxy is the Proxy configuration when the cluster is behind a Proxy.
+type ClusterProxy struct {
+ // HTTPProxy configures the HTTP_PROXY/http_proxy env variable.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="HTTPProxy"
+ HTTPProxy string `json:"httpProxy,omitempty"`
+ // HTTPSProxy configures the HTTPS_PROXY/https_proxy env variable.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="HTTPSProxy"
+ HTTPSProxy string `json:"httpsProxy,omitempty"`
+ // NoProxy configures the NO_PROXY/no_proxy env variable.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="NoProxy"
+ NoProxy string `json:"noProxy,omitempty"`
+}
+
+// HashRingType defines the type of hash ring which can be used with the Loki cluster.
+//
+// +kubebuilder:validation:Enum=memberlist
+type HashRingType string
+
+const (
+ // HashRingMemberList when using memberlist for the distributed hash ring.
+ HashRingMemberList HashRingType = "memberlist"
+)
+
+// InstanceAddrType defines the type of pod network to use for advertising IPs to the ring.
+//
+// +kubebuilder:validation:Enum=default;podIP
+type InstanceAddrType string
+
+const (
+ // InstanceAddrDefault when using the first address from any private network interface (RFC 1918 and RFC 6598).
+ InstanceAddrDefault InstanceAddrType = "default"
+ // InstanceAddrPodIP when using the public pod IP from the cluster's pod network.
+ InstanceAddrPodIP InstanceAddrType = "podIP"
+)
+
+// MemberListSpec defines the configuration for the memberlist based hash ring.
+type MemberListSpec struct {
+ // InstanceAddrType defines the type of address to use to advertise to the ring.
+ // Defaults to the first address from any private network interfaces of the current pod.
+ // Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598)
+ // are not available.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:default","urn:alm:descriptor:com.tectonic.ui:select:podIP"},displayName="Instance Address"
+ InstanceAddrType InstanceAddrType `json:"instanceAddrType,omitempty"`
+
+ // EnableIPv6 enables IPv6 support for the memberlist based hash ring.
+ //
+ // Currently this also forces the instanceAddrType to podIP to avoid local address lookup
+ // for the memberlist.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enable IPv6"
+ EnableIPv6 bool `json:"enableIPv6,omitempty"`
+}
+
+// HashRingSpec defines the hash ring configuration
+type HashRingSpec struct {
+ // Type of hash ring implementation that should be used
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:memberlist"},displayName="Type"
+ // +kubebuilder:default:=memberlist
+ Type HashRingType `json:"type"`
+
+ // MemberList configuration spec
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Memberlist Config"
+ MemberList *MemberListSpec `json:"memberlist,omitempty"`
+}
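+
+// A minimal sketch of a memberlist ring that advertises the pod IP, e.g. for
+// clusters without RFC 1918/RFC 6598 pod networks:
+//
+//	ring := HashRingSpec{
+//		Type: HashRingMemberList,
+//		MemberList: &MemberListSpec{
+//			InstanceAddrType: InstanceAddrPodIP,
+//		},
+//	}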
+
+type CASpec struct {
+ // Key is the data key of a ConfigMap containing a CA certificate.
+ // It needs to be in the same namespace as the LokiStack custom resource.
+ // If empty, it defaults to "service-ca.crt".
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="CA ConfigMap Key"
+ CAKey string `json:"caKey,omitempty"`
+ // CA is the name of a ConfigMap containing a CA certificate.
+ // It needs to be in the same namespace as the LokiStack custom resource.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:ConfigMap",displayName="CA ConfigMap Name"
+ CA string `json:"caName"`
+}
+
+// ObjectStorageTLSSpec is the TLS configuration for reaching the object storage endpoint.
+type ObjectStorageTLSSpec struct {
+ CASpec `json:",inline"`
+}
+
+// ObjectStorageSecretType defines the type of storage which can be used with the Loki cluster.
+//
+// +kubebuilder:validation:Enum=azure;gcs;s3;swift;alibabacloud
+type ObjectStorageSecretType string
+
+const (
+ // ObjectStorageSecretAzure when using Azure for Loki storage
+ ObjectStorageSecretAzure ObjectStorageSecretType = "azure"
+
+ // ObjectStorageSecretGCS when using GCS for Loki storage
+ ObjectStorageSecretGCS ObjectStorageSecretType = "gcs"
+
+ // ObjectStorageSecretS3 when using S3 for Loki storage
+ ObjectStorageSecretS3 ObjectStorageSecretType = "s3"
+
+ // ObjectStorageSecretSwift when using Swift for Loki storage
+ ObjectStorageSecretSwift ObjectStorageSecretType = "swift"
+
+ // ObjectStorageSecretAlibabaCloud when using AlibabaCloud OSS for Loki storage
+ ObjectStorageSecretAlibabaCloud ObjectStorageSecretType = "alibabacloud"
+)
+
+// ObjectStorageSecretSpec is a secret reference containing name only, no namespace.
+type ObjectStorageSecretSpec struct {
+ // Type of object storage that should be used
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:azure","urn:alm:descriptor:com.tectonic.ui:select:gcs","urn:alm:descriptor:com.tectonic.ui:select:s3","urn:alm:descriptor:com.tectonic.ui:select:swift","urn:alm:descriptor:com.tectonic.ui:select:alibabacloud"},displayName="Object Storage Secret Type"
+ Type ObjectStorageSecretType `json:"type"`
+
+ // Name of a secret in the namespace configured for object storage secrets.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Object Storage Secret Name"
+ Name string `json:"name"`
+
+ // CredentialMode can be used to set the desired credential mode for authenticating with the object storage.
+ // If this is not set, then the operator tries to infer the credential mode from the provided secret and its
+ // own configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ CredentialMode CredentialMode `json:"credentialMode,omitempty"`
+}
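+
+// An illustrative reference to an S3 secret using static credentials (the
+// secret name is invented):
+//
+//	secret := ObjectStorageSecretSpec{
+//		Type:           ObjectStorageSecretS3,
+//		Name:           "logging-loki-s3",
+//		CredentialMode: CredentialModeStatic,
+//	}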
+
+// ObjectStorageSchemaVersion defines the storage schema version which will be
+// used with the Loki cluster.
+//
+// +kubebuilder:validation:Enum=v11;v12;v13
+type ObjectStorageSchemaVersion string
+
+const (
+ // ObjectStorageSchemaV11 when using v11 for the storage schema
+ ObjectStorageSchemaV11 ObjectStorageSchemaVersion = "v11"
+
+ // ObjectStorageSchemaV12 when using v12 for the storage schema
+ ObjectStorageSchemaV12 ObjectStorageSchemaVersion = "v12"
+
+ // ObjectStorageSchemaV13 when using v13 for the storage schema
+ ObjectStorageSchemaV13 ObjectStorageSchemaVersion = "v13"
+)
+
+// ObjectStorageSchema defines a schema version and the date when it will become effective.
+type ObjectStorageSchema struct {
+ // Version for writing and reading logs.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:v11","urn:alm:descriptor:com.tectonic.ui:select:v12","urn:alm:descriptor:com.tectonic.ui:select:v13"},displayName="Version"
+ Version ObjectStorageSchemaVersion `json:"version"`
+
+ // EffectiveDate contains a date in YYYY-MM-DD format which is interpreted in the UTC time zone.
+ //
+ // The configuration always needs at least one schema that is currently valid. This means that when creating a new
+ // LokiStack it is recommended to add a schema with the latest available version and an effective date of "yesterday".
+ // New schema versions added to the configuration always need to be placed "in the future", so that Loki can start
+ // using it once the day rolls over.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ EffectiveDate StorageSchemaEffectiveDate `json:"effectiveDate"`
+}
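+
+// Following the guidance above, a LokiStack created on 2024-06-02 could
+// declare a v13 schema with yesterday's date so it is valid immediately
+// (the dates are examples):
+//
+//	schema := ObjectStorageSchema{
+//		Version:       ObjectStorageSchemaV13,
+//		EffectiveDate: "2024-06-01",
+//	}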
+
+// ObjectStorageSpec defines the requirements to access the object
+// storage bucket to persist logs by the ingester component.
+type ObjectStorageSpec struct {
+ // Schemas for reading and writing logs.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:default:={{version:v11,effectiveDate:"2020-10-11"}}
+ Schemas []ObjectStorageSchema `json:"schemas"`
+
+ // Secret for object storage authentication.
+ // Name of a secret in the same namespace as the LokiStack custom resource.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ Secret ObjectStorageSecretSpec `json:"secret"`
+
+ // TLS configuration for reaching the object storage endpoint.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="TLS Config"
+ TLS *ObjectStorageTLSSpec `json:"tls,omitempty"`
+}
+
+// QueryLimitSpec defines the limits applied at the query path.
+type QueryLimitSpec struct {
+ // MaxEntriesLimitPerQuery defines the maximum number of log entries
+ // that will be returned for a query.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Entries Limit per Query"
+ MaxEntriesLimitPerQuery int32 `json:"maxEntriesLimitPerQuery,omitempty"`
+
+ // MaxChunksPerQuery defines the maximum number of chunks
+ // that can be fetched by a single query.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Chunk per Query"
+ MaxChunksPerQuery int32 `json:"maxChunksPerQuery,omitempty"`
+
+ // MaxQuerySeries defines the maximum number of unique series
+ // that can be returned by a metric query.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Query Series"
+ MaxQuerySeries int32 `json:"maxQuerySeries,omitempty"`
+
+ // Timeout when querying ingesters or storage during the execution of a query request.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="3m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Timeout"
+ QueryTimeout string `json:"queryTimeout,omitempty"`
+
+ // CardinalityLimit defines the cardinality limit for index queries.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Cardinality Limit"
+ CardinalityLimit int32 `json:"cardinalityLimit,omitempty"`
+
+ // MaxVolumeSeries defines the maximum number of aggregated series in a log-volume response
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Volume Series"
+ MaxVolumeSeries int32 `json:"maxVolumeSeries,omitempty"`
+}
+
+// BlockedQueryType defines which type of query a blocked query should apply to.
+//
+// +kubebuilder:validation:Enum=filter;limited;metric
+type BlockedQueryType string
+
+const (
+ // BlockedQueryFilter is used, when the blocked query should apply to queries using a log filter.
+ BlockedQueryFilter BlockedQueryType = "filter"
+ // BlockedQueryLimited is used, when the blocked query should apply to queries without a filter or a metric aggregation.
+ BlockedQueryLimited BlockedQueryType = "limited"
+ // BlockedQueryMetric is used, when the blocked query should apply to queries with an aggregation.
+ BlockedQueryMetric BlockedQueryType = "metric"
+)
+
+// BlockedQueryTypes defines a slice of BlockedQueryType values to be used for a blocked query.
+type BlockedQueryTypes []BlockedQueryType
+
+// BlockedQuerySpec defines the rule spec for queries to be blocked.
+//
+// +kubebuilder:validation:MinProperties:=1
+type BlockedQuerySpec struct {
+ // Hash is a 32-bit FNV-1 hash of the query string.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Query Hash"
+ Hash int32 `json:"hash,omitempty"`
+ // Pattern defines the pattern matching the queries to be blocked.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Pattern"
+ Pattern string `json:"pattern,omitempty"`
+ // Regex defines if the pattern is a regular expression. If false the pattern will be used only for exact matches.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Regex"
+ Regex bool `json:"regex,omitempty"`
+ // Types defines the list of query types that should be considered for blocking.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Types"
+ Types BlockedQueryTypes `json:"types,omitempty"`
+}
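+
+// The Hash field can be derived with Go's hash/fnv package; a sketch (the
+// query string is an example, and the uint32 sum is stored as int32 here):
+//
+//	h := fnv.New32() // FNV-1, not FNV-1a
+//	h.Write([]byte(`{env="prod"} |= "error"`))
+//	blocked := BlockedQuerySpec{Hash: int32(h.Sum32())}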
+
+// PerTenantQueryLimitSpec defines the limits applied to per tenant query path.
+type PerTenantQueryLimitSpec struct {
+ QueryLimitSpec `json:",omitempty"`
+
+ // Blocked defines the list of rules to block matching queries.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Blocked"
+ Blocked []BlockedQuerySpec `json:"blocked,omitempty"`
+}
+
+// IngestionLimitSpec defines the limits applied at the ingestion path.
+type IngestionLimitSpec struct {
+ // IngestionRate defines the sample size per second. Units MB.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Rate (in MB)"
+ IngestionRate int32 `json:"ingestionRate,omitempty"`
+
+ // IngestionBurstSize defines the local rate-limited sample size per
+ // distributor replica. It should be set at least to the
+ // maximum log size expected in a single push request.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Ingestion Burst Size (in MB)"
+ IngestionBurstSize int32 `json:"ingestionBurstSize,omitempty"`
+
+ // MaxLabelNameLength defines the maximum number of characters allowed
+ // for label keys in log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Name Length"
+ MaxLabelNameLength int32 `json:"maxLabelNameLength,omitempty"`
+
+ // MaxLabelValueLength defines the maximum number of characters allowed
+ // for label values in log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Label Value Length"
+ MaxLabelValueLength int32 `json:"maxLabelValueLength,omitempty"`
+
+ // MaxLabelNamesPerSeries defines the maximum number of label names per series
+ // in each log stream.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Labels Names per Series"
+ MaxLabelNamesPerSeries int32 `json:"maxLabelNamesPerSeries,omitempty"`
+
+ // MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ // per tenant, across the cluster.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Global Streams per Tenant"
+ MaxGlobalStreamsPerTenant int32 `json:"maxGlobalStreamsPerTenant,omitempty"`
+
+ // MaxLineSize defines the maximum line size on ingestion path. Units in Bytes.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Line Size"
+ MaxLineSize int32 `json:"maxLineSize,omitempty"`
+
+ // PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ // target when applying automatic stream sharding. Units MB.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Per Stream Desired Rate (in MB)"
+ PerStreamDesiredRate int32 `json:"perStreamDesiredRate,omitempty"`
+
+ // PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Maximum byte rate per second per stream (in MB)"
+ PerStreamRateLimit int32 `json:"perStreamRateLimit,omitempty"`
+
+ // PerStreamRateLimitBurst defines the maximum burst bytes per stream. Units MB.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Maximum burst bytes per stream (in MB)"
+ PerStreamRateLimitBurst int32 `json:"perStreamRateLimitBurst,omitempty"`
+}
+
+// OTLPAttributeAction defines the action to execute when indexing
+// OTLP resource attributes. Resource attributes can be either added
+// to the index, the chunk structured metadata or entirely dropped.
+type OTLPAttributeAction string
+
+const (
+ // OTLPAttributeActionIndexLabel stores a resource attribute as a label, which is part of the index identifying streams.
+ OTLPAttributeActionIndexLabel OTLPAttributeAction = "indexLabel"
+ // OTLPAttributeActionStructuredMetadata stores an attribute as structured metadata with each log entry.
+ OTLPAttributeActionStructuredMetadata OTLPAttributeAction = "structuredMetadata"
+ // OTLPAttributeActionDrop removes the matching attributes from the log entry.
+ OTLPAttributeActionDrop OTLPAttributeAction = "drop"
+)
+
+// OTLPAttributesSpec contains the configuration for a set of attributes,
+// to store them as structured metadata or drop them altogether.
+type OTLPAttributesSpec struct {
+ // Action defines the indexing action for the selected attributes. They
+ // can be either added to structured metadata or dropped altogether.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum=structured_metadata;drop
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Action"
+ Action OTLPAttributeAction `json:"action"`
+
+ // Attributes allows choosing the attributes by listing their names.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Attribute Names"
+ Attributes []string `json:"attributes,omitempty"`
+
+ // Regex allows choosing the attributes by matching a regular expression.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Regular Expression"
+ Regex string `json:"regex,omitempty"`
+}
+
+// OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
+// to store them as index labels or structured metadata or drop them altogether.
+type OTLPResourceAttributesConfigSpec struct {
+ // Action defines the indexing action for the selected resource attributes. They
+ // can be either indexed as labels, added to structured metadata or dropped altogether.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum=index_label;structured_metadata;drop
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Action"
+ Action OTLPAttributeAction `json:"action"`
+
+ // Attributes is the list of attributes to configure for indexing, or to
+ // drop altogether.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Attribute Names"
+ Attributes []string `json:"attributes,omitempty"`
+
+ // Regex allows choosing the attributes by matching a regular expression.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Regular Expression"
+ Regex string `json:"regex,omitempty"`
+}
+
+// OTLPResourceAttributesSpec contains the configuration for resource attributes
+// to store them as index labels or structured metadata or drop them altogether.
+type OTLPResourceAttributesSpec struct {
+ // IgnoreDefaults controls whether to ignore the global configuration for resource attributes
+ // indexed as labels.
+ //
+ // If IgnoreDefaults is true, then this spec needs to contain at least one mapping to an index label.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Ignore Global Defaults"
+ IgnoreDefaults bool `json:"ignoreDefaults,omitempty"`
+
+ // Attributes contains the configuration for resource attributes
+ // to store them as index labels or structured metadata or drop them altogether.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Attributes"
+ Attributes []OTLPResourceAttributesConfigSpec `json:"attributes,omitempty"`
+}
+
+// GlobalOTLPSpec defines which resource, scope and log attributes are
+// stored as index labels or structured metadata, or dropped altogether,
+// for all tenants.
+type GlobalOTLPSpec struct {
+ // IndexedResourceAttributes contains the global configuration for resource attributes
+ // to store them as index labels.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Indexed Resource Attributes"
+ IndexedResourceAttributes []string `json:"indexedResourceAttributes,omitempty"`
+
+ OTLPSpec `json:",omitempty"`
+}
+
+// OTLPSpec defines which resource, scope and log attributes are stored
+// as index labels or structured metadata, or dropped altogether.
+type OTLPSpec struct {
+ // ResourceAttributes contains the configuration for resource attributes
+ // to store them as index labels or structured metadata or drop them altogether.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Resource Attributes"
+ ResourceAttributes *OTLPResourceAttributesSpec `json:"resourceAttributes,omitempty"`
+
+ // ScopeAttributes contains the configuration for scope attributes
+ // to store them as structured metadata or drop them altogether.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Scope Attributes"
+ ScopeAttributes []OTLPAttributesSpec `json:"scopeAttributes,omitempty"`
+
+ // LogAttributes contains the configuration for log attributes
+ // to store them as structured metadata or drop them altogether.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Log Attributes"
+ LogAttributes []OTLPAttributesSpec `json:"logAttributes,omitempty"`
+}
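+
+// An illustrative mapping (the attribute names are examples) that indexes one
+// resource attribute and drops debug-prefixed log attributes:
+//
+//	otlp := OTLPSpec{
+//		ResourceAttributes: &OTLPResourceAttributesSpec{
+//			Attributes: []OTLPResourceAttributesConfigSpec{{
+//				Action:     OTLPAttributeActionIndexLabel,
+//				Attributes: []string{"k8s.namespace.name"},
+//			}},
+//		},
+//		LogAttributes: []OTLPAttributesSpec{{
+//			Action: OTLPAttributeActionDrop,
+//			Regex:  `debug\..*`,
+//		}},
+//	}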
+
+// RetentionStreamSpec defines a log stream with separate retention time.
+type RetentionStreamSpec struct {
+ // Days contains the number of days logs are kept.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum:=1
+ Days uint `json:"days"`
+
+ // Priority defines the priority of this selector compared to other retention rules.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=1
+ Priority uint32 `json:"priority,omitempty"`
+
+ // Selector contains the LogQL query used to define the log stream.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ Selector string `json:"selector"`
+}
+
+// RetentionLimitSpec controls how long logs will be kept in storage.
+type RetentionLimitSpec struct {
+ // Days contains the number of days logs are kept.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum:=1
+ Days uint `json:"days"`
+
+ // Streams defines log streams with a separate retention time.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Streams []*RetentionStreamSpec `json:"streams,omitempty"`
+}
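+
+// A sketch of a retention policy that keeps logs for 30 days globally but
+// audit streams for 90 days (the selector is an example):
+//
+//	retention := RetentionLimitSpec{
+//		Days: 30,
+//		Streams: []*RetentionStreamSpec{{
+//			Days:     90,
+//			Priority: 1,
+//			Selector: `{log_type="audit"}`,
+//		}},
+//	}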
+
+// LimitsTemplateSpec defines the limits applied at ingestion or query path.
+type LimitsTemplateSpec struct {
+ // IngestionLimits defines the limits applied on ingested log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ IngestionLimits *IngestionLimitSpec `json:"ingestion,omitempty"`
+
+ // QueryLimits defines the limit applied on querying log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ QueryLimits *QueryLimitSpec `json:"queries,omitempty"`
+
+ // OTLP to configure which resource, scope and log attributes
+ // to store as labels or structured metadata or drop them altogether
+ // for all tenants.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ OTLP *GlobalOTLPSpec `json:"otlp,omitempty"`
+
+ // Retention defines how long logs are kept in storage.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Retention *RetentionLimitSpec `json:"retention,omitempty"`
+}
+
+// PerTenantLimitsTemplateSpec defines the limits applied at ingestion or query path for a single tenant.
+type PerTenantLimitsTemplateSpec struct {
+ // IngestionLimits defines the limits applied on ingested log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ IngestionLimits *IngestionLimitSpec `json:"ingestion,omitempty"`
+
+ // QueryLimits defines the limit applied on querying log streams.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ QueryLimits *PerTenantQueryLimitSpec `json:"queries,omitempty"`
+
+ // OTLP to configure which resource, scope and log attributes
+ // to store as labels or structured metadata or drop them altogether
+ // for a single tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ OTLP *OTLPSpec `json:"otlp,omitempty"`
+
+ // Retention defines how long logs are kept in storage.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Retention *RetentionLimitSpec `json:"retention,omitempty"`
+}
+
+// LimitsSpec defines the spec for limits applied at ingestion or query
+// path across the cluster or per tenant.
+type LimitsSpec struct {
+ // Global defines the limits applied globally across the cluster.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Global Limits"
+ Global *LimitsTemplateSpec `json:"global,omitempty"`
+
+ // Tenants defines the limits applied per tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Limits per Tenant"
+ Tenants map[string]PerTenantLimitsTemplateSpec `json:"tenants,omitempty"`
+}
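+
+// Combining the templates above, global limits with a per-tenant override
+// might look like this (the values are illustrative):
+//
+//	limits := LimitsSpec{
+//		Global: &LimitsTemplateSpec{
+//			IngestionLimits: &IngestionLimitSpec{IngestionRate: 4},
+//		},
+//		Tenants: map[string]PerTenantLimitsTemplateSpec{
+//			"team-a": {IngestionLimits: &IngestionLimitSpec{IngestionRate: 10}},
+//		},
+//	}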
+
+// RulesSpec defines the spec for the ruler component.
+type RulesSpec struct {
+ // Enabled defines a flag to enable/disable the ruler component
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enable"
+ Enabled bool `json:"enabled"`
+
+ // A selector to select which LokiRules to mount for loading alerting/recording
+ // rules from.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Selector"
+ Selector *metav1.LabelSelector `json:"selector,omitempty"`
+
+ // Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ // the namespace of the LokiStack object is used.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Namespace Selector"
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+}
+
+// LokiStackSpec defines the desired state of LokiStack
+type LokiStackSpec struct {
+ // ManagementState defines if the CR should be managed by the operator or not.
+ // Default is managed.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=Managed
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:Managed","urn:alm:descriptor:com.tectonic.ui:select:Unmanaged"},displayName="Management State"
+ ManagementState ManagementStateType `json:"managementState,omitempty"`
+
+ // Size defines one of the supported Loki deployment scale-out sizes.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small","urn:alm:descriptor:com.tectonic.ui:select:1x.small","urn:alm:descriptor:com.tectonic.ui:select:1x.medium"},displayName="LokiStack Size"
+ Size LokiStackSizeType `json:"size"`
+
+ // HashRing defines the spec for the distributed hash ring configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Hash Ring"
+ HashRing *HashRingSpec `json:"hashRing,omitempty"`
+
+ // Storage defines the spec for the object storage endpoint to store logs.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Object Storage"
+ Storage ObjectStorageSpec `json:"storage"`
+
+ // Storage class name defines the storage class for ingester/querier PVCs.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:StorageClass",displayName="Storage Class Name"
+ StorageClassName string `json:"storageClassName"`
+
+ // Proxy defines the spec for the object proxy to configure cluster proxy information.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Cluster Proxy"
+ Proxy *ClusterProxy `json:"proxy,omitempty"`
+
+ // ReplicationFactor defines the policy for log stream replication.
+ //
+ // Deprecated: Please use replication.factor instead. This field will be removed in future versions of this CRD.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum:=1
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Replication Factor"
+ ReplicationFactor int32 `json:"replicationFactor,omitempty"`
+
+ // Replication defines the configuration for Loki data replication.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Replication Spec"
+ Replication *ReplicationSpec `json:"replication,omitempty"`
+
+ // Rules defines the spec for the ruler component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rules"
+ Rules *RulesSpec `json:"rules,omitempty"`
+
+ // Limits defines the limits to be applied to log stream processing.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rate Limiting"
+ Limits *LimitsSpec `json:"limits,omitempty"`
+
+ // Template defines the resource/limits/tolerations/nodeselectors per component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Node Placement"
+ Template *LokiTemplateSpec `json:"template,omitempty"`
+
+ // Tenants defines the per-tenant authentication and authorization spec for the lokistack-gateway component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenants Configuration"
+ Tenants *TenantsSpec `json:"tenants,omitempty"`
+}
+
+type ReplicationSpec struct {
+ // Factor defines the policy for log stream replication.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum:=1
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Replication Factor"
+ Factor int32 `json:"factor,omitempty"`
+
+ // Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+ // IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Zones Spec"
+ Zones []ZoneSpec `json:"zones,omitempty"`
+}
+
+// ZoneSpec defines the spec to support zone-aware component deployments.
+type ZoneSpec struct {
+ // MaxSkew describes the maximum degree to which Pods can be unevenly distributed.
+ //
+ // +required
+ // +kubebuilder:default:=1
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Skew"
+ MaxSkew int `json:"maxSkew"`
+
+ // TopologyKey is the key that defines a topology in the Nodes' labels.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Topology Key"
+ TopologyKey string `json:"topologyKey"`
+}
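+
+// For zone-aware replication across three availability zones, a sketch using
+// the well-known zone topology label:
+//
+//	replication := ReplicationSpec{
+//		Factor: 3,
+//		Zones: []ZoneSpec{{
+//			MaxSkew:     1,
+//			TopologyKey: "topology.kubernetes.io/zone",
+//		}},
+//	}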
+
+// LokiStackConditionType defines the condition types of a Loki deployment.
+type LokiStackConditionType string
+
+const (
+ // ConditionReady defines the condition that all components in the Loki deployment are ready.
+ ConditionReady LokiStackConditionType = "Ready"
+
+ // ConditionPending defines the condition that some or all components are in pending state.
+ ConditionPending LokiStackConditionType = "Pending"
+
+ // ConditionFailed defines the condition that components in the Loki deployment failed to roll out.
+ ConditionFailed LokiStackConditionType = "Failed"
+
+ // ConditionDegraded defines the condition that some or all components in the Loki deployment
+ // are degraded or the cluster cannot connect to object storage.
+ ConditionDegraded LokiStackConditionType = "Degraded"
+
+ // ConditionWarning is used for configurations that are not recommended, but don't currently cause
+ // issues. There can be multiple warning conditions active at a time.
+ ConditionWarning LokiStackConditionType = "Warning"
+)
+
+// LokiStackConditionReason defines the type for valid reasons of a Loki deployment conditions.
+type LokiStackConditionReason string
+
+const (
+ // ReasonFailedComponents when all/some LokiStack components fail to roll out.
+ ReasonFailedComponents LokiStackConditionReason = "FailedComponents"
+ // ReasonPendingComponents when all/some LokiStack components are pending on dependencies.
+ ReasonPendingComponents LokiStackConditionReason = "PendingComponents"
+ // ReasonReadyComponents when all LokiStack components are ready to serve traffic.
+ ReasonReadyComponents LokiStackConditionReason = "ReadyComponents"
+ // ReasonMissingObjectStorageSecret when the required secret to store logs to object
+ // storage is missing.
+ ReasonMissingObjectStorageSecret LokiStackConditionReason = "MissingObjectStorageSecret"
+ // ReasonInvalidObjectStorageSecret when the format of the secret is invalid.
+ ReasonInvalidObjectStorageSecret LokiStackConditionReason = "InvalidObjectStorageSecret"
+ // ReasonMissingTokenCCOAuthSecret when the secret generated by CCO for token authentication is missing.
+ // This is usually a transient error because the secret is not immediately available after creating the
+ // CredentialsRequest, but it can persist if the CCO or its configuration are incorrect.
+ ReasonMissingTokenCCOAuthSecret LokiStackConditionReason = "MissingTokenCCOAuthenticationSecret"
+ // ReasonInvalidObjectStorageSchema when the spec contains one or more invalid schemas.
+ ReasonInvalidObjectStorageSchema LokiStackConditionReason = "InvalidObjectStorageSchema"
+ // ReasonMissingObjectStorageCAConfigMap when the required configmap to verify object storage
+ // certificates is missing.
+ ReasonMissingObjectStorageCAConfigMap LokiStackConditionReason = "MissingObjectStorageCAConfigMap"
+ // ReasonInvalidObjectStorageCAConfigMap when the format of the CA configmap is invalid.
+ ReasonInvalidObjectStorageCAConfigMap LokiStackConditionReason = "InvalidObjectStorageCAConfigMap"
+ // ReasonMissingRulerSecret when the required secret to authorize remote write connections
+ // for the ruler is missing.
+ ReasonMissingRulerSecret LokiStackConditionReason = "MissingRulerSecret"
+ // ReasonInvalidRulerSecret when the format of the ruler remote write authorization secret is invalid.
+ ReasonInvalidRulerSecret LokiStackConditionReason = "InvalidRulerSecret"
+ // ReasonInvalidReplicationConfiguration when the configured replication factor is not valid
+ // for the selected cluster size.
+ ReasonInvalidReplicationConfiguration LokiStackConditionReason = "InvalidReplicationConfiguration"
+ // ReasonMissingGatewayTenantSecret when the required tenant secret
+ // for authentication is missing.
+ ReasonMissingGatewayTenantSecret LokiStackConditionReason = "MissingGatewayTenantSecret"
+ // ReasonMissingGatewayTenantConfigMap when the required tenant configmap
+ // for authentication is missing.
+ ReasonMissingGatewayTenantConfigMap LokiStackConditionReason = "MissingGatewayTenantConfigMap"
+ // ReasonInvalidGatewayTenantSecret when the format of the secret is invalid.
+ ReasonInvalidGatewayTenantSecret LokiStackConditionReason = "InvalidGatewayTenantSecret"
+ // ReasonInvalidGatewayTenantConfigMap when the format of the configmap is invalid.
+ ReasonInvalidGatewayTenantConfigMap LokiStackConditionReason = "InvalidGatewayTenantConfigMap"
+ // ReasonMissingGatewayAuthenticationConfig when a tenant is missing the authentication configuration.
+ ReasonMissingGatewayAuthenticationConfig LokiStackConditionReason = "MissingGatewayTenantAuthenticationConfig"
+ // ReasonInvalidTenantsConfiguration when the tenant configuration provided is invalid.
+ ReasonInvalidTenantsConfiguration LokiStackConditionReason = "InvalidTenantsConfiguration"
+ // ReasonMissingGatewayOpenShiftBaseDomain when the reconciler cannot lookup the OpenShift DNS base domain.
+ ReasonMissingGatewayOpenShiftBaseDomain LokiStackConditionReason = "MissingGatewayOpenShiftBaseDomain"
+ // ReasonFailedCertificateRotation when the reconciler cannot rotate any of the required TLS certificates.
+ ReasonFailedCertificateRotation LokiStackConditionReason = "FailedCertificateRotation"
+ // ReasonQueryTimeoutInvalid when the QueryTimeout cannot be parsed.
+ ReasonQueryTimeoutInvalid LokiStackConditionReason = "ReasonQueryTimeoutInvalid"
+ // ReasonZoneAwareNodesMissing when the cluster does not contain any nodes with the labels needed for zone-awareness.
+ ReasonZoneAwareNodesMissing LokiStackConditionReason = "ReasonZoneAwareNodesMissing"
+ // ReasonZoneAwareEmptyLabel when the node-label used for zone-awareness has an empty value.
+ ReasonZoneAwareEmptyLabel LokiStackConditionReason = "ReasonZoneAwareEmptyLabel"
+ // ReasonStorageNeedsSchemaUpdate when the object storage schema version is older than V13
+ ReasonStorageNeedsSchemaUpdate LokiStackConditionReason = "StorageNeedsSchemaUpdate"
+)
+
+// PodStatus is a short description of the status a Pod can be in.
+type PodStatus string
+
+const (
+ // PodPending means the pod has been accepted by the system, but one or more of the containers
+ // has not been started. This includes time before being bound to a node, as well as time spent
+ // pulling images onto the host.
+ PodPending PodStatus = "Pending"
+ // PodRunning means the pod has been bound to a node and all of the containers have been started.
+ // At least one container is still running or is in the process of being restarted.
+ PodRunning PodStatus = "Running"
+ // PodReady means the pod has been started and the readiness probe reports a successful status.
+ PodReady PodStatus = "Ready"
+ // PodFailed means that all containers in the pod have terminated, and at least one container has
+ // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+ PodFailed PodStatus = "Failed"
+ // PodStatusUnknown is used when none of the other statuses apply or the information is not ready yet.
+ PodStatusUnknown PodStatus = "Unknown"
+)
+
+// PodStatusMap defines the type for mapping pod status to pod name.
+type PodStatusMap map[PodStatus][]string
+
+// LokiStackComponentStatus defines the map of per pod status per LokiStack component.
+// Each component is represented by a separate map of v1.Phase to a list of pods.
+type LokiStackComponentStatus struct {
+ // Compactor is a map to the pod status of the compactor pod.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Compactor",order=5
+ Compactor PodStatusMap `json:"compactor,omitempty"`
+
+ // Distributor is a map to the per pod status of the distributor deployment
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Distributor",order=1
+ Distributor PodStatusMap `json:"distributor,omitempty"`
+
+ // IndexGateway is a map to the per pod status of the index gateway statefulset
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="IndexGateway",order=6
+ IndexGateway PodStatusMap `json:"indexGateway,omitempty"`
+
+ // Ingester is a map to the per pod status of the ingester statefulset
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Ingester",order=2
+ Ingester PodStatusMap `json:"ingester,omitempty"`
+
+ // Querier is a map to the per pod status of the querier deployment
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Querier",order=3
+ Querier PodStatusMap `json:"querier,omitempty"`
+
+ // QueryFrontend is a map to the per pod status of the query frontend deployment
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Query Frontend",order=4
+ QueryFrontend PodStatusMap `json:"queryFrontend,omitempty"`
+
+ // Gateway is a map to the per pod status of the lokistack gateway deployment.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Gateway",order=5
+ Gateway PodStatusMap `json:"gateway,omitempty"`
+
+ // Ruler is a map to the per pod status of the lokistack ruler statefulset.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses",displayName="Ruler",order=6
+ Ruler PodStatusMap `json:"ruler,omitempty"`
+}
+
+// CredentialMode represents the type of authentication used for accessing the object storage.
+//
+// +kubebuilder:validation:Enum=static;token;token-cco
+type CredentialMode string
+
+const (
+ // CredentialModeStatic represents the usage of static, long-lived credentials stored in a Secret.
+ // This is the default authentication mode and available for all supported object storage types.
+ CredentialModeStatic CredentialMode = "static"
+ // CredentialModeToken represents the usage of short-lived tokens retrieved from a credential source.
+ // In this mode the static configuration does not contain credentials needed for the object storage.
+ // Instead, they are generated during runtime using a service, which allows for shorter-lived credentials and
+ // much more granular control. This authentication mode is not supported for all object storage types.
+ CredentialModeToken CredentialMode = "token"
+ // CredentialModeTokenCCO represents the usage of short-lived tokens retrieved from a credential source.
+ // This mode is similar to CredentialModeToken, but instead of having a user-configured credential source,
+ // it is configured by the environment and the operator relies on the Cloud Credential Operator to provide
+ // a secret. This mode is only supported for certain object storage types in certain runtime environments.
+ CredentialModeTokenCCO CredentialMode = "token-cco"
+)
+
+// LokiStackStorageStatus defines the observed state of
+// the Loki storage configuration.
+type LokiStackStorageStatus struct {
+ // Schemas is a list of schemas which have been applied
+ // to the LokiStack.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Schemas []ObjectStorageSchema `json:"schemas,omitempty"`
+
+ // CredentialMode contains the authentication mode used for accessing the object storage.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ CredentialMode CredentialMode `json:"credentialMode,omitempty"`
+}
+
+// LokiStackStatus defines the observed state of LokiStack
+type LokiStackStatus struct {
+ // Components provides summary of all Loki pod status grouped
+ // per component.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Components LokiStackComponentStatus `json:"components,omitempty"`
+
+ // Storage provides summary of all changes that have occurred
+ // to the storage configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Storage LokiStackStorageStatus `json:"storage,omitempty"`
+
+ // Conditions of the Loki deployment health.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+// +kubebuilder:resource:categories=logging
+// +kubebuilder:webhook:path=/validate-loki-grafana-com-v1-lokistack,mutating=false,failurePolicy=fail,sideEffects=None,groups=loki.grafana.com,resources=lokistacks,verbs=create;update,versions=v1,name=vlokistack.loki.grafana.com,admissionReviewVersions=v1
+
+// LokiStack is the Schema for the lokistacks API
+//
+// +operator-sdk:csv:customresourcedefinitions:displayName="LokiStack",resources={{Deployment,v1},{StatefulSet,v1},{ConfigMap,v1},{Ingress,v1},{Service,v1},{ServiceAccount,v1},{PersistentVolumeClaims,v1},{Route,v1},{ServiceMonitor,v1}}
+type LokiStack struct {
+ // LokiStack CR spec field.
+ Spec LokiStackSpec `json:"spec,omitempty"`
+ // LokiStack CR status field.
+ Status LokiStackStatus `json:"status,omitempty"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// LokiStackList contains a list of LokiStack
+type LokiStackList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []LokiStack `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&LokiStack{}, &LokiStackList{})
+}
+
+// Hub declares the v1.LokiStack as the hub CRD version.
+func (*LokiStack) Hub() {}
+
+func (t BlockedQueryTypes) String() string {
+ res := make([]string, 0, len(t))
+ for _, qt := range t {
+ res = append(res, string(qt))
+ }
+
+ return strings.Join(res, ",")
+}
+
+func (a OTLPAttributeAction) Value() string {
+ switch a {
+ case OTLPAttributeActionIndexLabel:
+ return "index_label"
+ case OTLPAttributeActionStructuredMetadata:
+ return "structured_metadata"
+ case OTLPAttributeActionDrop:
+ return "drop"
+ default:
+ return string(a)
+ }
+}
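The two helpers above are plain string mapping; a short sketch of their output (the "filter" and "limited" values are illustrative entries for the string-based BlockedQueryTypes slice):

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	// String() joins the blocked query types with commas.
	types := lokiv1.BlockedQueryTypes{"filter", "limited"}
	fmt.Println(types.String()) // filter,limited

	// Value() maps each OTLP attribute action to its snake_case form.
	fmt.Println(lokiv1.OTLPAttributeActionIndexLabel.Value()) // index_label
	fmt.Println(lokiv1.OTLPAttributeActionDrop.Value())       // drop
}
```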
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/recordingrule_types.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/recordingrule_types.go
new file mode 100644
index 000000000..e07c7d10e
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/recordingrule_types.go
@@ -0,0 +1,123 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// RecordingRuleSpec defines the desired state of RecordingRule
+type RecordingRuleSpec struct {
+ // TenantID of the tenant in which the recording rules are evaluated.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Tenant ID"
+ TenantID string `json:"tenantID"`
+
+ // List of groups for recording rules.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Groups"
+ Groups []*RecordingRuleGroup `json:"groups"`
+}
+
+// RecordingRuleGroup defines a group of Loki recording rules.
+type RecordingRuleGroup struct {
+ // Name of the recording rule group. Must be unique within all recording rules.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+ Name string `json:"name"`
+
+ // Interval defines the time interval between evaluation of the given
+ // recording rule.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="1m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Evaluation Interval"
+ Interval PrometheusDuration `json:"interval"`
+
+ // Limit defines the number of series a recording rule can produce. 0 is no limit.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Limit of produced series"
+ Limit int32 `json:"limit,omitempty"`
+
+ // Rules defines a list of recording rules
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Rules"
+ Rules []*RecordingRuleGroupSpec `json:"rules"`
+}
+
+// RecordingRuleGroupSpec defines the spec for a Loki recording rule.
+type RecordingRuleGroupSpec struct {
+ // The name of the time series to output to. Must be a valid metric name.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Metric Name"
+ Record string `json:"record,omitempty"`
+
+ // The LogQL expression to evaluate. Every evaluation cycle this is
+ // evaluated at the current time, and all resultant time series become
+ // pending/firing alerts.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="LogQL Expression"
+ Expr string `json:"expr"`
+
+ // Labels to add to each recording rule.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Labels"
+ Labels map[string]string `json:"labels,omitempty"`
+}
+
+// RecordingRuleStatus defines the observed state of RecordingRule
+type RecordingRuleStatus struct {
+ // Conditions of the RecordingRule generation health.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:storageversion
+//+kubebuilder:webhook:path=/validate-loki-grafana-com-v1-recordingrule,mutating=false,failurePolicy=fail,sideEffects=None,groups=loki.grafana.com,resources=recordingrules,verbs=create;update,versions=v1,name=vrecordingrule.loki.grafana.com,admissionReviewVersions=v1
+
+// RecordingRule is the Schema for the recordingrules API
+//
+// +operator-sdk:csv:customresourcedefinitions:displayName="RecordingRule",resources={{LokiStack,v1}}
+type RecordingRule struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec RecordingRuleSpec `json:"spec,omitempty"`
+ Status RecordingRuleStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// RecordingRuleList contains a list of RecordingRule
+type RecordingRuleList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []RecordingRule `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&RecordingRule{}, &RecordingRuleList{})
+}
+
+// Hub declares the v1.RecordingRule as the hub CRD version.
+func (*RecordingRule) Hub() {}
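For orientation, a hedged sketch of building one of these objects in Go; the object name, tenant, and LogQL expression are illustrative only:

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rule := lokiv1.RecordingRule{
		ObjectMeta: metav1.ObjectMeta{Name: "example-rules", Namespace: "netobserv"},
		Spec: lokiv1.RecordingRuleSpec{
			TenantID: "application", // illustrative tenant
			Groups: []*lokiv1.RecordingRuleGroup{{
				Name:     "http-errors", // must be unique across recording rules
				Interval: lokiv1.PrometheusDuration("1m"),
				Rules: []*lokiv1.RecordingRuleGroupSpec{{
					Record: "app:http_errors:rate1m", // must be a valid metric name
					Expr:   `sum(rate({app="example"} |= "status=500" [1m]))`,
				}},
			}},
		},
	}
	fmt.Println(rule.Spec.Groups[0].Name)
}
```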
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/rulerconfig_types.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/rulerconfig_types.go
new file mode 100644
index 000000000..8321d2bc6
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/rulerconfig_types.go
@@ -0,0 +1,560 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AlertManagerDiscoverySpec defines the configuration to use DNS resolution for AlertManager hosts.
+type AlertManagerDiscoverySpec struct {
+ // Use DNS SRV records to discover Alertmanager hosts.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Enable SRV"
+ EnableSRV bool `json:"enableSRV"`
+
+ // How long to wait between refreshing DNS resolutions of Alertmanager hosts.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="1m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Refresh Interval"
+ RefreshInterval PrometheusDuration `json:"refreshInterval,omitempty"`
+}
+
+// AlertManagerNotificationQueueSpec defines the configuration for AlertManager notification settings.
+type AlertManagerNotificationQueueSpec struct {
+ // Capacity of the queue for notifications to be sent to the Alertmanager.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=10000
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Notification Queue Capacity"
+ Capacity int32 `json:"capacity,omitempty"`
+
+ // HTTP timeout duration when sending notifications to the Alertmanager.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="10s"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Timeout"
+ Timeout PrometheusDuration `json:"timeout,omitempty"`
+
+ // Max time to tolerate outage for restoring "for" state of alert.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="1h"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Outage Tolerance"
+ ForOutageTolerance PrometheusDuration `json:"forOutageTolerance,omitempty"`
+
+ // Minimum duration between alert and restored "for" state. This is maintained
+ // only for alerts with configured "for" time greater than the grace period.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="10m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Firing Grace Period"
+ ForGracePeriod PrometheusDuration `json:"forGracePeriod,omitempty"`
+
+ // Minimum amount of time to wait before resending an alert to Alertmanager.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="1m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Resend Delay"
+ ResendDelay PrometheusDuration `json:"resendDelay,omitempty"`
+}
+
+// AlertManagerSpec defines the configuration for ruler's alertmanager connectivity.
+type AlertManagerSpec struct {
+ // URL for alerts return path.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Alert External URL"
+ ExternalURL string `json:"externalUrl,omitempty"`
+
+ // Additional labels to add to all alerts.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Extra Alert Labels"
+ ExternalLabels map[string]string `json:"externalLabels,omitempty"`
+
+ // If enabled, then requests to Alertmanager use the v2 API.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enable AlertManager V2 API"
+ EnableV2 bool `json:"enableV2"`
+
+ // List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ // a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ // supported by using DNS resolution (See EnableDNSDiscovery).
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="AlertManager Endpoints"
+ Endpoints []string `json:"endpoints"`
+
+ // Defines the configuration for DNS-based discovery of AlertManager hosts.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="DNS Discovery"
+ DiscoverySpec *AlertManagerDiscoverySpec `json:"discovery,omitempty"`
+
+ // Defines the configuration for the notification queue to AlertManager hosts.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Notification Queue"
+ NotificationQueueSpec *AlertManagerNotificationQueueSpec `json:"notificationQueue,omitempty"`
+
+ // List of alert relabel configurations.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Alert Relabel Configuration"
+ RelabelConfigs []RelabelConfig `json:"relabelConfigs,omitempty"`
+
+ // Client configuration for reaching the alertmanager endpoint.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="TLS Config"
+ Client *AlertManagerClientConfig `json:"client,omitempty"`
+}
+
+// AlertManagerClientConfig defines the client configuration for reaching alertmanager endpoints.
+type AlertManagerClientConfig struct {
+ // TLS configuration for reaching the alertmanager endpoints.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="TLS"
+ TLS *AlertManagerClientTLSConfig `json:"tls,omitempty"`
+
+ // Header authentication configuration for reaching the alertmanager endpoints.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Header Authentication"
+ HeaderAuth *AlertManagerClientHeaderAuth `json:"headerAuth,omitempty"`
+
+ // Basic authentication configuration for reaching the alertmanager endpoints.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Basic Authentication"
+ BasicAuth *AlertManagerClientBasicAuth `json:"basicAuth,omitempty"`
+}
+
+// AlertManagerClientBasicAuth defines the basic authentication configuration for reaching alertmanager endpoints.
+type AlertManagerClientBasicAuth struct {
+ // The subject's username for the basic authentication configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Username"
+ Username *string `json:"username,omitempty"`
+
+ // The subject's password for the basic authentication configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Password"
+ Password *string `json:"password,omitempty"`
+}
+
+// AlertManagerClientHeaderAuth defines the header configuration for reaching alertmanager endpoints.
+type AlertManagerClientHeaderAuth struct {
+ // The authentication type for the header authentication configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Type"
+ Type *string `json:"type,omitempty"`
+
+ // The credentials for the header authentication configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Credentials"
+ Credentials *string `json:"credentials,omitempty"`
+
+ // The credentials file for the Header authentication configuration. It is mutually exclusive with `credentials`.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Credentials File"
+ CredentialsFile *string `json:"credentialsFile,omitempty"`
+}
+
+// AlertManagerClientTLSConfig defines the TLS configuration for reaching alertmanager endpoints.
+type AlertManagerClientTLSConfig struct {
+ // The CA certificate file path for the TLS configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="CA Path"
+ CAPath *string `json:"caPath,omitempty"`
+
+ // The server name to validate in the alertmanager server certificates.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Server Name"
+ ServerName *string `json:"serverName,omitempty"`
+
+ // The client-side certificate file path for the TLS configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Cert Path"
+ CertPath *string `json:"certPath,omitempty"`
+
+ // The client-side key file path for the TLS configuration.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Key Path"
+ KeyPath *string `json:"keyPath,omitempty"`
+
+ // Skip validating server certificate.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Skip validating server certificate"
+ InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"`
+}
+
+// RemoteWriteAuthType defines the type of authorization to use to access the remote write endpoint.
+//
+// +kubebuilder:validation:Enum=basic;header
+type RemoteWriteAuthType string
+
+const (
+ // BasicAuthorization defines the remote write client to use HTTP basic authorization.
+ BasicAuthorization RemoteWriteAuthType = "basic"
+ // BearerAuthorization defines the remote write client to use HTTP bearer authorization.
+ BearerAuthorization RemoteWriteAuthType = "bearer"
+)
+
+// RemoteWriteClientSpec defines the configuration of the remote write client.
+type RemoteWriteClientSpec struct {
+ // Name of the remote write config, which if specified must be unique among remote write configs.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+ Name string `json:"name"`
+
+ // The URL of the endpoint to send samples to.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Endpoint"
+ URL string `json:"url"`
+
+ // Timeout for requests to the remote write endpoint.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="30s"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Remote Write Timeout"
+ Timeout PrometheusDuration `json:"timeout,omitempty"`
+
+ // Type of authorization to use to access the remote write endpoint.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:basic","urn:alm:descriptor:com.tectonic.ui:select:header"},displayName="Authorization Type"
+ AuthorizationType RemoteWriteAuthType `json:"authorization"`
+
+ // Name of a secret in the namespace configured for authorization secrets.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:io.kubernetes:Secret",displayName="Authorization Secret Name"
+ AuthorizationSecretName string `json:"authorizationSecretName"`
+
+ // Additional HTTP headers to be sent along with each remote write request.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ AdditionalHeaders map[string]string `json:"additionalHeaders,omitempty"`
+
+ // List of remote write relabel configurations.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Metric Relabel Configuration"
+ RelabelConfigs []RelabelConfig `json:"relabelConfigs,omitempty"`
+
+ // Optional proxy URL.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="HTTP Proxy URL"
+ ProxyURL string `json:"proxyUrl,omitempty"`
+
+ // Configure whether HTTP requests follow HTTP 3xx redirects.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=true
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Follow HTTP Redirects"
+ FollowRedirects bool `json:"followRedirects"`
+}
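A minimal sketch of a populated remote-write client spec; the endpoint URL and secret name are placeholders:

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	rw := lokiv1.RemoteWriteClientSpec{
		Name:                    "prom",                                        // unique among remote-write configs
		URL:                     "https://prometheus.example.com/api/v1/write", // placeholder endpoint
		AuthorizationType:       lokiv1.BasicAuthorization,
		AuthorizationSecretName: "remote-write-auth", // placeholder secret name
		Timeout:                 lokiv1.PrometheusDuration("30s"),
		FollowRedirects:         true,
	}
	fmt.Println(rw.Name, rw.AuthorizationType)
}
```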
+
+// RelabelActionType defines the enumeration type for RelabelConfig actions.
+//
+// +kubebuilder:validation:Enum=drop;hashmod;keep;labeldrop;labelkeep;labelmap;replace
+type RelabelActionType string
+
+// RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion.
+// It defines `<metric_relabel_configs>` and `<alert_relabel_configs>` sections of Prometheus configuration.
+// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
+type RelabelConfig struct {
+ // The source labels select values from existing labels. Their content is concatenated
+ // using the configured separator and matched against the configured regular expression
+ // for the replace, keep, and drop actions.
+ //
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Source Labels"
+ SourceLabels []string `json:"sourceLabels"`
+
+ // Separator placed between concatenated source label values. Default is ';'.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=";"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Separator"
+ Separator string `json:"separator,omitempty"`
+
+ // Label to which the resulting value is written in a replace action.
+ // It is mandatory for replace actions. Regex capture groups are available.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Target Label"
+ TargetLabel string `json:"targetLabel,omitempty"`
+
+ // Regular expression against which the extracted value is matched. Default is '(.*)'
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="(.*)"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Regex"
+ Regex string `json:"regex,omitempty"`
+
+ // Modulus to take of the hash of the source label values.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Modulus"
+ Modulus uint64 `json:"modulus,omitempty"`
+
+ // Replacement value against which a regex replace is performed if the
+ // regular expression matches. Regex capture groups are available. Default is '$1'
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="$1"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Replacement"
+ Replacement string `json:"replacement,omitempty"`
+
+ // Action to perform based on regex matching. Default is 'replace'
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="replace"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Action"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:drop","urn:alm:descriptor:com.tectonic.ui:select:hashmod","urn:alm:descriptor:com.tectonic.ui:select:keep","urn:alm:descriptor:com.tectonic.ui:select:labeldrop","urn:alm:descriptor:com.tectonic.ui:select:labelkeep","urn:alm:descriptor:com.tectonic.ui:select:labelmap","urn:alm:descriptor:com.tectonic.ui:select:replace"},displayName="Action"
+ Action RelabelActionType `json:"action,omitempty"`
+}
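The fields mirror Prometheus relabelling semantics: source label values are joined with the separator, matched against the regex, and the action decides what happens. A hedged example that drops matching series (label names and pattern are illustrative):

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	// Drops series whose namespace starts with "test-", whatever the pod name.
	rc := lokiv1.RelabelConfig{
		SourceLabels: []string{"namespace", "pod"},
		Separator:    ";",
		Regex:        "test-.*;.*",
		Action:       "drop",
	}
	fmt.Println(rc.Action)
}
```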
+
+// RemoteWriteClientQueueSpec defines the configuration of the remote write client queue.
+type RemoteWriteClientQueueSpec struct {
+ // Number of samples to buffer per shard before we block reading of more samples.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=2500
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Queue Capacity"
+ Capacity int32 `json:"capacity,omitempty"`
+
+ // Maximum number of shards, i.e. amount of concurrency.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=200
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Maximum Shards"
+ MaxShards int32 `json:"maxShards,omitempty"`
+
+ // Minimum number of shards, i.e. amount of concurrency.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=200
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Minimum Shards"
+ MinShards int32 `json:"minShards,omitempty"`
+
+ // Maximum number of samples per send.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:=500
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Maximum Shards per Send"
+ MaxSamplesPerSend int32 `json:"maxSamplesPerSend,omitempty"`
+
+ // Maximum time a sample will wait in buffer.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="5s"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Batch Send Deadline"
+ BatchSendDeadline PrometheusDuration `json:"batchSendDeadline,omitempty"`
+
+ // Initial retry delay. Gets doubled for every retry.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="30ms"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Min BackOff Period"
+ MinBackOffPeriod PrometheusDuration `json:"minBackOffPeriod,omitempty"`
+
+ // Maximum retry delay.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="100ms"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Max BackOff Period"
+ MaxBackOffPeriod PrometheusDuration `json:"maxBackOffPeriod,omitempty"`
+}
+
+// RemoteWriteSpec defines the configuration for ruler's remote_write connectivity.
+type RemoteWriteSpec struct {
+ // Enable remote-write functionality.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Enabled"
+ Enabled bool `json:"enabled,omitempty"`
+
+ // Minimum period to wait between refreshing remote-write reconfigurations.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="10s"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Min Refresh Period"
+ RefreshPeriod PrometheusDuration `json:"refreshPeriod,omitempty"`
+
+ // Defines the configuration for remote write client.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Client"
+ ClientSpec *RemoteWriteClientSpec `json:"client,omitempty"`
+
+ // Defines the configuration for remote write client queue.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Client Queue"
+ QueueSpec *RemoteWriteClientQueueSpec `json:"queue,omitempty"`
+}
+
+// RulerConfigSpec defines the desired state of Ruler
+type RulerConfigSpec struct {
+ // Interval on how frequently to evaluate rules.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="1m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Evaluation Interval"
+ EvalutionInterval PrometheusDuration `json:"evaluationInterval,omitempty"`
+
+ // Interval on how frequently to poll for new rule definitions.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default:="1m"
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Poll Interval"
+ PollInterval PrometheusDuration `json:"pollInterval,omitempty"`
+
+ // Defines alert manager configuration to notify on firing alerts.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Alert Manager Configuration"
+ AlertManagerSpec *AlertManagerSpec `json:"alertmanager,omitempty"`
+
+ // Defines a remote write endpoint to write recording rule metrics.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Remote Write Configuration"
+ RemoteWriteSpec *RemoteWriteSpec `json:"remoteWrite,omitempty"`
+
+ // Overrides defines the config overrides to be applied per-tenant.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:advanced",displayName="Rate Limiting"
+ Overrides map[string]RulerOverrides `json:"overrides,omitempty"`
+}
+
+// RulerOverrides defines the overrides applied per-tenant.
+type RulerOverrides struct {
+ // AlertManagerOverrides defines the overrides to apply to the alertmanager config.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ AlertManagerOverrides *AlertManagerSpec `json:"alertmanager,omitempty"`
+}
+
+// RulerConfigStatus defines the observed state of RulerConfig
+type RulerConfigStatus struct {
+ // Conditions of the RulerConfig health.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors="urn:alm:descriptor:io.kubernetes.conditions"
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:storageversion
+//+kubebuilder:webhook:path=/validate-loki-grafana-com-v1-rulerconfig,mutating=false,failurePolicy=fail,sideEffects=None,groups=loki.grafana.com,resources=rulerconfigs,verbs=create;update,versions=v1,name=vrulerconfig.loki.grafana.com,admissionReviewVersions=v1
+
+// RulerConfig is the Schema for the rulerconfigs API
+//
+// +operator-sdk:csv:customresourcedefinitions:displayName="RulerConfig",resources={{LokiStack,v1}}
+type RulerConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec RulerConfigSpec `json:"spec,omitempty"`
+ Status RulerConfigStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// RulerConfigList contains a list of RulerConfig
+type RulerConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []RulerConfig `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&RulerConfig{}, &RulerConfigList{})
+}
+
+// Hub declares the v1.RulerConfig as the hub CRD version.
+func (*RulerConfig) Hub() {}
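Putting the pieces together, a hedged sketch of a RulerConfig that wires the evaluation intervals to an Alertmanager endpoint; the URL is a placeholder, and note that the Go field is spelled `EvalutionInterval` in this API even though its JSON tag is `evaluationInterval`:

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cfg := lokiv1.RulerConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "rulerconfig", Namespace: "netobserv"},
		Spec: lokiv1.RulerConfigSpec{
			// Spelling follows the vendored API; serialized as "evaluationInterval".
			EvalutionInterval: lokiv1.PrometheusDuration("1m"),
			PollInterval:      lokiv1.PrometheusDuration("1m"),
			AlertManagerSpec: &lokiv1.AlertManagerSpec{
				EnableV2:  true,
				Endpoints: []string{"https://alertmanager.example.com"}, // placeholder
			},
		},
	}
	fmt.Println(cfg.Name)
}
```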
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/v1.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/v1.go
new file mode 100644
index 000000000..a17e7244d
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/v1.go
@@ -0,0 +1,105 @@
+package v1
+
+import (
+ "errors"
+ "time"
+)
+
+// PrometheusDuration defines the type for Prometheus durations.
+//
+// +kubebuilder:validation:Pattern:="((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0)"
+type PrometheusDuration string
+
+// StorageSchemaEffectiveDate defines the type for the Storage Schema Effective Date.
+//
+// +kubebuilder:validation:Pattern:="^([0-9]{4,})([-]([0-9]{2})){2}$"
+type StorageSchemaEffectiveDate string
+
+// UTCTime returns the date as a time object in the UTC time zone
+func (d StorageSchemaEffectiveDate) UTCTime() (time.Time, error) {
+ return time.Parse(StorageSchemaEffectiveDateFormat, string(d))
+}
+
+const (
+ // StorageSchemaEffectiveDateFormat is the date layout used to parse effective dates.
+ StorageSchemaEffectiveDateFormat = "2006-01-02"
+ // StorageSchemaUpdateBuffer is the amount of time used as a buffer to prevent
+ // storage schemas from being added too close to midnight in UTC.
+ StorageSchemaUpdateBuffer = time.Hour * 2
+)
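A quick sketch of the date handling above: time.Parse with the "2006-01-02" layout yields a midnight-UTC timestamp, which is what UTCTime returns.

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	d := lokiv1.StorageSchemaEffectiveDate("2024-10-01")
	t, err := d.UTCTime()
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // 2024-10-01 00:00:00 +0000 UTC
}
```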
+
+const (
+ // The AnnotationDisableTenantValidation annotation can contain a boolean value that, if true, disables the tenant-ID validation.
+ AnnotationDisableTenantValidation = "loki.grafana.com/disable-tenant-validation"
+
+ // The AnnotationAvailabilityZone annotation contains the availability zone used in the Loki configuration of that pod.
+ // It is automatically added to managed Pods by the operator, if needed.
+ AnnotationAvailabilityZone = "loki.grafana.com/availability-zone"
+
+ // The AnnotationAvailabilityZoneLabels annotation contains a list of node-labels that are used to construct the availability zone
+ // of the annotated Pod. It is used by the zone-awareness controller and automatically added to managed Pods by the operator,
+ // if needed.
+ AnnotationAvailabilityZoneLabels string = "loki.grafana.com/availability-zone-labels"
+
+ // LabelZoneAwarePod is a pod-label that is added to Pods that should be reconciled by the zone-awareness controller.
+ // It is automatically added to managed Pods by the operator, if needed.
+ LabelZoneAwarePod string = "loki.grafana.com/zone-aware"
+)
+
+var (
+ // ErrGroupNamesNotUnique is the error type when loki group names are not unique.
+ ErrGroupNamesNotUnique = errors.New("Group names are not unique")
+ // ErrInvalidRecordMetricName when any loki recording rule has an invalid PromQL metric name.
+ ErrInvalidRecordMetricName = errors.New("Failed to parse record metric name")
+ // ErrParseAlertForPeriod when any loki alerting rule for period is not a valid PromQL duration.
+ ErrParseAlertForPeriod = errors.New("Failed to parse alert firing period")
+ // ErrParseEvaluationInterval when any loki group evaluation interval is not a valid PromQL duration.
+ ErrParseEvaluationInterval = errors.New("Failed to parse evaluation")
+ // ErrParseLogQLExpression when any loki rule expression is not a valid LogQL expression.
+ ErrParseLogQLExpression = errors.New("Failed to parse LogQL expression")
+ // ErrParseLogQLNotSample when the Loki rule expression does not evaluate to a sample expression.
+ ErrParseLogQLNotSample = errors.New("LogQL expression is not a sample query")
+ // ErrParseLogQLSelector when the Loki rule expression does not have a valid selector.
+ ErrParseLogQLSelector = errors.New("Failed to get selector from LogQL expression")
+ // ErrEffectiveDatesNotUnique when effective dates are not unique.
+ ErrEffectiveDatesNotUnique = errors.New("Effective dates are not unique")
+ // ErrParseEffectiveDates when effective dates cannot be parsed.
+ ErrParseEffectiveDates = errors.New("Failed to parse effective date")
+ // ErrMissingValidStartDate when a schema list is created without a valid effective date
+ ErrMissingValidStartDate = errors.New("Schema does not contain a valid starting effective date")
+ // ErrSchemaRetroactivelyAdded when a schema has been retroactively added
+ ErrSchemaRetroactivelyAdded = errors.New("Cannot retroactively add schema")
+ // ErrSchemaRetroactivelyRemoved when a schema or schemas have been retroactively removed
+ ErrSchemaRetroactivelyRemoved = errors.New("Cannot retroactively remove schema(s)")
+ // ErrSchemaRetroactivelyChanged when a schema has been retroactively changed
+ ErrSchemaRetroactivelyChanged = errors.New("Cannot retroactively change schema")
+ // ErrHeaderAuthCredentialsConflict when both Credentials and CredentialsFile are used in a header authentication client.
+ ErrHeaderAuthCredentialsConflict = errors.New("credentials and credentialsFile cannot be used at the same time")
+ // ErrReplicationZonesNodes when there is an error retrieving nodes with replication zones labels.
+ ErrReplicationZonesNodes = errors.New("Failed to retrieve nodes for zone replication")
+ // ErrReplicationFactorToZonesRatio when the replication factor defined is greater than the number of available zones.
+ ErrReplicationFactorToZonesRatio = errors.New("replication factor is greater than the number of available zones")
+ // ErrReplicationSpecConflict when both the ReplicationSpec and deprecated ReplicationFactor are used.
+ ErrReplicationSpecConflict = errors.New("replicationSpec and replicationFactor (deprecated) cannot be used at the same time")
+ // ErrIPv6InstanceAddrTypeNotAllowed when the default InstanceAddrType is used with enableIPv6.
+ ErrIPv6InstanceAddrTypeNotAllowed = errors.New(`instanceAddrType "default" cannot be used with enableIPv6 at the same time`)
+
+ // ErrOTLPResourceAttributesEmptyNotAllowed when the OTLP ResourceAttributes are empty even though ignoreDefaults is enabled.
+ ErrOTLPResourceAttributesEmptyNotAllowed = errors.New(`resourceAttributes cannot be empty when ignoreDefaults is true`)
+ // ErrOTLPResourceAttributesIndexLabelActionMissing when OTLP ResourceAttributes does not contain at least one index label when ignoreDefaults is enabled.
+ ErrOTLPResourceAttributesIndexLabelActionMissing = errors.New(`resourceAttributes does not contain at least one attributed mapped to "index_label"`)
+ // ErrOTLPAttributesSpecInvalid when the OTLPAttributesSpec attributes and regex fields are both empty.
+ ErrOTLPAttributesSpecInvalid = errors.New(`attributes and regex cannot be empty at the same time`)
+
+ // ErrRuleMustMatchNamespace indicates that an expression used in an alerting or recording rule is missing
+ // matchers for a namespace.
+ ErrRuleMustMatchNamespace = errors.New("rule needs to have a matcher for the namespace")
+ // ErrSeverityLabelMissing indicates that an alerting rule is missing the severity label
+ ErrSeverityLabelMissing = errors.New("rule requires label: severity")
+ // ErrSeverityLabelInvalid indicates that an alerting rule has an invalid value for the summary label
+ ErrSeverityLabelInvalid = errors.New("rule severity label value invalid, allowed values: critical, warning, info")
+ // ErrSummaryAnnotationMissing indicates that an alerting rule is missing the summary annotation
+ ErrSummaryAnnotationMissing = errors.New("rule requires annotation: summary")
+ // ErrDescriptionAnnotationMissing indicates that an alerting rule is missing the description annotation
+ ErrDescriptionAnnotationMissing = errors.New("rule requires annotation: description")
+)
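These sentinel errors are meant for errors.Is-style checks by callers; a hedged sketch (the validation function is hypothetical):

```go
package main

import (
	"errors"
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

// validateExpr is a hypothetical caller wrapping one of the sentinel errors.
func validateExpr(expr string) error {
	if expr == "" {
		return fmt.Errorf("rule %q: %w", "http-errors", lokiv1.ErrParseLogQLExpression)
	}
	return nil
}

func main() {
	if err := validateExpr(""); errors.Is(err, lokiv1.ErrParseLogQLExpression) {
		fmt.Println("invalid LogQL:", err)
	}
}
```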
diff --git a/vendor/github.com/grafana/loki/operator/apis/loki/v1/zz_generated.deepcopy.go b/vendor/github.com/grafana/loki/operator/apis/loki/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..faab229b2
--- /dev/null
+++ b/vendor/github.com/grafana/loki/operator/apis/loki/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1970 @@
+//go:build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertManagerClientBasicAuth) DeepCopyInto(out *AlertManagerClientBasicAuth) {
+ *out = *in
+ if in.Username != nil {
+ in, out := &in.Username, &out.Username
+ *out = new(string)
+ **out = **in
+ }
+ if in.Password != nil {
+ in, out := &in.Password, &out.Password
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerClientBasicAuth.
+func (in *AlertManagerClientBasicAuth) DeepCopy() *AlertManagerClientBasicAuth {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertManagerClientBasicAuth)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertManagerClientConfig) DeepCopyInto(out *AlertManagerClientConfig) {
+ *out = *in
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(AlertManagerClientTLSConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HeaderAuth != nil {
+ in, out := &in.HeaderAuth, &out.HeaderAuth
+ *out = new(AlertManagerClientHeaderAuth)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BasicAuth != nil {
+ in, out := &in.BasicAuth, &out.BasicAuth
+ *out = new(AlertManagerClientBasicAuth)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerClientConfig.
+func (in *AlertManagerClientConfig) DeepCopy() *AlertManagerClientConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertManagerClientConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertManagerClientHeaderAuth) DeepCopyInto(out *AlertManagerClientHeaderAuth) {
+ *out = *in
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ if in.Credentials != nil {
+ in, out := &in.Credentials, &out.Credentials
+ *out = new(string)
+ **out = **in
+ }
+ if in.CredentialsFile != nil {
+ in, out := &in.CredentialsFile, &out.CredentialsFile
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerClientHeaderAuth.
+func (in *AlertManagerClientHeaderAuth) DeepCopy() *AlertManagerClientHeaderAuth {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertManagerClientHeaderAuth)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertManagerClientTLSConfig) DeepCopyInto(out *AlertManagerClientTLSConfig) {
+ *out = *in
+ if in.CAPath != nil {
+ in, out := &in.CAPath, &out.CAPath
+ *out = new(string)
+ **out = **in
+ }
+ if in.ServerName != nil {
+ in, out := &in.ServerName, &out.ServerName
+ *out = new(string)
+ **out = **in
+ }
+ if in.CertPath != nil {
+ in, out := &in.CertPath, &out.CertPath
+ *out = new(string)
+ **out = **in
+ }
+ if in.KeyPath != nil {
+ in, out := &in.KeyPath, &out.KeyPath
+ *out = new(string)
+ **out = **in
+ }
+ if in.InsecureSkipVerify != nil {
+ in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerClientTLSConfig.
+func (in *AlertManagerClientTLSConfig) DeepCopy() *AlertManagerClientTLSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertManagerClientTLSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertManagerDiscoverySpec) DeepCopyInto(out *AlertManagerDiscoverySpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerDiscoverySpec.
+func (in *AlertManagerDiscoverySpec) DeepCopy() *AlertManagerDiscoverySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertManagerDiscoverySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertManagerNotificationQueueSpec) DeepCopyInto(out *AlertManagerNotificationQueueSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerNotificationQueueSpec.
+func (in *AlertManagerNotificationQueueSpec) DeepCopy() *AlertManagerNotificationQueueSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertManagerNotificationQueueSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertManagerSpec) DeepCopyInto(out *AlertManagerSpec) {
+ *out = *in
+ if in.ExternalLabels != nil {
+ in, out := &in.ExternalLabels, &out.ExternalLabels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Endpoints != nil {
+ in, out := &in.Endpoints, &out.Endpoints
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DiscoverySpec != nil {
+ in, out := &in.DiscoverySpec, &out.DiscoverySpec
+ *out = new(AlertManagerDiscoverySpec)
+ **out = **in
+ }
+ if in.NotificationQueueSpec != nil {
+ in, out := &in.NotificationQueueSpec, &out.NotificationQueueSpec
+ *out = new(AlertManagerNotificationQueueSpec)
+ **out = **in
+ }
+ if in.RelabelConfigs != nil {
+ in, out := &in.RelabelConfigs, &out.RelabelConfigs
+ *out = make([]RelabelConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Client != nil {
+ in, out := &in.Client, &out.Client
+ *out = new(AlertManagerClientConfig)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerSpec.
+func (in *AlertManagerSpec) DeepCopy() *AlertManagerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertManagerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRule) DeepCopyInto(out *AlertingRule) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRule.
+func (in *AlertingRule) DeepCopy() *AlertingRule {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlertingRule) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleGroup) DeepCopyInto(out *AlertingRuleGroup) {
+ *out = *in
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]*AlertingRuleGroupSpec, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(AlertingRuleGroupSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleGroup.
+func (in *AlertingRuleGroup) DeepCopy() *AlertingRuleGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleGroupSpec) DeepCopyInto(out *AlertingRuleGroupSpec) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleGroupSpec.
+func (in *AlertingRuleGroupSpec) DeepCopy() *AlertingRuleGroupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleGroupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleList) DeepCopyInto(out *AlertingRuleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AlertingRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleList.
+func (in *AlertingRuleList) DeepCopy() *AlertingRuleList {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlertingRuleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleSpec) DeepCopyInto(out *AlertingRuleSpec) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]*AlertingRuleGroup, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(AlertingRuleGroup)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleSpec.
+func (in *AlertingRuleSpec) DeepCopy() *AlertingRuleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleStatus) DeepCopyInto(out *AlertingRuleStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleStatus.
+func (in *AlertingRuleStatus) DeepCopy() *AlertingRuleStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
+ *out = *in
+ if in.OIDC != nil {
+ in, out := &in.OIDC, &out.OIDC
+ *out = new(OIDCSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MTLS != nil {
+ in, out := &in.MTLS, &out.MTLS
+ *out = new(MTLSSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
+func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationSpec) DeepCopyInto(out *AuthorizationSpec) {
+ *out = *in
+ if in.OPA != nil {
+ in, out := &in.OPA, &out.OPA
+ *out = new(OPASpec)
+ **out = **in
+ }
+ if in.Roles != nil {
+ in, out := &in.Roles, &out.Roles
+ *out = make([]RoleSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RoleBindings != nil {
+ in, out := &in.RoleBindings, &out.RoleBindings
+ *out = make([]RoleBindingsSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationSpec.
+func (in *AuthorizationSpec) DeepCopy() *AuthorizationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthorizationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BlockedQuerySpec) DeepCopyInto(out *BlockedQuerySpec) {
+ *out = *in
+ if in.Types != nil {
+ in, out := &in.Types, &out.Types
+ *out = make(BlockedQueryTypes, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockedQuerySpec.
+func (in *BlockedQuerySpec) DeepCopy() *BlockedQuerySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BlockedQuerySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in BlockedQueryTypes) DeepCopyInto(out *BlockedQueryTypes) {
+ {
+ in := &in
+ *out = make(BlockedQueryTypes, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockedQueryTypes.
+func (in BlockedQueryTypes) DeepCopy() BlockedQueryTypes {
+ if in == nil {
+ return nil
+ }
+ out := new(BlockedQueryTypes)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CASpec) DeepCopyInto(out *CASpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CASpec.
+func (in *CASpec) DeepCopy() *CASpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CASpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterProxy) DeepCopyInto(out *ClusterProxy) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProxy.
+func (in *ClusterProxy) DeepCopy() *ClusterProxy {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterProxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlobalOTLPSpec) DeepCopyInto(out *GlobalOTLPSpec) {
+ *out = *in
+ if in.IndexedResourceAttributes != nil {
+ in, out := &in.IndexedResourceAttributes, &out.IndexedResourceAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.OTLPSpec.DeepCopyInto(&out.OTLPSpec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalOTLPSpec.
+func (in *GlobalOTLPSpec) DeepCopy() *GlobalOTLPSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(GlobalOTLPSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HashRingSpec) DeepCopyInto(out *HashRingSpec) {
+ *out = *in
+ if in.MemberList != nil {
+ in, out := &in.MemberList, &out.MemberList
+ *out = new(MemberListSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashRingSpec.
+func (in *HashRingSpec) DeepCopy() *HashRingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HashRingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngestionLimitSpec) DeepCopyInto(out *IngestionLimitSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestionLimitSpec.
+func (in *IngestionLimitSpec) DeepCopy() *IngestionLimitSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngestionLimitSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitsSpec) DeepCopyInto(out *LimitsSpec) {
+ *out = *in
+ if in.Global != nil {
+ in, out := &in.Global, &out.Global
+ *out = new(LimitsTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Tenants != nil {
+ in, out := &in.Tenants, &out.Tenants
+ *out = make(map[string]PerTenantLimitsTemplateSpec, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsSpec.
+func (in *LimitsSpec) DeepCopy() *LimitsSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LimitsSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitsTemplateSpec) DeepCopyInto(out *LimitsTemplateSpec) {
+ *out = *in
+ if in.IngestionLimits != nil {
+ in, out := &in.IngestionLimits, &out.IngestionLimits
+ *out = new(IngestionLimitSpec)
+ **out = **in
+ }
+ if in.QueryLimits != nil {
+ in, out := &in.QueryLimits, &out.QueryLimits
+ *out = new(QueryLimitSpec)
+ **out = **in
+ }
+ if in.OTLP != nil {
+ in, out := &in.OTLP, &out.OTLP
+ *out = new(GlobalOTLPSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Retention != nil {
+ in, out := &in.Retention, &out.Retention
+ *out = new(RetentionLimitSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsTemplateSpec.
+func (in *LimitsTemplateSpec) DeepCopy() *LimitsTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LimitsTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiComponentSpec) DeepCopyInto(out *LokiComponentSpec) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.PodAntiAffinity != nil {
+ in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
+ *out = new(corev1.PodAntiAffinity)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiComponentSpec.
+func (in *LokiComponentSpec) DeepCopy() *LokiComponentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiComponentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStack) DeepCopyInto(out *LokiStack) {
+ *out = *in
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.TypeMeta = in.TypeMeta
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStack.
+func (in *LokiStack) DeepCopy() *LokiStack {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStack)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LokiStack) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
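The generated DeepCopy methods make it safe to mutate a copy without touching the original, which matters when objects come from a shared informer cache. A short sketch using the credential-mode status field defined earlier in this diff (CredentialModeToken is the sibling constant referenced in the token-cco comment above):

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	original := &lokiv1.LokiStack{}
	original.Status.Storage.CredentialMode = lokiv1.CredentialModeTokenCCO

	// Mutating the clone leaves the original untouched.
	clone := original.DeepCopy()
	clone.Status.Storage.CredentialMode = lokiv1.CredentialModeToken

	fmt.Println(original.Status.Storage.CredentialMode) // token-cco
	fmt.Println(clone.Status.Storage.CredentialMode)    // token
}
```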
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackComponentStatus) DeepCopyInto(out *LokiStackComponentStatus) {
+ *out = *in
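+ // Each PodStatusMap field below is copied with the same generated pattern:
+ // a nil map is left nil, nil slice values are kept nil, and non-nil slices
+ // are copied element by element.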
+ if in.Compactor != nil {
+ in, out := &in.Compactor, &out.Compactor
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Distributor != nil {
+ in, out := &in.Distributor, &out.Distributor
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.IndexGateway != nil {
+ in, out := &in.IndexGateway, &out.IndexGateway
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Ingester != nil {
+ in, out := &in.Ingester, &out.Ingester
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Querier != nil {
+ in, out := &in.Querier, &out.Querier
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.QueryFrontend != nil {
+ in, out := &in.QueryFrontend, &out.QueryFrontend
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Gateway != nil {
+ in, out := &in.Gateway, &out.Gateway
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.Ruler != nil {
+ in, out := &in.Ruler, &out.Ruler
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackComponentStatus.
+func (in *LokiStackComponentStatus) DeepCopy() *LokiStackComponentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackComponentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackList) DeepCopyInto(out *LokiStackList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]LokiStack, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackList.
+func (in *LokiStackList) DeepCopy() *LokiStackList {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LokiStackList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackSpec) DeepCopyInto(out *LokiStackSpec) {
+ *out = *in
+ if in.HashRing != nil {
+ in, out := &in.HashRing, &out.HashRing
+ *out = new(HashRingSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Storage.DeepCopyInto(&out.Storage)
+ if in.Proxy != nil {
+ in, out := &in.Proxy, &out.Proxy
+ *out = new(ClusterProxy)
+ **out = **in
+ }
+ if in.Replication != nil {
+ in, out := &in.Replication, &out.Replication
+ *out = new(ReplicationSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = new(RulesSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Limits != nil {
+ in, out := &in.Limits, &out.Limits
+ *out = new(LimitsSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Template != nil {
+ in, out := &in.Template, &out.Template
+ *out = new(LokiTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Tenants != nil {
+ in, out := &in.Tenants, &out.Tenants
+ *out = new(TenantsSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackSpec.
+func (in *LokiStackSpec) DeepCopy() *LokiStackSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackStatus) DeepCopyInto(out *LokiStackStatus) {
+ *out = *in
+ in.Components.DeepCopyInto(&out.Components)
+ in.Storage.DeepCopyInto(&out.Storage)
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackStatus.
+func (in *LokiStackStatus) DeepCopy() *LokiStackStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiStackStorageStatus) DeepCopyInto(out *LokiStackStorageStatus) {
+ *out = *in
+ if in.Schemas != nil {
+ in, out := &in.Schemas, &out.Schemas
+ *out = make([]ObjectStorageSchema, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiStackStorageStatus.
+func (in *LokiStackStorageStatus) DeepCopy() *LokiStackStorageStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiStackStorageStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LokiTemplateSpec) DeepCopyInto(out *LokiTemplateSpec) {
+ *out = *in
+ if in.Compactor != nil {
+ in, out := &in.Compactor, &out.Compactor
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Distributor != nil {
+ in, out := &in.Distributor, &out.Distributor
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ingester != nil {
+ in, out := &in.Ingester, &out.Ingester
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Querier != nil {
+ in, out := &in.Querier, &out.Querier
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.QueryFrontend != nil {
+ in, out := &in.QueryFrontend, &out.QueryFrontend
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Gateway != nil {
+ in, out := &in.Gateway, &out.Gateway
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IndexGateway != nil {
+ in, out := &in.IndexGateway, &out.IndexGateway
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ruler != nil {
+ in, out := &in.Ruler, &out.Ruler
+ *out = new(LokiComponentSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiTemplateSpec.
+func (in *LokiTemplateSpec) DeepCopy() *LokiTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LokiTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MTLSSpec) DeepCopyInto(out *MTLSSpec) {
+ *out = *in
+ if in.CA != nil {
+ in, out := &in.CA, &out.CA
+ *out = new(CASpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTLSSpec.
+func (in *MTLSSpec) DeepCopy() *MTLSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MTLSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemberListSpec) DeepCopyInto(out *MemberListSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberListSpec.
+func (in *MemberListSpec) DeepCopy() *MemberListSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MemberListSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCSpec) DeepCopyInto(out *OIDCSpec) {
+ *out = *in
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(TenantSecretSpec)
+ **out = **in
+ }
+ if in.IssuerCA != nil {
+ in, out := &in.IssuerCA, &out.IssuerCA
+ *out = new(CASpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCSpec.
+func (in *OIDCSpec) DeepCopy() *OIDCSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OPASpec) DeepCopyInto(out *OPASpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OPASpec.
+func (in *OPASpec) DeepCopy() *OPASpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OPASpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OTLPAttributesSpec) DeepCopyInto(out *OTLPAttributesSpec) {
+ *out = *in
+ if in.Attributes != nil {
+ in, out := &in.Attributes, &out.Attributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPAttributesSpec.
+func (in *OTLPAttributesSpec) DeepCopy() *OTLPAttributesSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OTLPAttributesSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OTLPResourceAttributesConfigSpec) DeepCopyInto(out *OTLPResourceAttributesConfigSpec) {
+ *out = *in
+ if in.Attributes != nil {
+ in, out := &in.Attributes, &out.Attributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPResourceAttributesConfigSpec.
+func (in *OTLPResourceAttributesConfigSpec) DeepCopy() *OTLPResourceAttributesConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OTLPResourceAttributesConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OTLPResourceAttributesSpec) DeepCopyInto(out *OTLPResourceAttributesSpec) {
+ *out = *in
+ if in.Attributes != nil {
+ in, out := &in.Attributes, &out.Attributes
+ *out = make([]OTLPResourceAttributesConfigSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPResourceAttributesSpec.
+func (in *OTLPResourceAttributesSpec) DeepCopy() *OTLPResourceAttributesSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OTLPResourceAttributesSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OTLPSpec) DeepCopyInto(out *OTLPSpec) {
+ *out = *in
+ if in.ResourceAttributes != nil {
+ in, out := &in.ResourceAttributes, &out.ResourceAttributes
+ *out = new(OTLPResourceAttributesSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ScopeAttributes != nil {
+ in, out := &in.ScopeAttributes, &out.ScopeAttributes
+ *out = make([]OTLPAttributesSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.LogAttributes != nil {
+ in, out := &in.LogAttributes, &out.LogAttributes
+ *out = make([]OTLPAttributesSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPSpec.
+func (in *OTLPSpec) DeepCopy() *OTLPSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OTLPSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageSchema) DeepCopyInto(out *ObjectStorageSchema) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSchema.
+func (in *ObjectStorageSchema) DeepCopy() *ObjectStorageSchema {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageSchema)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageSecretSpec) DeepCopyInto(out *ObjectStorageSecretSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSecretSpec.
+func (in *ObjectStorageSecretSpec) DeepCopy() *ObjectStorageSecretSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageSecretSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) {
+ *out = *in
+ if in.Schemas != nil {
+ in, out := &in.Schemas, &out.Schemas
+ *out = make([]ObjectStorageSchema, len(*in))
+ copy(*out, *in)
+ }
+ out.Secret = in.Secret
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(ObjectStorageTLSSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec.
+func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectStorageTLSSpec) DeepCopyInto(out *ObjectStorageTLSSpec) {
+ *out = *in
+ out.CASpec = in.CASpec
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageTLSSpec.
+func (in *ObjectStorageTLSSpec) DeepCopy() *ObjectStorageTLSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectStorageTLSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenshiftTenantSpec) DeepCopyInto(out *OpenshiftTenantSpec) {
+ *out = *in
+ if in.AdminGroups != nil {
+ in, out := &in.AdminGroups, &out.AdminGroups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftTenantSpec.
+func (in *OpenshiftTenantSpec) DeepCopy() *OpenshiftTenantSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenshiftTenantSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerTenantLimitsTemplateSpec) DeepCopyInto(out *PerTenantLimitsTemplateSpec) {
+ *out = *in
+ if in.IngestionLimits != nil {
+ in, out := &in.IngestionLimits, &out.IngestionLimits
+ *out = new(IngestionLimitSpec)
+ **out = **in
+ }
+ if in.QueryLimits != nil {
+ in, out := &in.QueryLimits, &out.QueryLimits
+ *out = new(PerTenantQueryLimitSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OTLP != nil {
+ in, out := &in.OTLP, &out.OTLP
+ *out = new(OTLPSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Retention != nil {
+ in, out := &in.Retention, &out.Retention
+ *out = new(RetentionLimitSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerTenantLimitsTemplateSpec.
+func (in *PerTenantLimitsTemplateSpec) DeepCopy() *PerTenantLimitsTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PerTenantLimitsTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerTenantQueryLimitSpec) DeepCopyInto(out *PerTenantQueryLimitSpec) {
+ *out = *in
+ out.QueryLimitSpec = in.QueryLimitSpec
+ if in.Blocked != nil {
+ in, out := &in.Blocked, &out.Blocked
+ *out = make([]BlockedQuerySpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerTenantQueryLimitSpec.
+func (in *PerTenantQueryLimitSpec) DeepCopy() *PerTenantQueryLimitSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PerTenantQueryLimitSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PodStatusMap) DeepCopyInto(out *PodStatusMap) {
+ {
+ in := &in
+ *out = make(PodStatusMap, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ inVal := (*in)[key]
+ in, out := &inVal, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusMap.
+func (in PodStatusMap) DeepCopy() PodStatusMap {
+ if in == nil {
+ return nil
+ }
+ out := new(PodStatusMap)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QueryLimitSpec) DeepCopyInto(out *QueryLimitSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryLimitSpec.
+func (in *QueryLimitSpec) DeepCopy() *QueryLimitSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(QueryLimitSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecordingRule) DeepCopyInto(out *RecordingRule) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingRule.
+func (in *RecordingRule) DeepCopy() *RecordingRule {
+ if in == nil {
+ return nil
+ }
+ out := new(RecordingRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RecordingRule) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecordingRuleGroup) DeepCopyInto(out *RecordingRuleGroup) {
+ *out = *in
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]*RecordingRuleGroupSpec, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(RecordingRuleGroupSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingRuleGroup.
+func (in *RecordingRuleGroup) DeepCopy() *RecordingRuleGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(RecordingRuleGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecordingRuleGroupSpec) DeepCopyInto(out *RecordingRuleGroupSpec) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingRuleGroupSpec.
+func (in *RecordingRuleGroupSpec) DeepCopy() *RecordingRuleGroupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RecordingRuleGroupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecordingRuleList) DeepCopyInto(out *RecordingRuleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]RecordingRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingRuleList.
+func (in *RecordingRuleList) DeepCopy() *RecordingRuleList {
+ if in == nil {
+ return nil
+ }
+ out := new(RecordingRuleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RecordingRuleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecordingRuleSpec) DeepCopyInto(out *RecordingRuleSpec) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]*RecordingRuleGroup, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(RecordingRuleGroup)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingRuleSpec.
+func (in *RecordingRuleSpec) DeepCopy() *RecordingRuleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RecordingRuleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecordingRuleStatus) DeepCopyInto(out *RecordingRuleStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingRuleStatus.
+func (in *RecordingRuleStatus) DeepCopy() *RecordingRuleStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RecordingRuleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) {
+ *out = *in
+ if in.SourceLabels != nil {
+ in, out := &in.SourceLabels, &out.SourceLabels
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig.
+func (in *RelabelConfig) DeepCopy() *RelabelConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RelabelConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteWriteClientQueueSpec) DeepCopyInto(out *RemoteWriteClientQueueSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteClientQueueSpec.
+func (in *RemoteWriteClientQueueSpec) DeepCopy() *RemoteWriteClientQueueSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteWriteClientQueueSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteWriteClientSpec) DeepCopyInto(out *RemoteWriteClientSpec) {
+ *out = *in
+ if in.AdditionalHeaders != nil {
+ in, out := &in.AdditionalHeaders, &out.AdditionalHeaders
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.RelabelConfigs != nil {
+ in, out := &in.RelabelConfigs, &out.RelabelConfigs
+ *out = make([]RelabelConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteClientSpec.
+func (in *RemoteWriteClientSpec) DeepCopy() *RemoteWriteClientSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteWriteClientSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) {
+ *out = *in
+ if in.ClientSpec != nil {
+ in, out := &in.ClientSpec, &out.ClientSpec
+ *out = new(RemoteWriteClientSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.QueueSpec != nil {
+ in, out := &in.QueueSpec, &out.QueueSpec
+ *out = new(RemoteWriteClientQueueSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteSpec.
+func (in *RemoteWriteSpec) DeepCopy() *RemoteWriteSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteWriteSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationSpec) DeepCopyInto(out *ReplicationSpec) {
+ *out = *in
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]ZoneSpec, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationSpec.
+func (in *ReplicationSpec) DeepCopy() *ReplicationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplicationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetentionLimitSpec) DeepCopyInto(out *RetentionLimitSpec) {
+ *out = *in
+ if in.Streams != nil {
+ in, out := &in.Streams, &out.Streams
+ *out = make([]*RetentionStreamSpec, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(RetentionStreamSpec)
+ **out = **in
+ }
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionLimitSpec.
+func (in *RetentionLimitSpec) DeepCopy() *RetentionLimitSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RetentionLimitSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetentionStreamSpec) DeepCopyInto(out *RetentionStreamSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionStreamSpec.
+func (in *RetentionStreamSpec) DeepCopy() *RetentionStreamSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RetentionStreamSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingsSpec) DeepCopyInto(out *RoleBindingsSpec) {
+ *out = *in
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]Subject, len(*in))
+ copy(*out, *in)
+ }
+ if in.Roles != nil {
+ in, out := &in.Roles, &out.Roles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingsSpec.
+func (in *RoleBindingsSpec) DeepCopy() *RoleBindingsSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleBindingsSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleSpec) DeepCopyInto(out *RoleSpec) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tenants != nil {
+ in, out := &in.Tenants, &out.Tenants
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Permissions != nil {
+ in, out := &in.Permissions, &out.Permissions
+ *out = make([]PermissionType, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleSpec.
+func (in *RoleSpec) DeepCopy() *RoleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulerConfig) DeepCopyInto(out *RulerConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulerConfig.
+func (in *RulerConfig) DeepCopy() *RulerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RulerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RulerConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulerConfigList) DeepCopyInto(out *RulerConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]RulerConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulerConfigList.
+func (in *RulerConfigList) DeepCopy() *RulerConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(RulerConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RulerConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulerConfigSpec) DeepCopyInto(out *RulerConfigSpec) {
+ *out = *in
+ if in.AlertManagerSpec != nil {
+ in, out := &in.AlertManagerSpec, &out.AlertManagerSpec
+ *out = new(AlertManagerSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RemoteWriteSpec != nil {
+ in, out := &in.RemoteWriteSpec, &out.RemoteWriteSpec
+ *out = new(RemoteWriteSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Overrides != nil {
+ in, out := &in.Overrides, &out.Overrides
+ *out = make(map[string]RulerOverrides, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulerConfigSpec.
+func (in *RulerConfigSpec) DeepCopy() *RulerConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RulerConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulerConfigStatus) DeepCopyInto(out *RulerConfigStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulerConfigStatus.
+func (in *RulerConfigStatus) DeepCopy() *RulerConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RulerConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulerOverrides) DeepCopyInto(out *RulerOverrides) {
+ *out = *in
+ if in.AlertManagerOverrides != nil {
+ in, out := &in.AlertManagerOverrides, &out.AlertManagerOverrides
+ *out = new(AlertManagerSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulerOverrides.
+func (in *RulerOverrides) DeepCopy() *RulerOverrides {
+ if in == nil {
+ return nil
+ }
+ out := new(RulerOverrides)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RulesSpec) DeepCopyInto(out *RulesSpec) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSpec.
+func (in *RulesSpec) DeepCopy() *RulesSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RulesSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subject) DeepCopyInto(out *Subject) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
+func (in *Subject) DeepCopy() *Subject {
+ if in == nil {
+ return nil
+ }
+ out := new(Subject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TenantSecretSpec) DeepCopyInto(out *TenantSecretSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSecretSpec.
+func (in *TenantSecretSpec) DeepCopy() *TenantSecretSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TenantSecretSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TenantsSpec) DeepCopyInto(out *TenantsSpec) {
+ *out = *in
+ if in.Authentication != nil {
+ in, out := &in.Authentication, &out.Authentication
+ *out = make([]AuthenticationSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Authorization != nil {
+ in, out := &in.Authorization, &out.Authorization
+ *out = new(AuthorizationSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Openshift != nil {
+ in, out := &in.Openshift, &out.Openshift
+ *out = new(OpenshiftTenantSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantsSpec.
+func (in *TenantsSpec) DeepCopy() *TenantsSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TenantsSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZoneSpec) DeepCopyInto(out *ZoneSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneSpec.
+func (in *ZoneSpec) DeepCopy() *ZoneSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ZoneSpec)
+ in.DeepCopyInto(out)
+ return out
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9c54699bf..1479e8db4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -44,6 +44,9 @@ github.com/google/uuid
# github.com/gorilla/mux v1.8.1
## explicit; go 1.20
github.com/gorilla/mux
+# github.com/grafana/loki/operator/apis/loki v0.0.0-20241021105923-5e970e50b166
+## explicit; go 1.19
+github.com/grafana/loki/operator/apis/loki/v1
# github.com/josharian/intern v1.0.0
## explicit; go 1.5
github.com/josharian/intern
@@ -500,6 +503,9 @@ k8s.io/utils/clock
k8s.io/utils/internal/third_party/forked/golang/net
k8s.io/utils/net
k8s.io/utils/ptr
+# sigs.k8s.io/controller-runtime v0.20.4
+## explicit; go 1.23.0
+sigs.k8s.io/controller-runtime/pkg/scheme
# sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8
## explicit; go 1.23
sigs.k8s.io/json
diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
new file mode 100644
index 000000000..55ebe2177
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package scheme contains utilities for gradually building Schemes,
+// which contain information associating Go types with Kubernetes
+// groups, versions, and kinds.
+//
+// Each API group should define a utility function
+// called AddToScheme for adding its types to a Scheme:
+//
+// // in package myapigroupv1...
+// var (
+// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
+// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+// AddToScheme = SchemeBuilder.AddToScheme
+// )
+//
+// func init() {
+// SchemeBuilder.Register(&MyType{}, &MyTypeList{})
+// }
+//
+// var (
+// scheme *runtime.Scheme = runtime.NewScheme()
+// )
+//
+// This is also true of the built-in Kubernetes types. Then, in the entrypoint for
+// your manager, assemble the scheme containing exactly the types you need,
+// panicking if scheme registration failed. For instance, if our controller needs
+// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
+//
+// func init() {
+// utilruntime.Must(myapigroupv1.AddToScheme(scheme))
+// utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
+// }
+//
+// func main() {
+// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
+// Scheme: scheme,
+// })
+// // ...
+// }
+package scheme
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
+type Builder struct {
+ GroupVersion schema.GroupVersion
+ runtime.SchemeBuilder
+}
+
+// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld.
+func (bld *Builder) Register(object ...runtime.Object) *Builder {
+ bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(bld.GroupVersion, object...)
+ metav1.AddToGroupVersion(scheme, bld.GroupVersion)
+ return nil
+ })
+ return bld
+}
+
+// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld.
+func (bld *Builder) RegisterAll(b *Builder) *Builder {
+ bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...)
+ return bld
+}
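+
+// For illustration only (a minimal sketch, not upstream code; gv1Builder and
+// gv2Builder are hypothetical builders for two different API groups):
+//
+// combined := gv1Builder.RegisterAll(gv2Builder)
+// err := combined.AddToScheme(runtime.NewScheme())
+//
+// Each type stays registered under the GroupVersion of the builder it was
+// Register()-ed with, because Register reads bld.GroupVersion in its closure.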
+
+// AddToScheme adds all registered types to s.
+func (bld *Builder) AddToScheme(s *runtime.Scheme) error {
+ return bld.SchemeBuilder.AddToScheme(s)
+}
+
+// Build returns a new Scheme containing the registered types.
+func (bld *Builder) Build() (*runtime.Scheme, error) {
+ s := runtime.NewScheme()
+ return s, bld.AddToScheme(s)
+}
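+
+// Usage sketch (illustrative; assumes the vendored
+// github.com/grafana/loki/operator/apis/loki/v1 package follows the standard
+// kubebuilder layout, where its AddToScheme is built on this Builder):
+//
+// scheme := runtime.NewScheme()
+// if err := lokiv1.AddToScheme(scheme); err != nil {
+// panic(err)
+// }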