diff --git a/go.mod b/go.mod index cb7b5957..049a7c06 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.23.0 require ( github.com/container-storage-interface/spec v1.8.0 - github.com/exoscale/egoscale/v3 v3.1.1 + github.com/exoscale/egoscale/v3 v3.1.8 github.com/golang/protobuf v1.5.4 github.com/stretchr/testify v1.9.0 golang.org/x/sys v0.29.0 @@ -58,12 +58,12 @@ require ( github.com/x448/float16 v0.8.4 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/djherbis/times.v1 v1.3.0 // indirect diff --git a/go.sum b/go.sum index 8a41edb0..436e16d3 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1Ugj github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/exoscale/egoscale/v3 v3.1.1 h1:NwTlXE2sKe2kBWm+c3bsHV+aWDFiEJ9JQpS6X3j4wbc= -github.com/exoscale/egoscale/v3 v3.1.1/go.mod h1:lPsza7G+giSxdzvzaHSEcjEAYz/YTiu2bEEha9KVAc4= +github.com/exoscale/egoscale/v3 v3.1.8 h1:LrZ7cAk3Wum9ZncKR1gRPpXsdvCoQb7KoUi3+ZxSTvY= +github.com/exoscale/egoscale/v3 v3.1.8/go.mod h1:t9+MpSEam94na48O/xgvvPFpQPRiwZ3kBN4/UuQtKco= github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -163,8 +163,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -193,13 +193,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/internal/integ/go.mod b/internal/integ/go.mod index 7283e86b..780789cf 100644 --- a/internal/integ/go.mod +++ b/internal/integ/go.mod @@ -1,11 +1,11 @@ module github.com/exoscale/exoscale/csi-driver/internal/integ -go 1.22 +go 1.22.0 toolchain go1.23.4 require ( - github.com/exoscale/egoscale/v3 v3.1.1 + github.com/exoscale/egoscale/v3 v3.1.8 github.com/stretchr/testify v1.9.0 k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 @@ -51,13 +51,13 @@ require ( github.com/spf13/viper v1.18.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect golang.org/x/net v0.22.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git 
a/internal/integ/go.sum b/internal/integ/go.sum index 521a4263..e068f12a 100644 --- a/internal/integ/go.sum +++ b/internal/integ/go.sum @@ -5,8 +5,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/exoscale/egoscale/v3 v3.1.1 h1:NwTlXE2sKe2kBWm+c3bsHV+aWDFiEJ9JQpS6X3j4wbc= -github.com/exoscale/egoscale/v3 v3.1.1/go.mod h1:lPsza7G+giSxdzvzaHSEcjEAYz/YTiu2bEEha9KVAc4= +github.com/exoscale/egoscale/v3 v3.1.8 h1:LrZ7cAk3Wum9ZncKR1gRPpXsdvCoQb7KoUi3+ZxSTvY= +github.com/exoscale/egoscale/v3 v3.1.8/go.mod h1:t9+MpSEam94na48O/xgvvPFpQPRiwZ3kBN4/UuQtKco= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -126,8 +126,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 
h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -147,23 +147,23 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/exoscale/egoscale/v3/README.md b/vendor/github.com/exoscale/egoscale/v3/README.md index cae793dc..da648f47 100644 --- a/vendor/github.com/exoscale/egoscale/v3/README.md +++ b/vendor/github.com/exoscale/egoscale/v3/README.md @@ -70,12 +70,30 @@ func main() { } ``` +### Findable + +Most of the list request `ListX()` return a type containing the list of the resource requested and a method `FindX()` to be able to retrieve a resource by its `name` or `id` most of the time. 
+ +```Golang +pools, err := client.ListInstancePools(ctx) +if err != nil { + log.Fatal(err) +} +pool, err := pools.FindInstancePool("my-pool-example") +if err != nil { + log.Fatal(err) +} + +fmt.Println(pool.Name) +``` + ## Development ### Generate Egoscale v3 From the root repo ```Bash +make pull-oapi-spec # Optional(to pull latest Exoscale Open-API spec) make generate ``` @@ -87,3 +105,53 @@ GENERATOR_DEBUG=client make generate > test/client.go GENERATOR_DEBUG=schemas make generate > test/schemas.go GENERATOR_DEBUG=operations make generate > test/operations.go ``` + +### OpenAPI Extensions + +The generator support two types of extension: +- `x-go-type` to specify a type definition in Golang. + + OpenAPI Spec + ```yaml + api-endpoint: + type: string + x-go-type: Endpoint + description: Zone API endpoint + ``` + Generated code + ```Golang + type Endpoint string + + type Zone struct { + APIEndpoint Endpoint // Here is the generated type definition. + ... + } + ``` +- `x-go-findable` to specify which fields in the findable resource to fetch + OpenAPI Spec + ```yaml + elastic-ip: + type: object + properties: + id: + type: string + x-go-findable: "1" + description: Elastic IP ID + ip: + type: string + x-go-findable: "2" + description: Elastic IP address + ``` + Generated code + ```Golang + // FindElasticIP attempts to find an ElasticIP by idOrIP. 
+ func (l ListElasticIPSResponse) FindElasticIP(idOrIP string) (ElasticIP, error) { + for i, elem := range l.ElasticIPS { + if string(elem.ID) == idOrIP || string(elem.IP) == idOrIP { + return l.ElasticIPS[i], nil + } + } + + return ElasticIP{}, fmt.Errorf("%q not found in ListElasticIPSResponse: %w", idOrIP, ErrNotFound) + } + ``` diff --git a/vendor/github.com/exoscale/egoscale/v3/api.go b/vendor/github.com/exoscale/egoscale/v3/api.go index 5a569194..f4151874 100644 --- a/vendor/github.com/exoscale/egoscale/v3/api.go +++ b/vendor/github.com/exoscale/egoscale/v3/api.go @@ -7,8 +7,10 @@ import ( "crypto/sha256" "encoding/base64" "encoding/json" + "errors" "fmt" "io" + "math" "net/http" "net/http/httputil" "os" @@ -39,26 +41,46 @@ func ParseUUID(s string) (UUID, error) { // Final states are one of: failure, success, timeout. // If states argument are given, returns an error if the final state not match on of those. func (c Client) Wait(ctx context.Context, op *Operation, states ...OperationState) (*Operation, error) { + const abortErrorsCount = 5 + if op == nil { return nil, fmt.Errorf("operation is nil") } - ticker := time.NewTicker(c.pollingInterval) + startTime := time.Now() + + ticker := time.NewTicker(pollInterval(0)) defer ticker.Stop() if op.State != OperationStatePending { return op, nil } + var subsequentErrors int var operation *Operation polling: for { select { case <-ticker.C: + runTime := time.Since(startTime) + + if c.waitTimeout != 0 && runTime > c.waitTimeout { + return nil, fmt.Errorf("operation: %q: max wait timeout reached", op.ID) + } + + newInterval := pollInterval(runTime) + ticker.Reset(newInterval) + o, err := c.GetOperation(ctx, op.ID) if err != nil { - return nil, err + subsequentErrors++ + if subsequentErrors >= abortErrorsCount { + return nil, err + } + continue } + subsequentErrors = 0 + if o.State == OperationStatePending { continue } @@ -133,12 +155,34 @@ func (c Client) Validate(s any) error { } else { errorString += 
fmt.Sprintf("'%s=%v'", e.ActualTag(), e.Param()) } - return fmt.Errorf(errorString) + return errors.New(errorString) } return err } +// pollInterval returns the wait interval (as a time.Duration) before the next poll, based on the current runtime of a job. +// The polling frequency is: +// - every 3 seconds for the first 30 seconds +// - then increases linearly to reach 1 minute at 15 minutes of runtime +// - after 15 minutes, it stays at 1 minute intervals +func pollInterval(runTime time.Duration) time.Duration { + runTimeSeconds := runTime.Seconds() + + // Coefficients for the linear equation y = a * x + b + a := 57.0 / 870.0 + b := 3.0 - 30.0*a + + minWait := 3.0 + maxWait := 60.0 + + interval := a*runTimeSeconds + b + interval = math.Max(minWait, interval) + interval = math.Min(maxWait, interval) + + return time.Duration(interval) * time.Second +} + func prepareJSONBody(body any) (*bytes.Reader, error) { buf, err := json.Marshal(body) if err != nil { diff --git a/vendor/github.com/exoscale/egoscale/v3/client.go b/vendor/github.com/exoscale/egoscale/v3/client.go index 804a3780..4d003976 100644 --- a/vendor/github.com/exoscale/egoscale/v3/client.go +++ b/vendor/github.com/exoscale/egoscale/v3/client.go @@ -30,26 +30,26 @@ const ( func (c Client) GetZoneName(ctx context.Context, endpoint Endpoint) (ZoneName, error) { resp, err := c.ListZones(ctx) if err != nil { - return "", fmt.Errorf("get zone name: %w", err) + return "", fmt.Errorf("get zone name: list zones: %w", err) } - for _, zone := range resp.Zones { - if zone.APIEndpoint == endpoint { - return zone.Name, nil - } + + zone, err := resp.FindZone(string(endpoint)) + if err != nil { + return "", fmt.Errorf("get zone name: find zone: %w", err) } - return "", fmt.Errorf("get zone name: no matching zone for %s", endpoint) + return zone.Name, nil } func (c Client) GetZoneAPIEndpoint(ctx context.Context, zoneName ZoneName) (Endpoint, error) { resp, err := c.ListZones(ctx) if err != nil { - return "", 
fmt.Errorf("get zone api endpoint: %w", err) + return "", fmt.Errorf("get zone api endpoint: list zones: %w", err) } zone, err := resp.FindZone(string(zoneName)) if err != nil { - return "", fmt.Errorf("get zone api endpoint: %w", err) + return "", fmt.Errorf("get zone api endpoint: find zone: %w", err) } return zone.APIEndpoint, nil @@ -57,13 +57,14 @@ func (c Client) GetZoneAPIEndpoint(ctx context.Context, zoneName ZoneName) (Endp // Client represents an Exoscale API client. type Client struct { - apiKey string - apiSecret string - serverEndpoint string - httpClient *http.Client - pollingInterval time.Duration - validate *validator.Validate - trace bool + apiKey string + apiSecret string + userAgent string + serverEndpoint string + httpClient *http.Client + waitTimeout time.Duration + validate *validator.Validate + trace bool // A list of callbacks for modifying requests which are generated before sending over // the network. @@ -73,14 +74,8 @@ type Client struct { // RequestInterceptorFn is the function signature for the RequestInterceptor callback function type RequestInterceptorFn func(ctx context.Context, req *http.Request) error -// UserAgent is the "User-Agent" HTTP request header added to outgoing HTTP requests. -var UserAgent = fmt.Sprintf("egoscale/%s (%s; %s/%s)", - Version, - runtime.Version(), - runtime.GOOS, - runtime.GOARCH) - -const pollingInterval = 3 * time.Second +// Deprecated: use ClientOptWithUserAgent instead. +var UserAgent = getDefaultUserAgent() // ClientOpt represents a function setting Exoscale API client option. type ClientOpt func(*Client) error @@ -93,6 +88,14 @@ func ClientOptWithTrace() ClientOpt { } } +// ClientOptWithUserAgent returns a ClientOpt setting the user agent header. +func ClientOptWithUserAgent(ua string) ClientOpt { + return func(c *Client) error { + c.userAgent = ua + " " + getDefaultUserAgent() + return nil + } +} + // ClientOptWithValidator returns a ClientOpt with a given validator. 
func ClientOptWithValidator(validate *validator.Validate) ClientOpt { return func(c *Client) error { @@ -109,6 +112,14 @@ func ClientOptWithEndpoint(endpoint Endpoint) ClientOpt { } } +// ClientOptWithWaitTimeout returns a ClientOpt With a given wait timeout. +func ClientOptWithWaitTimeout(t time.Duration) ClientOpt { + return func(c *Client) error { + c.waitTimeout = t + return nil + } +} + // ClientOptWithRequestInterceptors returns a ClientOpt With given RequestInterceptors. func ClientOptWithRequestInterceptors(f ...RequestInterceptorFn) ClientOpt { return func(c *Client) error { @@ -131,6 +142,15 @@ func ClientOptWithHTTPClient(v *http.Client) ClientOpt { } } +// getDefaultUserAgent returns the "User-Agent" HTTP request header added to outgoing HTTP requests. +func getDefaultUserAgent() string { + return fmt.Sprintf("egoscale/%s (%s; %s/%s)", + Version, + runtime.Version(), + runtime.GOOS, + runtime.GOARCH) +} + // NewClient returns a new Exoscale API client. func NewClient(credentials *credentials.Credentials, opts ...ClientOpt) (*Client, error) { values, err := credentials.Get() @@ -139,12 +159,12 @@ func NewClient(credentials *credentials.Credentials, opts ...ClientOpt) (*Client } client := &Client{ - apiKey: values.APIKey, - apiSecret: values.APISecret, - serverEndpoint: string(CHGva2), - httpClient: http.DefaultClient, - pollingInterval: pollingInterval, - validate: validator.New(), + apiKey: values.APIKey, + apiSecret: values.APISecret, + serverEndpoint: string(CHGva2), + httpClient: http.DefaultClient, + validate: validator.New(), + userAgent: getDefaultUserAgent(), } for _, opt := range opts { @@ -156,60 +176,83 @@ func NewClient(credentials *credentials.Credentials, opts ...ClientOpt) (*Client return client, nil } +// getUserAgent only for compatibility with UserAgent. 
+func (c *Client) getUserAgent() string { + defaultUA := getDefaultUserAgent() + + if c.userAgent != defaultUA { + return c.userAgent + } + + if UserAgent != defaultUA { + return UserAgent + } + + return c.userAgent +} + // WithEndpoint returns a copy of Client with new zone Endpoint. func (c *Client) WithEndpoint(endpoint Endpoint) *Client { - return &Client{ - apiKey: c.apiKey, - apiSecret: c.apiSecret, - serverEndpoint: string(endpoint), - httpClient: c.httpClient, - requestInterceptors: c.requestInterceptors, - pollingInterval: c.pollingInterval, - trace: c.trace, - validate: c.validate, - } + clone := cloneClient(c) + + clone.serverEndpoint = string(endpoint) + + return clone +} + +// WithWaitTimeout returns a copy of Client with new wait timeout. +func (c *Client) WithWaitTimeout(t time.Duration) *Client { + clone := cloneClient(c) + + clone.waitTimeout = t + + return clone +} + +// WithUserAgent returns a copy of Client with new User-Agent. +func (c *Client) WithUserAgent(ua string) *Client { + clone := cloneClient(c) + + clone.userAgent = ua + " " + getDefaultUserAgent() + + return clone } // WithTrace returns a copy of Client with tracing enabled. func (c *Client) WithTrace() *Client { - return &Client{ - apiKey: c.apiKey, - apiSecret: c.apiSecret, - serverEndpoint: c.serverEndpoint, - httpClient: c.httpClient, - requestInterceptors: c.requestInterceptors, - pollingInterval: c.pollingInterval, - trace: true, - validate: c.validate, - } + clone := cloneClient(c) + + clone.trace = true + + return clone } // WithHttpClient returns a copy of Client with new http.Client. +// Deprecated: use WithHTTPClient instead. 
func (c *Client) WithHttpClient(client *http.Client) *Client { - return &Client{ - apiKey: c.apiKey, - apiSecret: c.apiSecret, - serverEndpoint: c.serverEndpoint, - httpClient: client, - requestInterceptors: c.requestInterceptors, - pollingInterval: c.pollingInterval, - trace: c.trace, - validate: c.validate, - } + clone := cloneClient(c) + + clone.httpClient = client + + return clone +} + +// WithHTTPClient returns a copy of Client with new http.Client. +func (c *Client) WithHTTPClient(client *http.Client) *Client { + clone := cloneClient(c) + + clone.httpClient = client + + return clone } // WithRequestInterceptor returns a copy of Client with new RequestInterceptors. func (c *Client) WithRequestInterceptor(f ...RequestInterceptorFn) *Client { - return &Client{ - apiKey: c.apiKey, - apiSecret: c.apiSecret, - serverEndpoint: c.serverEndpoint, - httpClient: c.httpClient, - requestInterceptors: append(c.requestInterceptors, f...), - pollingInterval: c.pollingInterval, - trace: c.trace, - validate: c.validate, - } + clone := cloneClient(c) + + clone.requestInterceptors = append(clone.requestInterceptors, f...) 
+ + return clone } func (c *Client) executeRequestInterceptors(ctx context.Context, req *http.Request) error { @@ -221,3 +264,17 @@ func (c *Client) executeRequestInterceptors(ctx context.Context, req *http.Reque return nil } + +func cloneClient(c *Client) *Client { + return &Client{ + apiKey: c.apiKey, + apiSecret: c.apiSecret, + userAgent: c.userAgent, + serverEndpoint: c.serverEndpoint, + httpClient: c.httpClient, + requestInterceptors: c.requestInterceptors, + waitTimeout: c.waitTimeout, + trace: c.trace, + validate: c.validate, + } +} diff --git a/vendor/github.com/exoscale/egoscale/v3/operations.go b/vendor/github.com/exoscale/egoscale/v3/operations.go index 8a103355..915ce6dd 100644 --- a/vendor/github.com/exoscale/egoscale/v3/operations.go +++ b/vendor/github.com/exoscale/egoscale/v3/operations.go @@ -18,11 +18,19 @@ type ListAntiAffinityGroupsResponse struct { // FindAntiAffinityGroup attempts to find an AntiAffinityGroup by nameOrID. func (l ListAntiAffinityGroupsResponse) FindAntiAffinityGroup(nameOrID string) (AntiAffinityGroup, error) { + var result []AntiAffinityGroup for i, elem := range l.AntiAffinityGroups { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.AntiAffinityGroups[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.AntiAffinityGroups[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return AntiAffinityGroup{}, fmt.Errorf("%q too many found in ListAntiAffinityGroupsResponse: %w", nameOrID, ErrConflict) + } return AntiAffinityGroup{}, fmt.Errorf("%q not found in ListAntiAffinityGroupsResponse: %w", nameOrID, ErrNotFound) } @@ -35,7 +43,8 @@ func (c Client) ListAntiAffinityGroups(ctx context.Context) (*ListAntiAffinityGr if err != nil { return nil, fmt.Errorf("ListAntiAffinityGroups: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := 
c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListAntiAffinityGroups: execute request editors: %w", err) @@ -90,7 +99,8 @@ func (c Client) CreateAntiAffinityGroup(ctx context.Context, req CreateAntiAffin if err != nil { return nil, fmt.Errorf("CreateAntiAffinityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -135,7 +145,8 @@ func (c Client) DeleteAntiAffinityGroup(ctx context.Context, id UUID) (*Operatio if err != nil { return nil, fmt.Errorf("DeleteAntiAffinityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteAntiAffinityGroup: execute request editors: %w", err) @@ -178,7 +189,8 @@ func (c Client) GetAntiAffinityGroup(ctx context.Context, id UUID) (*AntiAffinit if err != nil { return nil, fmt.Errorf("GetAntiAffinityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetAntiAffinityGroup: execute request editors: %w", err) @@ -217,15 +229,23 @@ type ListAPIKeysResponse struct { APIKeys []IAMAPIKey `json:"api-keys,omitempty"` } -// FindIAMAPIKey attempts to find an IAMAPIKey by name. -func (l ListAPIKeysResponse) FindIAMAPIKey(name string) (IAMAPIKey, error) { +// FindIAMAPIKey attempts to find an IAMAPIKey by nameOrKey. 
+func (l ListAPIKeysResponse) FindIAMAPIKey(nameOrKey string) (IAMAPIKey, error) { + var result []IAMAPIKey for i, elem := range l.APIKeys { - if string(elem.Name) == name { - return l.APIKeys[i], nil + if string(elem.Name) == nameOrKey || string(elem.Key) == nameOrKey { + result = append(result, l.APIKeys[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return IAMAPIKey{}, fmt.Errorf("%q too many found in ListAPIKeysResponse: %w", nameOrKey, ErrConflict) + } - return IAMAPIKey{}, fmt.Errorf("%q not found in ListAPIKeysResponse: %w", name, ErrNotFound) + return IAMAPIKey{}, fmt.Errorf("%q not found in ListAPIKeysResponse: %w", nameOrKey, ErrNotFound) } // List API keys @@ -236,7 +256,8 @@ func (c Client) ListAPIKeys(ctx context.Context) (*ListAPIKeysResponse, error) { if err != nil { return nil, fmt.Errorf("ListAPIKeys: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListAPIKeys: execute request editors: %w", err) @@ -291,7 +312,8 @@ func (c Client) CreateAPIKey(ctx context.Context, req CreateAPIKeyRequest) (*IAM if err != nil { return nil, fmt.Errorf("CreateAPIKey: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -336,7 +358,8 @@ func (c Client) DeleteAPIKey(ctx context.Context, id string) (*Operation, error) if err != nil { return nil, fmt.Errorf("DeleteAPIKey: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteAPIKey: execute request editors: %w", err) @@ -379,7 +402,8 @@ func (c Client) GetAPIKey(ctx context.Context, id string) (*IAMAPIKey, error) { if err 
!= nil { return nil, fmt.Errorf("GetAPIKey: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetAPIKey: execute request editors: %w", err) @@ -420,11 +444,19 @@ type ListBlockStorageVolumesResponse struct { // FindBlockStorageVolume attempts to find an BlockStorageVolume by nameOrID. func (l ListBlockStorageVolumesResponse) FindBlockStorageVolume(nameOrID string) (BlockStorageVolume, error) { + var result []BlockStorageVolume for i, elem := range l.BlockStorageVolumes { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.BlockStorageVolumes[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.BlockStorageVolumes[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return BlockStorageVolume{}, fmt.Errorf("%q too many found in ListBlockStorageVolumesResponse: %w", nameOrID, ErrConflict) + } return BlockStorageVolume{}, fmt.Errorf("%q not found in ListBlockStorageVolumesResponse: %w", nameOrID, ErrNotFound) } @@ -445,7 +477,8 @@ func (c Client) ListBlockStorageVolumes(ctx context.Context, opts ...ListBlockSt if err != nil { return nil, fmt.Errorf("ListBlockStorageVolumes: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if len(opts) > 0 { q := request.URL.Query() @@ -512,7 +545,8 @@ func (c Client) CreateBlockStorageVolume(ctx context.Context, req CreateBlockSto if err != nil { return nil, fmt.Errorf("CreateBlockStorageVolume: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -555,11 +589,19 @@ type ListBlockStorageSnapshotsResponse struct { // FindBlockStorageSnapshot attempts to find 
an BlockStorageSnapshot by nameOrID. func (l ListBlockStorageSnapshotsResponse) FindBlockStorageSnapshot(nameOrID string) (BlockStorageSnapshot, error) { + var result []BlockStorageSnapshot for i, elem := range l.BlockStorageSnapshots { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.BlockStorageSnapshots[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.BlockStorageSnapshots[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return BlockStorageSnapshot{}, fmt.Errorf("%q too many found in ListBlockStorageSnapshotsResponse: %w", nameOrID, ErrConflict) + } return BlockStorageSnapshot{}, fmt.Errorf("%q not found in ListBlockStorageSnapshotsResponse: %w", nameOrID, ErrNotFound) } @@ -572,7 +614,8 @@ func (c Client) ListBlockStorageSnapshots(ctx context.Context) (*ListBlockStorag if err != nil { return nil, fmt.Errorf("ListBlockStorageSnapshots: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListBlockStorageSnapshots: execute request editors: %w", err) @@ -615,7 +658,8 @@ func (c Client) DeleteBlockStorageSnapshot(ctx context.Context, id UUID) (*Opera if err != nil { return nil, fmt.Errorf("DeleteBlockStorageSnapshot: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteBlockStorageSnapshot: execute request editors: %w", err) @@ -658,7 +702,8 @@ func (c Client) GetBlockStorageSnapshot(ctx context.Context, id UUID) (*BlockSto if err != nil { return nil, fmt.Errorf("GetBlockStorageSnapshot: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if 
err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetBlockStorageSnapshot: execute request editors: %w", err) @@ -712,7 +757,8 @@ func (c Client) UpdateBlockStorageSnapshot(ctx context.Context, id UUID, req Upd if err != nil { return nil, fmt.Errorf("UpdateBlockStorageSnapshot: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -757,7 +803,8 @@ func (c Client) DeleteBlockStorageVolume(ctx context.Context, id UUID) (*Operati if err != nil { return nil, fmt.Errorf("DeleteBlockStorageVolume: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteBlockStorageVolume: execute request editors: %w", err) @@ -800,7 +847,8 @@ func (c Client) GetBlockStorageVolume(ctx context.Context, id UUID) (*BlockStora if err != nil { return nil, fmt.Errorf("GetBlockStorageVolume: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetBlockStorageVolume: execute request editors: %w", err) @@ -854,7 +902,8 @@ func (c Client) UpdateBlockStorageVolume(ctx context.Context, id UUID, req Updat if err != nil { return nil, fmt.Errorf("UpdateBlockStorageVolume: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -909,7 +958,8 @@ func (c Client) AttachBlockStorageVolumeToInstance(ctx context.Context, id UUID, if err != nil { return nil, fmt.Errorf("AttachBlockStorageVolumeToInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + 
request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -965,7 +1015,8 @@ func (c Client) CreateBlockStorageSnapshot(ctx context.Context, id UUID, req Cre if err != nil { return nil, fmt.Errorf("CreateBlockStorageSnapshot: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -1010,7 +1061,8 @@ func (c Client) DetachBlockStorageVolume(ctx context.Context, id UUID) (*Operati if err != nil { return nil, fmt.Errorf("DetachBlockStorageVolume: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DetachBlockStorageVolume: execute request editors: %w", err) @@ -1063,7 +1115,8 @@ func (c Client) ResizeBlockStorageVolume(ctx context.Context, id UUID, req Resiz if err != nil { return nil, fmt.Errorf("ResizeBlockStorageVolume: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -1114,7 +1167,8 @@ func (c Client) GetConsoleProxyURL(ctx context.Context, id UUID) (*GetConsolePro if err != nil { return nil, fmt.Errorf("GetConsoleProxyURL: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetConsoleProxyURL: execute request editors: %w", err) @@ -1161,7 +1215,8 @@ func (c Client) GetDBAASCACertificate(ctx context.Context) (*GetDBAASCACertifica if err != nil { return nil, fmt.Errorf("GetDBAASCACertificate: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := 
c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASCACertificate: execute request editors: %w", err) @@ -1196,31 +1251,32 @@ func (c Client) GetDBAASCACertificate(ctx context.Context) (*GetDBAASCACertifica return bodyresp, nil } -// Delete a Grafana service -func (c Client) DeleteDBAASServiceGrafana(ctx context.Context, name string) (*Operation, error) { - path := fmt.Sprintf("/dbaas-grafana/%v", name) +// [BETA] Delete DataDog external integration endpoint +func (c Client) DeleteDBAASExternalEndpointDatadog(ctx context.Context, endpointID UUID) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-datadog/%v", endpointID) request, err := http.NewRequestWithContext(ctx, "DELETE", c.serverEndpoint+path, nil) if err != nil { - return nil, fmt.Errorf("DeleteDBAASServiceGrafana: new request: %w", err) + return nil, fmt.Errorf("DeleteDBAASExternalEndpointDatadog: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { - return nil, fmt.Errorf("DeleteDBAASServiceGrafana: execute request editors: %w", err) + return nil, fmt.Errorf("DeleteDBAASExternalEndpointDatadog: execute request editors: %w", err) } if err := c.signRequest(request); err != nil { - return nil, fmt.Errorf("DeleteDBAASServiceGrafana: sign request: %w", err) + return nil, fmt.Errorf("DeleteDBAASExternalEndpointDatadog: sign request: %w", err) } if c.trace { - dumpRequest(request, "delete-dbaas-service-grafana") + dumpRequest(request, "delete-dbaas-external-endpoint-datadog") } response, err := c.httpClient.Do(request) if err != nil { - return nil, fmt.Errorf("DeleteDBAASServiceGrafana: http client do: %w", err) + return nil, fmt.Errorf("DeleteDBAASExternalEndpointDatadog: http client do: %w", err) } if c.trace { @@ -1228,42 +1284,43 @@ func (c Client) DeleteDBAASServiceGrafana(ctx context.Context, name string) (*Op } 
if err := handleHTTPErrorResp(response); err != nil { - return nil, fmt.Errorf("DeleteDBAASServiceGrafana: http response: %w", err) + return nil, fmt.Errorf("DeleteDBAASExternalEndpointDatadog: http response: %w", err) } bodyresp := &Operation{} if err := prepareJSONResponse(response, bodyresp); err != nil { - return nil, fmt.Errorf("DeleteDBAASServiceGrafana: prepare Json response: %w", err) + return nil, fmt.Errorf("DeleteDBAASExternalEndpointDatadog: prepare Json response: %w", err) } return bodyresp, nil } -// Get a DBaaS Grafana service -func (c Client) GetDBAASServiceGrafana(ctx context.Context, name string) (*DBAASServiceGrafana, error) { - path := fmt.Sprintf("/dbaas-grafana/%v", name) +// [BETA] Get DataDog external endpoint settings +func (c Client) GetDBAASExternalEndpointDatadog(ctx context.Context, endpointID UUID) (*DBAASExternalEndpointDatadogOutput, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-datadog/%v", endpointID) request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) if err != nil { - return nil, fmt.Errorf("GetDBAASServiceGrafana: new request: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointDatadog: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { - return nil, fmt.Errorf("GetDBAASServiceGrafana: execute request editors: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointDatadog: execute request editors: %w", err) } if err := c.signRequest(request); err != nil { - return nil, fmt.Errorf("GetDBAASServiceGrafana: sign request: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointDatadog: sign request: %w", err) } if c.trace { - dumpRequest(request, "get-dbaas-service-grafana") + dumpRequest(request, "get-dbaas-external-endpoint-datadog") } response, err := c.httpClient.Do(request) if err != nil { - return nil, 
fmt.Errorf("GetDBAASServiceGrafana: http client do: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointDatadog: http client do: %w", err) } if c.trace { @@ -1271,84 +1328,101 @@ func (c Client) GetDBAASServiceGrafana(ctx context.Context, name string) (*DBAAS } if err := handleHTTPErrorResp(response); err != nil { - return nil, fmt.Errorf("GetDBAASServiceGrafana: http response: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointDatadog: http response: %w", err) } - bodyresp := &DBAASServiceGrafana{} + bodyresp := &DBAASExternalEndpointDatadogOutput{} if err := prepareJSONResponse(response, bodyresp); err != nil { - return nil, fmt.Errorf("GetDBAASServiceGrafana: prepare Json response: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointDatadog: prepare Json response: %w", err) } return bodyresp, nil } -type CreateDBAASServiceGrafanaRequestMaintenanceDow string +// [BETA] Update DataDog external integration endpoint +func (c Client) UpdateDBAASExternalEndpointDatadog(ctx context.Context, endpointID UUID, req DBAASEndpointDatadogInputUpdate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-datadog/%v", endpointID) -const ( - CreateDBAASServiceGrafanaRequestMaintenanceDowSaturday CreateDBAASServiceGrafanaRequestMaintenanceDow = "saturday" - CreateDBAASServiceGrafanaRequestMaintenanceDowTuesday CreateDBAASServiceGrafanaRequestMaintenanceDow = "tuesday" - CreateDBAASServiceGrafanaRequestMaintenanceDowNever CreateDBAASServiceGrafanaRequestMaintenanceDow = "never" - CreateDBAASServiceGrafanaRequestMaintenanceDowWednesday CreateDBAASServiceGrafanaRequestMaintenanceDow = "wednesday" - CreateDBAASServiceGrafanaRequestMaintenanceDowSunday CreateDBAASServiceGrafanaRequestMaintenanceDow = "sunday" - CreateDBAASServiceGrafanaRequestMaintenanceDowFriday CreateDBAASServiceGrafanaRequestMaintenanceDow = "friday" - CreateDBAASServiceGrafanaRequestMaintenanceDowMonday CreateDBAASServiceGrafanaRequestMaintenanceDow = "monday" - 
CreateDBAASServiceGrafanaRequestMaintenanceDowThursday CreateDBAASServiceGrafanaRequestMaintenanceDow = "thursday" -) + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointDatadog: prepare Json body: %w", err) + } -// Automatic maintenance settings -type CreateDBAASServiceGrafanaRequestMaintenance struct { - // Day of week for installing updates - Dow CreateDBAASServiceGrafanaRequestMaintenanceDow `json:"dow" validate:"required"` - // Time for installing updates, UTC - Time string `json:"time" validate:"required,gte=8,lte=8"` -} + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointDatadog: new request: %w", err) + } -type CreateDBAASServiceGrafanaRequest struct { - ForkFromService DBAASServiceName `json:"fork-from-service,omitempty" validate:"omitempty,gte=0,lte=63"` - // Grafana settings - GrafanaSettings *JSONSchemaGrafana `json:"grafana-settings,omitempty"` - // Allowed CIDR address blocks for incoming connections - IPFilter []string `json:"ip-filter,omitempty"` - // Automatic maintenance settings - Maintenance *CreateDBAASServiceGrafanaRequestMaintenance `json:"maintenance,omitempty"` - // Subscription plan - Plan string `json:"plan" validate:"required,gte=1,lte=128"` - // Service is protected against termination and powering off - TerminationProtection *bool `json:"termination-protection,omitempty"` + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointDatadog: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointDatadog: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "update-dbaas-external-endpoint-datadog") + } + + 
response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointDatadog: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointDatadog: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointDatadog: prepare Json response: %w", err) + } + + return bodyresp, nil } -// Create a DBaaS Grafana service -func (c Client) CreateDBAASServiceGrafana(ctx context.Context, name string, req CreateDBAASServiceGrafanaRequest) (*Operation, error) { - path := fmt.Sprintf("/dbaas-grafana/%v", name) +// [BETA] Create DataDog external integration endpoint +func (c Client) CreateDBAASExternalEndpointDatadog(ctx context.Context, name string, req DBAASEndpointDatadogInputCreate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-datadog/%v", name) body, err := prepareJSONBody(req) if err != nil { - return nil, fmt.Errorf("CreateDBAASServiceGrafana: prepare Json body: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointDatadog: prepare Json body: %w", err) } request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) if err != nil { - return nil, fmt.Errorf("CreateDBAASServiceGrafana: new request: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointDatadog: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") if err := c.executeRequestInterceptors(ctx, request); err != nil { - return nil, fmt.Errorf("CreateDBAASServiceGrafana: execute request editors: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointDatadog: execute request editors: %w", err) } if err := c.signRequest(request); err != 
nil { - return nil, fmt.Errorf("CreateDBAASServiceGrafana: sign request: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointDatadog: sign request: %w", err) } if c.trace { - dumpRequest(request, "create-dbaas-service-grafana") + dumpRequest(request, "create-dbaas-external-endpoint-datadog") } response, err := c.httpClient.Do(request) if err != nil { - return nil, fmt.Errorf("CreateDBAASServiceGrafana: http client do: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointDatadog: http client do: %w", err) } if c.trace { @@ -1356,83 +1430,87 @@ func (c Client) CreateDBAASServiceGrafana(ctx context.Context, name string, req } if err := handleHTTPErrorResp(response); err != nil { - return nil, fmt.Errorf("CreateDBAASServiceGrafana: http response: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointDatadog: http response: %w", err) } bodyresp := &Operation{} if err := prepareJSONResponse(response, bodyresp); err != nil { - return nil, fmt.Errorf("CreateDBAASServiceGrafana: prepare Json response: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointDatadog: prepare Json response: %w", err) } return bodyresp, nil } -type UpdateDBAASServiceGrafanaRequestMaintenanceDow string +// [BETA] Delete ElasticSearch logs external integration endpoint +func (c Client) DeleteDBAASExternalEndpointElasticsearch(ctx context.Context, endpointID UUID) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-elasticsearch/%v", endpointID) -const ( - UpdateDBAASServiceGrafanaRequestMaintenanceDowSaturday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "saturday" - UpdateDBAASServiceGrafanaRequestMaintenanceDowTuesday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "tuesday" - UpdateDBAASServiceGrafanaRequestMaintenanceDowNever UpdateDBAASServiceGrafanaRequestMaintenanceDow = "never" - UpdateDBAASServiceGrafanaRequestMaintenanceDowWednesday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "wednesday" - 
UpdateDBAASServiceGrafanaRequestMaintenanceDowSunday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "sunday" - UpdateDBAASServiceGrafanaRequestMaintenanceDowFriday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "friday" - UpdateDBAASServiceGrafanaRequestMaintenanceDowMonday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "monday" - UpdateDBAASServiceGrafanaRequestMaintenanceDowThursday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "thursday" -) + request, err := http.NewRequestWithContext(ctx, "DELETE", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointElasticsearch: new request: %w", err) + } -// Automatic maintenance settings -type UpdateDBAASServiceGrafanaRequestMaintenance struct { - // Day of week for installing updates - Dow UpdateDBAASServiceGrafanaRequestMaintenanceDow `json:"dow" validate:"required"` - // Time for installing updates, UTC - Time string `json:"time" validate:"required,gte=8,lte=8"` -} + request.Header.Add("User-Agent", c.getUserAgent()) -type UpdateDBAASServiceGrafanaRequest struct { - // Grafana settings - GrafanaSettings *JSONSchemaGrafana `json:"grafana-settings,omitempty"` - // Allowed CIDR address blocks for incoming connections - IPFilter []string `json:"ip-filter,omitempty"` - // Automatic maintenance settings - Maintenance *UpdateDBAASServiceGrafanaRequestMaintenance `json:"maintenance,omitempty"` - // Subscription plan - Plan string `json:"plan,omitempty" validate:"omitempty,gte=1,lte=128"` - // Service is protected against termination and powering off - TerminationProtection *bool `json:"termination-protection,omitempty"` -} + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointElasticsearch: execute request editors: %w", err) + } -// Update a DBaaS Grafana service -func (c Client) UpdateDBAASServiceGrafana(ctx context.Context, name string, req UpdateDBAASServiceGrafanaRequest) (*Operation, error) { - path := 
fmt.Sprintf("/dbaas-grafana/%v", name) + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointElasticsearch: sign request: %w", err) + } - body, err := prepareJSONBody(req) + if c.trace { + dumpRequest(request, "delete-dbaas-external-endpoint-elasticsearch") + } + + response, err := c.httpClient.Do(request) if err != nil { - return nil, fmt.Errorf("UpdateDBAASServiceGrafana: prepare Json body: %w", err) + return nil, fmt.Errorf("DeleteDBAASExternalEndpointElasticsearch: http client do: %w", err) } - request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointElasticsearch: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointElasticsearch: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Get ElasticSearch Logs external integration endpoint settings +func (c Client) GetDBAASExternalEndpointElasticsearch(ctx context.Context, endpointID UUID) (*DBAASEndpointElasticsearchOutput, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-elasticsearch/%v", endpointID) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) if err != nil { - return nil, fmt.Errorf("UpdateDBAASServiceGrafana: new request: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointElasticsearch: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) - request.Header.Add("Content-Type", "application/json") + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { - return nil, fmt.Errorf("UpdateDBAASServiceGrafana: execute request editors: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointElasticsearch: 
execute request editors: %w", err) } if err := c.signRequest(request); err != nil { - return nil, fmt.Errorf("UpdateDBAASServiceGrafana: sign request: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointElasticsearch: sign request: %w", err) } if c.trace { - dumpRequest(request, "update-dbaas-service-grafana") + dumpRequest(request, "get-dbaas-external-endpoint-elasticsearch") } response, err := c.httpClient.Do(request) if err != nil { - return nil, fmt.Errorf("UpdateDBAASServiceGrafana: http client do: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointElasticsearch: http client do: %w", err) } if c.trace { @@ -1440,42 +1518,50 @@ func (c Client) UpdateDBAASServiceGrafana(ctx context.Context, name string, req } if err := handleHTTPErrorResp(response); err != nil { - return nil, fmt.Errorf("UpdateDBAASServiceGrafana: http response: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointElasticsearch: http response: %w", err) } - bodyresp := &Operation{} + bodyresp := &DBAASEndpointElasticsearchOutput{} if err := prepareJSONResponse(response, bodyresp); err != nil { - return nil, fmt.Errorf("UpdateDBAASServiceGrafana: prepare Json response: %w", err) + return nil, fmt.Errorf("GetDBAASExternalEndpointElasticsearch: prepare Json response: %w", err) } return bodyresp, nil } -// Initiate Grafana maintenance update -func (c Client) StartDBAASGrafanaMaintenance(ctx context.Context, name string) (*Operation, error) { - path := fmt.Sprintf("/dbaas-grafana/%v/maintenance/start", name) +// [BETA] Update ElasticSearch Logs external integration endpoint +func (c Client) UpdateDBAASExternalEndpointElasticsearch(ctx context.Context, endpointID UUID, req DBAASEndpointElasticsearchInputUpdate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-elasticsearch/%v", endpointID) - request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, nil) + body, err := prepareJSONBody(req) if err != nil { - return nil, 
fmt.Errorf("StartDBAASGrafanaMaintenance: new request: %w", err) + return nil, fmt.Errorf("UpdateDBAASExternalEndpointElasticsearch: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointElasticsearch: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") if err := c.executeRequestInterceptors(ctx, request); err != nil { - return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: execute request editors: %w", err) + return nil, fmt.Errorf("UpdateDBAASExternalEndpointElasticsearch: execute request editors: %w", err) } if err := c.signRequest(request); err != nil { - return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: sign request: %w", err) + return nil, fmt.Errorf("UpdateDBAASExternalEndpointElasticsearch: sign request: %w", err) } if c.trace { - dumpRequest(request, "start-dbaas-grafana-maintenance") + dumpRequest(request, "update-dbaas-external-endpoint-elasticsearch") } response, err := c.httpClient.Do(request) if err != nil { - return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: http client do: %w", err) + return nil, fmt.Errorf("UpdateDBAASExternalEndpointElasticsearch: http client do: %w", err) } if c.trace { @@ -1483,35 +1569,1389 @@ func (c Client) StartDBAASGrafanaMaintenance(ctx context.Context, name string) ( } if err := handleHTTPErrorResp(response); err != nil { - return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: http response: %w", err) + return nil, fmt.Errorf("UpdateDBAASExternalEndpointElasticsearch: http response: %w", err) } bodyresp := &Operation{} if err := prepareJSONResponse(response, bodyresp); err != nil { - return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: prepare Json response: %w", err) + return nil, fmt.Errorf("UpdateDBAASExternalEndpointElasticsearch: prepare Json 
response: %w", err) } return bodyresp, nil } -type ResetDBAASGrafanaUserPasswordRequest struct { - Password DBAASUserPassword `json:"password,omitempty" validate:"omitempty,gte=8,lte=256"` -} - -// If no password is provided one will be generated automatically. -func (c Client) ResetDBAASGrafanaUserPassword(ctx context.Context, serviceName string, username string, req ResetDBAASGrafanaUserPasswordRequest) (*Operation, error) { - path := fmt.Sprintf("/dbaas-grafana/%v/user/%v/password/reset", serviceName, username) +// [BETA] Create ElasticSearch Logs external integration endpoint +func (c Client) CreateDBAASExternalEndpointElasticsearch(ctx context.Context, name string, req DBAASEndpointElasticsearchInputCreate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-elasticsearch/%v", name) body, err := prepareJSONBody(req) if err != nil { - return nil, fmt.Errorf("ResetDBAASGrafanaUserPassword: prepare Json body: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointElasticsearch: prepare Json body: %w", err) } - request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) if err != nil { - return nil, fmt.Errorf("ResetDBAASGrafanaUserPassword: new request: %w", err) + return nil, fmt.Errorf("CreateDBAASExternalEndpointElasticsearch: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointElasticsearch: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointElasticsearch: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "create-dbaas-external-endpoint-elasticsearch") + } + + response, err := c.httpClient.Do(request) + if 
err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointElasticsearch: http client do: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointElasticsearch: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointElasticsearch: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Delete OpenSearch logs external integration endpoint +func (c Client) DeleteDBAASExternalEndpointOpensearch(ctx context.Context, endpointID UUID) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-opensearch/%v", endpointID) + + request, err := http.NewRequestWithContext(ctx, "DELETE", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointOpensearch: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointOpensearch: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointOpensearch: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "delete-dbaas-external-endpoint-opensearch") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointOpensearch: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointOpensearch: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, 
fmt.Errorf("DeleteDBAASExternalEndpointOpensearch: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Get OpenSearch Logs external integration endpoint settings +func (c Client) GetDBAASExternalEndpointOpensearch(ctx context.Context, endpointID UUID) (*DBAASEndpointOpensearchOutput, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-opensearch/%v", endpointID) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointOpensearch: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointOpensearch: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointOpensearch: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "get-dbaas-external-endpoint-opensearch") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointOpensearch: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointOpensearch: http response: %w", err) + } + + bodyresp := &DBAASEndpointOpensearchOutput{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointOpensearch: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Update OpenSearch Logs external integration endpoint +func (c Client) UpdateDBAASExternalEndpointOpensearch(ctx context.Context, endpointID UUID, req DBAASEndpointOpensearchInputUpdate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-opensearch/%v", endpointID) + + body, err := prepareJSONBody(req) + if err != nil { + return 
nil, fmt.Errorf("UpdateDBAASExternalEndpointOpensearch: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointOpensearch: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointOpensearch: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointOpensearch: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "update-dbaas-external-endpoint-opensearch") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointOpensearch: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointOpensearch: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointOpensearch: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Create OpenSearch Logs external integration endpoint +func (c Client) CreateDBAASExternalEndpointOpensearch(ctx context.Context, name string, req DBAASEndpointOpensearchInputCreate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-opensearch/%v", name) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointOpensearch: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) + if err != nil { + return nil, 
fmt.Errorf("CreateDBAASExternalEndpointOpensearch: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointOpensearch: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointOpensearch: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "create-dbaas-external-endpoint-opensearch") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointOpensearch: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointOpensearch: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointOpensearch: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Delete Prometheus external integration endpoint +func (c Client) DeleteDBAASExternalEndpointPrometheus(ctx context.Context, endpointID UUID) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-prometheus/%v", endpointID) + + request, err := http.NewRequestWithContext(ctx, "DELETE", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointPrometheus: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointPrometheus: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointPrometheus: sign request: %w", 
err) + } + + if c.trace { + dumpRequest(request, "delete-dbaas-external-endpoint-prometheus") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointPrometheus: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointPrometheus: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointPrometheus: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Get Prometheus external integration endpoint settings +func (c Client) GetDBAASExternalEndpointPrometheus(ctx context.Context, endpointID UUID) (*DBAASEndpointExternalPrometheusOutput, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-prometheus/%v", endpointID) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointPrometheus: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointPrometheus: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointPrometheus: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "get-dbaas-external-endpoint-prometheus") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointPrometheus: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointPrometheus: http response: %w", err) + } + + bodyresp := 
&DBAASEndpointExternalPrometheusOutput{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointPrometheus: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Update Prometheus external integration endpoint +func (c Client) UpdateDBAASExternalEndpointPrometheus(ctx context.Context, endpointID UUID, req DBAASEndpointPrometheusPayload) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-prometheus/%v", endpointID) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointPrometheus: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointPrometheus: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointPrometheus: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointPrometheus: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "update-dbaas-external-endpoint-prometheus") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointPrometheus: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointPrometheus: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointPrometheus: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Create 
Prometheus external integration endpoint +func (c Client) CreateDBAASExternalEndpointPrometheus(ctx context.Context, name string, req DBAASEndpointPrometheusPayload) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-prometheus/%v", name) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointPrometheus: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointPrometheus: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointPrometheus: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointPrometheus: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "create-dbaas-external-endpoint-prometheus") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointPrometheus: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointPrometheus: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointPrometheus: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Delete RSyslog external integration endpoint +func (c Client) DeleteDBAASExternalEndpointRsyslog(ctx context.Context, endpointID UUID) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-rsyslog/%v", endpointID) + + request, err := 
http.NewRequestWithContext(ctx, "DELETE", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointRsyslog: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointRsyslog: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointRsyslog: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "delete-dbaas-external-endpoint-rsyslog") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointRsyslog: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointRsyslog: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("DeleteDBAASExternalEndpointRsyslog: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Get RSyslog external integration endpoint settings +func (c Client) GetDBAASExternalEndpointRsyslog(ctx context.Context, endpointID UUID) (*DBAASExternalEndpointRsyslogOutput, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-rsyslog/%v", endpointID) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointRsyslog: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointRsyslog: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointRsyslog: 
sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "get-dbaas-external-endpoint-rsyslog") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointRsyslog: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointRsyslog: http response: %w", err) + } + + bodyresp := &DBAASExternalEndpointRsyslogOutput{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("GetDBAASExternalEndpointRsyslog: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Update RSyslog external integration endpoint +func (c Client) UpdateDBAASExternalEndpointRsyslog(ctx context.Context, endpointID UUID, req DBAASEndpointRsyslogInputUpdate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-rsyslog/%v", endpointID) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointRsyslog: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointRsyslog: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointRsyslog: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointRsyslog: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "update-dbaas-external-endpoint-rsyslog") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointRsyslog: http client do: %w", err) + } + + 
if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointRsyslog: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalEndpointRsyslog: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Create RSyslog external integration endpoint +func (c Client) CreateDBAASExternalEndpointRsyslog(ctx context.Context, name string, req DBAASEndpointRsyslogInputCreate) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint-rsyslog/%v", name) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointRsyslog: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointRsyslog: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointRsyslog: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointRsyslog: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "create-dbaas-external-endpoint-rsyslog") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointRsyslog: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("CreateDBAASExternalEndpointRsyslog: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return 
nil, fmt.Errorf("CreateDBAASExternalEndpointRsyslog: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type ListDBAASExternalEndpointTypesResponseEndpointTypes struct { + ServiceTypes []string `json:"service-types,omitempty"` + Title string `json:"title,omitempty"` + Type EnumExternalEndpointTypes `json:"type,omitempty"` +} + +type ListDBAASExternalEndpointTypesResponse struct { + EndpointTypes []ListDBAASExternalEndpointTypesResponseEndpointTypes `json:"endpoint-types,omitempty"` +} + +// [BETA] List available external endpoint types and their schemas for DBaaS external integrations +func (c Client) ListDBAASExternalEndpointTypes(ctx context.Context) (*ListDBAASExternalEndpointTypesResponse, error) { + path := "/dbaas-external-endpoint-types" + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpointTypes: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpointTypes: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpointTypes: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "list-dbaas-external-endpoint-types") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpointTypes: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpointTypes: http response: %w", err) + } + + bodyresp := &ListDBAASExternalEndpointTypesResponse{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpointTypes: prepare Json response: %w", err) + } + + return bodyresp, nil +} + 
+type AttachDBAASServiceToEndpointRequest struct { + // External endpoint id + DestEndpointID UUID `json:"dest-endpoint-id" validate:"required"` + Type EnumExternalEndpointTypes `json:"type" validate:"required"` +} + +// [BETA] Create a new DBaaS connection between a DBaaS service and an external service +func (c Client) AttachDBAASServiceToEndpoint(ctx context.Context, sourceServiceName string, req AttachDBAASServiceToEndpointRequest) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint/%v/attach", sourceServiceName) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("AttachDBAASServiceToEndpoint: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("AttachDBAASServiceToEndpoint: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("AttachDBAASServiceToEndpoint: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("AttachDBAASServiceToEndpoint: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "attach-dbaas-service-to-endpoint") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("AttachDBAASServiceToEndpoint: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("AttachDBAASServiceToEndpoint: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("AttachDBAASServiceToEndpoint: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type DetachDBAASServiceFromEndpointRequest struct { + // External 
Integration ID + IntegrationID UUID `json:"integration-id" validate:"required"` +} + +// [BETA] Detach a DBaaS external integration from a service +func (c Client) DetachDBAASServiceFromEndpoint(ctx context.Context, sourceServiceName string, req DetachDBAASServiceFromEndpointRequest) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-endpoint/%v/detach", sourceServiceName) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("DetachDBAASServiceFromEndpoint: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("DetachDBAASServiceFromEndpoint: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("DetachDBAASServiceFromEndpoint: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("DetachDBAASServiceFromEndpoint: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "detach-dbaas-service-from-endpoint") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("DetachDBAASServiceFromEndpoint: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("DetachDBAASServiceFromEndpoint: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("DetachDBAASServiceFromEndpoint: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type ListDBAASExternalEndpointsResponse struct { + DBAASEndpoints []DBAASExternalEndpoint `json:"dbaas-endpoints,omitempty"` +} + +// FindDBAASExternalEndpoint attempts to find an DBAASExternalEndpoint by 
nameOrID. +func (l ListDBAASExternalEndpointsResponse) FindDBAASExternalEndpoint(nameOrID string) (DBAASExternalEndpoint, error) { + var result []DBAASExternalEndpoint + for i, elem := range l.DBAASEndpoints { + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.DBAASEndpoints[i]) + } + } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return DBAASExternalEndpoint{}, fmt.Errorf("%q too many found in ListDBAASExternalEndpointsResponse: %w", nameOrID, ErrConflict) + } + + return DBAASExternalEndpoint{}, fmt.Errorf("%q not found in ListDBAASExternalEndpointsResponse: %w", nameOrID, ErrNotFound) +} + +// [BETA] List available external endpoints for integrations +func (c Client) ListDBAASExternalEndpoints(ctx context.Context) (*ListDBAASExternalEndpointsResponse, error) { + path := "/dbaas-external-endpoints" + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpoints: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpoints: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpoints: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "list-dbaas-external-endpoints") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpoints: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("ListDBAASExternalEndpoints: http response: %w", err) + } + + bodyresp := &ListDBAASExternalEndpointsResponse{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, 
fmt.Errorf("ListDBAASExternalEndpoints: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type GetDBAASExternalIntegrationSettingsDatadogResponse struct { + Settings *DBAASIntegrationSettingsDatadog `json:"settings,omitempty"` +} + +// [BETA] Get Datadog integration settings +func (c Client) GetDBAASExternalIntegrationSettingsDatadog(ctx context.Context, integrationID UUID) (*GetDBAASExternalIntegrationSettingsDatadogResponse, error) { + path := fmt.Sprintf("/dbaas-external-integration-settings-datadog/%v", integrationID) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegrationSettingsDatadog: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegrationSettingsDatadog: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegrationSettingsDatadog: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "get-dbaas-external-integration-settings-datadog") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegrationSettingsDatadog: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegrationSettingsDatadog: http response: %w", err) + } + + bodyresp := &GetDBAASExternalIntegrationSettingsDatadogResponse{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegrationSettingsDatadog: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type UpdateDBAASExternalIntegrationSettingsDatadogRequest struct { + Settings *DBAASIntegrationSettingsDatadog 
`json:"settings,omitempty"` +} + +// [BETA] Manage Datadog integration settings +func (c Client) UpdateDBAASExternalIntegrationSettingsDatadog(ctx context.Context, integrationID UUID, req UpdateDBAASExternalIntegrationSettingsDatadogRequest) (*Operation, error) { + path := fmt.Sprintf("/dbaas-external-integration-settings-datadog/%v", integrationID) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalIntegrationSettingsDatadog: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalIntegrationSettingsDatadog: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalIntegrationSettingsDatadog: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalIntegrationSettingsDatadog: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "update-dbaas-external-integration-settings-datadog") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalIntegrationSettingsDatadog: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalIntegrationSettingsDatadog: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("UpdateDBAASExternalIntegrationSettingsDatadog: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// [BETA] Get a DBaaS external integration +func (c Client) GetDBAASExternalIntegration(ctx context.Context, integrationID 
UUID) (*DBAASExternalIntegration, error) { + path := fmt.Sprintf("/dbaas-external-integration/%v", integrationID) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegration: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegration: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegration: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "get-dbaas-external-integration") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegration: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegration: http response: %w", err) + } + + bodyresp := &DBAASExternalIntegration{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("GetDBAASExternalIntegration: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type ListDBAASExternalIntegrationsResponse struct { + ExternalIntegrations []DBAASExternalIntegration `json:"external-integrations,omitempty"` +} + +// [BETA] List all DBaaS connections between services and external endpoints +func (c Client) ListDBAASExternalIntegrations(ctx context.Context, serviceName string) (*ListDBAASExternalIntegrationsResponse, error) { + path := fmt.Sprintf("/dbaas-external-integrations/%v", serviceName) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("ListDBAASExternalIntegrations: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := 
c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("ListDBAASExternalIntegrations: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("ListDBAASExternalIntegrations: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "list-dbaas-external-integrations") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("ListDBAASExternalIntegrations: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("ListDBAASExternalIntegrations: http response: %w", err) + } + + bodyresp := &ListDBAASExternalIntegrationsResponse{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("ListDBAASExternalIntegrations: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// Delete a Grafana service +func (c Client) DeleteDBAASServiceGrafana(ctx context.Context, name string) (*Operation, error) { + path := fmt.Sprintf("/dbaas-grafana/%v", name) + + request, err := http.NewRequestWithContext(ctx, "DELETE", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASServiceGrafana: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("DeleteDBAASServiceGrafana: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("DeleteDBAASServiceGrafana: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "delete-dbaas-service-grafana") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("DeleteDBAASServiceGrafana: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { 
+ return nil, fmt.Errorf("DeleteDBAASServiceGrafana: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("DeleteDBAASServiceGrafana: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// Get a DBaaS Grafana service +func (c Client) GetDBAASServiceGrafana(ctx context.Context, name string) (*DBAASServiceGrafana, error) { + path := fmt.Sprintf("/dbaas-grafana/%v", name) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("GetDBAASServiceGrafana: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("GetDBAASServiceGrafana: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("GetDBAASServiceGrafana: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "get-dbaas-service-grafana") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("GetDBAASServiceGrafana: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("GetDBAASServiceGrafana: http response: %w", err) + } + + bodyresp := &DBAASServiceGrafana{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("GetDBAASServiceGrafana: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type CreateDBAASServiceGrafanaRequestMaintenanceDow string + +const ( + CreateDBAASServiceGrafanaRequestMaintenanceDowSaturday CreateDBAASServiceGrafanaRequestMaintenanceDow = "saturday" + CreateDBAASServiceGrafanaRequestMaintenanceDowTuesday CreateDBAASServiceGrafanaRequestMaintenanceDow = "tuesday" + CreateDBAASServiceGrafanaRequestMaintenanceDowNever 
CreateDBAASServiceGrafanaRequestMaintenanceDow = "never" + CreateDBAASServiceGrafanaRequestMaintenanceDowWednesday CreateDBAASServiceGrafanaRequestMaintenanceDow = "wednesday" + CreateDBAASServiceGrafanaRequestMaintenanceDowSunday CreateDBAASServiceGrafanaRequestMaintenanceDow = "sunday" + CreateDBAASServiceGrafanaRequestMaintenanceDowFriday CreateDBAASServiceGrafanaRequestMaintenanceDow = "friday" + CreateDBAASServiceGrafanaRequestMaintenanceDowMonday CreateDBAASServiceGrafanaRequestMaintenanceDow = "monday" + CreateDBAASServiceGrafanaRequestMaintenanceDowThursday CreateDBAASServiceGrafanaRequestMaintenanceDow = "thursday" +) + +// Automatic maintenance settings +type CreateDBAASServiceGrafanaRequestMaintenance struct { + // Day of week for installing updates + Dow CreateDBAASServiceGrafanaRequestMaintenanceDow `json:"dow" validate:"required"` + // Time for installing updates, UTC + Time string `json:"time" validate:"required,gte=8,lte=8"` +} + +type CreateDBAASServiceGrafanaRequest struct { + ForkFromService DBAASServiceName `json:"fork-from-service,omitempty" validate:"omitempty,gte=0,lte=63"` + // Grafana settings + GrafanaSettings *JSONSchemaGrafana `json:"grafana-settings,omitempty"` + // Allowed CIDR address blocks for incoming connections + IPFilter []string `json:"ip-filter,omitempty"` + // Automatic maintenance settings + Maintenance *CreateDBAASServiceGrafanaRequestMaintenance `json:"maintenance,omitempty"` + // Subscription plan + Plan string `json:"plan" validate:"required,gte=1,lte=128"` + // Service is protected against termination and powering off + TerminationProtection *bool `json:"termination-protection,omitempty"` +} + +// Create a DBaaS Grafana service +func (c Client) CreateDBAASServiceGrafana(ctx context.Context, name string, req CreateDBAASServiceGrafanaRequest) (*Operation, error) { + path := fmt.Sprintf("/dbaas-grafana/%v", name) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("CreateDBAASServiceGrafana: 
prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("CreateDBAASServiceGrafana: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("CreateDBAASServiceGrafana: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("CreateDBAASServiceGrafana: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "create-dbaas-service-grafana") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("CreateDBAASServiceGrafana: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("CreateDBAASServiceGrafana: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("CreateDBAASServiceGrafana: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type UpdateDBAASServiceGrafanaRequestMaintenanceDow string + +const ( + UpdateDBAASServiceGrafanaRequestMaintenanceDowSaturday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "saturday" + UpdateDBAASServiceGrafanaRequestMaintenanceDowTuesday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "tuesday" + UpdateDBAASServiceGrafanaRequestMaintenanceDowNever UpdateDBAASServiceGrafanaRequestMaintenanceDow = "never" + UpdateDBAASServiceGrafanaRequestMaintenanceDowWednesday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "wednesday" + UpdateDBAASServiceGrafanaRequestMaintenanceDowSunday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "sunday" + UpdateDBAASServiceGrafanaRequestMaintenanceDowFriday 
UpdateDBAASServiceGrafanaRequestMaintenanceDow = "friday" + UpdateDBAASServiceGrafanaRequestMaintenanceDowMonday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "monday" + UpdateDBAASServiceGrafanaRequestMaintenanceDowThursday UpdateDBAASServiceGrafanaRequestMaintenanceDow = "thursday" +) + +// Automatic maintenance settings +type UpdateDBAASServiceGrafanaRequestMaintenance struct { + // Day of week for installing updates + Dow UpdateDBAASServiceGrafanaRequestMaintenanceDow `json:"dow" validate:"required"` + // Time for installing updates, UTC + Time string `json:"time" validate:"required,gte=8,lte=8"` +} + +type UpdateDBAASServiceGrafanaRequest struct { + // Grafana settings + GrafanaSettings *JSONSchemaGrafana `json:"grafana-settings,omitempty"` + // Allowed CIDR address blocks for incoming connections + IPFilter []string `json:"ip-filter,omitempty"` + // Automatic maintenance settings + Maintenance *UpdateDBAASServiceGrafanaRequestMaintenance `json:"maintenance,omitempty"` + // Subscription plan + Plan string `json:"plan,omitempty" validate:"omitempty,gte=1,lte=128"` + // Service is protected against termination and powering off + TerminationProtection *bool `json:"termination-protection,omitempty"` +} + +// Update a DBaaS Grafana service +func (c Client) UpdateDBAASServiceGrafana(ctx context.Context, name string, req UpdateDBAASServiceGrafanaRequest) (*Operation, error) { + path := fmt.Sprintf("/dbaas-grafana/%v", name) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASServiceGrafana: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASServiceGrafana: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, 
fmt.Errorf("UpdateDBAASServiceGrafana: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("UpdateDBAASServiceGrafana: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "update-dbaas-service-grafana") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("UpdateDBAASServiceGrafana: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("UpdateDBAASServiceGrafana: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("UpdateDBAASServiceGrafana: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// Initiate Grafana maintenance update +func (c Client) StartDBAASGrafanaMaintenance(ctx context.Context, name string) (*Operation, error) { + path := fmt.Sprintf("/dbaas-grafana/%v/maintenance/start", name) + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "start-dbaas-grafana-maintenance") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: http response: %w", 
err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("StartDBAASGrafanaMaintenance: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type ResetDBAASGrafanaUserPasswordRequest struct { + Password DBAASUserPassword `json:"password,omitempty" validate:"omitempty,gte=8,lte=256"` +} + +// If no password is provided one will be generated automatically. +func (c Client) ResetDBAASGrafanaUserPassword(ctx context.Context, serviceName string, username string, req ResetDBAASGrafanaUserPasswordRequest) (*Operation, error) { + path := fmt.Sprintf("/dbaas-grafana/%v/user/%v/password/reset", serviceName, username) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("ResetDBAASGrafanaUserPassword: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("ResetDBAASGrafanaUserPassword: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -1556,7 +2996,8 @@ func (c Client) RevealDBAASGrafanaUserPassword(ctx context.Context, serviceName if err != nil { return nil, fmt.Errorf("RevealDBAASGrafanaUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RevealDBAASGrafanaUserPassword: execute request editors: %w", err) @@ -1599,7 +3040,7 @@ type CreateDBAASIntegrationRequest struct { SourceService DBAASServiceName `json:"source-service" validate:"required,gte=0,lte=63"` } -// Create a new DBaaS integration between two services +// [BETA] Create a new DBaaS integration between two services func (c Client) CreateDBAASIntegration(ctx context.Context, req CreateDBAASIntegrationRequest) (*Operation, error) { 
path := "/dbaas-integration" @@ -1612,7 +3053,8 @@ func (c Client) CreateDBAASIntegration(ctx context.Context, req CreateDBAASInteg if err != nil { return nil, fmt.Errorf("CreateDBAASIntegration: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -1662,7 +3104,7 @@ type ListDBAASIntegrationSettingsResponse struct { Settings *ListDBAASIntegrationSettingsResponseSettings `json:"settings,omitempty"` } -// Get DBaaS integration settings +// [BETA] Get DBaaS integration settings func (c Client) ListDBAASIntegrationSettings(ctx context.Context, integrationType string, sourceType string, destType string) (*ListDBAASIntegrationSettingsResponse, error) { path := fmt.Sprintf("/dbaas-integration-settings/%v/%v/%v", integrationType, sourceType, destType) @@ -1670,7 +3112,8 @@ func (c Client) ListDBAASIntegrationSettings(ctx context.Context, integrationTyp if err != nil { return nil, fmt.Errorf("ListDBAASIntegrationSettings: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListDBAASIntegrationSettings: execute request editors: %w", err) @@ -1709,7 +3152,7 @@ type ListDBAASIntegrationTypesResponse struct { DBAASIntegrationTypes []DBAASIntegrationType `json:"dbaas-integration-types,omitempty"` } -// Get DBaaS integration types +// [BETA] Get DBaaS integration types func (c Client) ListDBAASIntegrationTypes(ctx context.Context) (*ListDBAASIntegrationTypesResponse, error) { path := "/dbaas-integration-types" @@ -1717,7 +3160,8 @@ func (c Client) ListDBAASIntegrationTypes(ctx context.Context) (*ListDBAASIntegr if err != nil { return nil, fmt.Errorf("ListDBAASIntegrationTypes: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", 
c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListDBAASIntegrationTypes: execute request editors: %w", err) @@ -1752,7 +3196,7 @@ func (c Client) ListDBAASIntegrationTypes(ctx context.Context) (*ListDBAASIntegr return bodyresp, nil } -// Delete a DBaaS Integration +// [BETA] Delete a DBaaS Integration func (c Client) DeleteDBAASIntegration(ctx context.Context, id UUID) (*Operation, error) { path := fmt.Sprintf("/dbaas-integration/%v", id) @@ -1760,7 +3204,8 @@ func (c Client) DeleteDBAASIntegration(ctx context.Context, id UUID) (*Operation if err != nil { return nil, fmt.Errorf("DeleteDBAASIntegration: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASIntegration: execute request editors: %w", err) @@ -1795,7 +3240,7 @@ func (c Client) DeleteDBAASIntegration(ctx context.Context, id UUID) (*Operation return bodyresp, nil } -// Get a DBaaS Integration +// [BETA] Get a DBaaS Integration func (c Client) GetDBAASIntegration(ctx context.Context, id UUID) (*DBAASIntegration, error) { path := fmt.Sprintf("/dbaas-integration/%v", id) @@ -1803,7 +3248,8 @@ func (c Client) GetDBAASIntegration(ctx context.Context, id UUID) (*DBAASIntegra if err != nil { return nil, fmt.Errorf("GetDBAASIntegration: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASIntegration: execute request editors: %w", err) @@ -1843,7 +3289,7 @@ type UpdateDBAASIntegrationRequest struct { Settings map[string]any `json:"settings" validate:"required"` } -// Update a existing DBaaS integration +// [BETA] Update a existing DBaaS integration func (c Client) UpdateDBAASIntegration(ctx context.Context, id 
UUID, req UpdateDBAASIntegrationRequest) (*Operation, error) { path := fmt.Sprintf("/dbaas-integration/%v", id) @@ -1856,7 +3302,8 @@ func (c Client) UpdateDBAASIntegration(ctx context.Context, id UUID, req UpdateD if err != nil { return nil, fmt.Errorf("UpdateDBAASIntegration: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -1901,7 +3348,8 @@ func (c Client) DeleteDBAASServiceKafka(ctx context.Context, name string) (*Oper if err != nil { return nil, fmt.Errorf("DeleteDBAASServiceKafka: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASServiceKafka: execute request editors: %w", err) @@ -1944,7 +3392,8 @@ func (c Client) GetDBAASServiceKafka(ctx context.Context, name string) (*DBAASSe if err != nil { return nil, fmt.Errorf("GetDBAASServiceKafka: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASServiceKafka: execute request editors: %w", err) @@ -2050,7 +3499,8 @@ func (c Client) CreateDBAASServiceKafka(ctx context.Context, name string, req Cr if err != nil { return nil, fmt.Errorf("CreateDBAASServiceKafka: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -2158,7 +3608,8 @@ func (c Client) UpdateDBAASServiceKafka(ctx context.Context, name string, req Up if err != nil { return nil, fmt.Errorf("UpdateDBAASServiceKafka: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) 
request.Header.Add("Content-Type", "application/json") @@ -2203,7 +3654,8 @@ func (c Client) GetDBAASKafkaAclConfig(ctx context.Context, name string) (*DBAAS if err != nil { return nil, fmt.Errorf("GetDBAASKafkaAclConfig: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASKafkaAclConfig: execute request editors: %w", err) @@ -2246,7 +3698,8 @@ func (c Client) StartDBAASKafkaMaintenance(ctx context.Context, name string) (*O if err != nil { return nil, fmt.Errorf("StartDBAASKafkaMaintenance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StartDBAASKafkaMaintenance: execute request editors: %w", err) @@ -2294,7 +3747,8 @@ func (c Client) CreateDBAASKafkaSchemaRegistryAclConfig(ctx context.Context, nam if err != nil { return nil, fmt.Errorf("CreateDBAASKafkaSchemaRegistryAclConfig: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -2339,7 +3793,8 @@ func (c Client) DeleteDBAASKafkaSchemaRegistryAclConfig(ctx context.Context, nam if err != nil { return nil, fmt.Errorf("DeleteDBAASKafkaSchemaRegistryAclConfig: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASKafkaSchemaRegistryAclConfig: execute request editors: %w", err) @@ -2387,7 +3842,8 @@ func (c Client) CreateDBAASKafkaTopicAclConfig(ctx context.Context, name string, if err != nil { return nil, fmt.Errorf("CreateDBAASKafkaTopicAclConfig: new request: %w", err) } - 
request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -2432,7 +3888,8 @@ func (c Client) DeleteDBAASKafkaTopicAclConfig(ctx context.Context, name string, if err != nil { return nil, fmt.Errorf("DeleteDBAASKafkaTopicAclConfig: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASKafkaTopicAclConfig: execute request editors: %w", err) @@ -2467,6 +3924,50 @@ func (c Client) DeleteDBAASKafkaTopicAclConfig(ctx context.Context, name string, return bodyresp, nil } +// Reveal the secrets for DBaaS Kafka Connect +func (c Client) RevealDBAASKafkaConnectPassword(ctx context.Context, serviceName string) (*DBAASUserKafkaConnectSecrets, error) { + path := fmt.Sprintf("/dbaas-kafka/%v/connect/password/reveal", serviceName) + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("RevealDBAASKafkaConnectPassword: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("RevealDBAASKafkaConnectPassword: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("RevealDBAASKafkaConnectPassword: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "reveal-dbaas-kafka-connect-password") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("RevealDBAASKafkaConnectPassword: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("RevealDBAASKafkaConnectPassword: http response: %w", err) + } + + bodyresp := 
&DBAASUserKafkaConnectSecrets{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("RevealDBAASKafkaConnectPassword: prepare Json response: %w", err) + } + + return bodyresp, nil +} + type CreateDBAASKafkaUserRequest struct { Username DBAASUserUsername `json:"username" validate:"required,gte=1,lte=64"` } @@ -2484,7 +3985,8 @@ func (c Client) CreateDBAASKafkaUser(ctx context.Context, serviceName string, re if err != nil { return nil, fmt.Errorf("CreateDBAASKafkaUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -2529,7 +4031,8 @@ func (c Client) DeleteDBAASKafkaUser(ctx context.Context, serviceName string, us if err != nil { return nil, fmt.Errorf("DeleteDBAASKafkaUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASKafkaUser: execute request editors: %w", err) @@ -2581,7 +4084,8 @@ func (c Client) ResetDBAASKafkaUserPassword(ctx context.Context, serviceName str if err != nil { return nil, fmt.Errorf("ResetDBAASKafkaUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -2626,7 +4130,8 @@ func (c Client) RevealDBAASKafkaUserPassword(ctx context.Context, serviceName st if err != nil { return nil, fmt.Errorf("RevealDBAASKafkaUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RevealDBAASKafkaUserPassword: execute request editors: %w", err) @@ -2669,7 +4174,8 @@ func (c Client) 
GetDBAASMigrationStatus(ctx context.Context, name string) (*DBAA if err != nil { return nil, fmt.Errorf("GetDBAASMigrationStatus: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASMigrationStatus: execute request editors: %w", err) @@ -2712,7 +4218,8 @@ func (c Client) DeleteDBAASServiceMysql(ctx context.Context, name string) (*Oper if err != nil { return nil, fmt.Errorf("DeleteDBAASServiceMysql: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASServiceMysql: execute request editors: %w", err) @@ -2755,7 +4262,8 @@ func (c Client) GetDBAASServiceMysql(ctx context.Context, name string) (*DBAASSe if err != nil { return nil, fmt.Errorf("GetDBAASServiceMysql: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASServiceMysql: execute request editors: %w", err) @@ -2894,7 +4402,8 @@ func (c Client) CreateDBAASServiceMysql(ctx context.Context, name string, req Cr if err != nil { return nil, fmt.Errorf("CreateDBAASServiceMysql: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3009,7 +4518,8 @@ func (c Client) UpdateDBAASServiceMysql(ctx context.Context, name string, req Up if err != nil { return nil, fmt.Errorf("UpdateDBAASServiceMysql: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3046,6 
+4556,50 @@ func (c Client) UpdateDBAASServiceMysql(ctx context.Context, name string, req Up return bodyresp, nil } +// Temporarily enable writes for MySQL services in read-only mode due to filled up storage +func (c Client) EnableDBAASMysqlWrites(ctx context.Context, name string) (*Operation, error) { + path := fmt.Sprintf("/dbaas-mysql/%v/enable/writes", name) + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("EnableDBAASMysqlWrites: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("EnableDBAASMysqlWrites: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("EnableDBAASMysqlWrites: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "enable-dbaas-mysql-writes") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("EnableDBAASMysqlWrites: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("EnableDBAASMysqlWrites: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("EnableDBAASMysqlWrites: prepare Json response: %w", err) + } + + return bodyresp, nil +} + // Initiate MySQL maintenance update func (c Client) StartDBAASMysqlMaintenance(ctx context.Context, name string) (*Operation, error) { path := fmt.Sprintf("/dbaas-mysql/%v/maintenance/start", name) @@ -3054,7 +4608,8 @@ func (c Client) StartDBAASMysqlMaintenance(ctx context.Context, name string) (*O if err != nil { return nil, fmt.Errorf("StartDBAASMysqlMaintenance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) 
if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StartDBAASMysqlMaintenance: execute request editors: %w", err) @@ -3097,7 +4652,8 @@ func (c Client) StopDBAASMysqlMigration(ctx context.Context, name string) (*Oper if err != nil { return nil, fmt.Errorf("StopDBAASMysqlMigration: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StopDBAASMysqlMigration: execute request editors: %w", err) @@ -3149,7 +4705,8 @@ func (c Client) CreateDBAASMysqlDatabase(ctx context.Context, serviceName string if err != nil { return nil, fmt.Errorf("CreateDBAASMysqlDatabase: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3194,7 +4751,8 @@ func (c Client) DeleteDBAASMysqlDatabase(ctx context.Context, serviceName string if err != nil { return nil, fmt.Errorf("DeleteDBAASMysqlDatabase: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASMysqlDatabase: execute request editors: %w", err) @@ -3247,7 +4805,8 @@ func (c Client) CreateDBAASMysqlUser(ctx context.Context, serviceName string, re if err != nil { return nil, fmt.Errorf("CreateDBAASMysqlUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3292,7 +4851,8 @@ func (c Client) DeleteDBAASMysqlUser(ctx context.Context, serviceName string, us if err != nil { return nil, fmt.Errorf("DeleteDBAASMysqlUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + 
request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASMysqlUser: execute request editors: %w", err) @@ -3345,7 +4905,8 @@ func (c Client) ResetDBAASMysqlUserPassword(ctx context.Context, serviceName str if err != nil { return nil, fmt.Errorf("ResetDBAASMysqlUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3390,7 +4951,8 @@ func (c Client) RevealDBAASMysqlUserPassword(ctx context.Context, serviceName st if err != nil { return nil, fmt.Errorf("RevealDBAASMysqlUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RevealDBAASMysqlUserPassword: execute request editors: %w", err) @@ -3433,7 +4995,8 @@ func (c Client) DeleteDBAASServiceOpensearch(ctx context.Context, name string) ( if err != nil { return nil, fmt.Errorf("DeleteDBAASServiceOpensearch: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASServiceOpensearch: execute request editors: %w", err) @@ -3476,7 +5039,8 @@ func (c Client) GetDBAASServiceOpensearch(ctx context.Context, name string) (*DB if err != nil { return nil, fmt.Errorf("GetDBAASServiceOpensearch: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASServiceOpensearch: execute request editors: %w", err) @@ -3609,7 +5173,8 @@ func (c Client) CreateDBAASServiceOpensearch(ctx context.Context, 
name string, r if err != nil { return nil, fmt.Errorf("CreateDBAASServiceOpensearch: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3741,7 +5306,8 @@ func (c Client) UpdateDBAASServiceOpensearch(ctx context.Context, name string, r if err != nil { return nil, fmt.Errorf("UpdateDBAASServiceOpensearch: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3786,7 +5352,8 @@ func (c Client) GetDBAASOpensearchAclConfig(ctx context.Context, name string) (* if err != nil { return nil, fmt.Errorf("GetDBAASOpensearchAclConfig: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASOpensearchAclConfig: execute request editors: %w", err) @@ -3834,7 +5401,8 @@ func (c Client) UpdateDBAASOpensearchAclConfig(ctx context.Context, name string, if err != nil { return nil, fmt.Errorf("UpdateDBAASOpensearchAclConfig: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3879,7 +5447,8 @@ func (c Client) StartDBAASOpensearchMaintenance(ctx context.Context, name string if err != nil { return nil, fmt.Errorf("StartDBAASOpensearchMaintenance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StartDBAASOpensearchMaintenance: execute request editors: %w", err) @@ -3931,7 +5500,8 @@ func (c Client) CreateDBAASOpensearchUser(ctx context.Context, serviceName strin if err 
!= nil { return nil, fmt.Errorf("CreateDBAASOpensearchUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -3976,7 +5546,8 @@ func (c Client) DeleteDBAASOpensearchUser(ctx context.Context, serviceName strin if err != nil { return nil, fmt.Errorf("DeleteDBAASOpensearchUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASOpensearchUser: execute request editors: %w", err) @@ -4028,7 +5599,8 @@ func (c Client) ResetDBAASOpensearchUserPassword(ctx context.Context, serviceNam if err != nil { return nil, fmt.Errorf("ResetDBAASOpensearchUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4073,7 +5645,8 @@ func (c Client) RevealDBAASOpensearchUserPassword(ctx context.Context, serviceNa if err != nil { return nil, fmt.Errorf("RevealDBAASOpensearchUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RevealDBAASOpensearchUserPassword: execute request editors: %w", err) @@ -4116,7 +5689,8 @@ func (c Client) DeleteDBAASServicePG(ctx context.Context, name string) (*Operati if err != nil { return nil, fmt.Errorf("DeleteDBAASServicePG: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASServicePG: execute request editors: %w", err) @@ -4159,7 +5733,8 @@ func (c Client) 
GetDBAASServicePG(ctx context.Context, name string) (*DBAASServi if err != nil { return nil, fmt.Errorf("GetDBAASServicePG: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASServicePG: execute request editors: %w", err) @@ -4307,7 +5882,8 @@ func (c Client) CreateDBAASServicePG(ctx context.Context, name string, req Creat if err != nil { return nil, fmt.Errorf("CreateDBAASServicePG: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4434,7 +6010,8 @@ func (c Client) UpdateDBAASServicePG(ctx context.Context, name string, req Updat if err != nil { return nil, fmt.Errorf("UpdateDBAASServicePG: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4479,7 +6056,8 @@ func (c Client) StartDBAASPGMaintenance(ctx context.Context, name string) (*Oper if err != nil { return nil, fmt.Errorf("StartDBAASPGMaintenance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StartDBAASPGMaintenance: execute request editors: %w", err) @@ -4522,7 +6100,8 @@ func (c Client) StopDBAASPGMigration(ctx context.Context, name string) (*Operati if err != nil { return nil, fmt.Errorf("StopDBAASPGMigration: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StopDBAASPGMigration: execute request editors: %w", err) @@ -4578,7 +6157,8 @@ func 
(c Client) CreateDBAASPGConnectionPool(ctx context.Context, serviceName str if err != nil { return nil, fmt.Errorf("CreateDBAASPGConnectionPool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4623,7 +6203,8 @@ func (c Client) DeleteDBAASPGConnectionPool(ctx context.Context, serviceName str if err != nil { return nil, fmt.Errorf("DeleteDBAASPGConnectionPool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASPGConnectionPool: execute request editors: %w", err) @@ -4678,7 +6259,8 @@ func (c Client) UpdateDBAASPGConnectionPool(ctx context.Context, serviceName str if err != nil { return nil, fmt.Errorf("UpdateDBAASPGConnectionPool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4736,7 +6318,8 @@ func (c Client) CreateDBAASPGDatabase(ctx context.Context, serviceName string, r if err != nil { return nil, fmt.Errorf("CreateDBAASPGDatabase: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4781,7 +6364,8 @@ func (c Client) DeleteDBAASPGDatabase(ctx context.Context, serviceName string, d if err != nil { return nil, fmt.Errorf("DeleteDBAASPGDatabase: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASPGDatabase: execute request editors: %w", err) @@ -4834,7 +6418,8 @@ func (c Client) CreateDBAASPostgresUser(ctx 
context.Context, serviceName string, if err != nil { return nil, fmt.Errorf("CreateDBAASPostgresUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4879,7 +6464,8 @@ func (c Client) DeleteDBAASPostgresUser(ctx context.Context, serviceName string, if err != nil { return nil, fmt.Errorf("DeleteDBAASPostgresUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASPostgresUser: execute request editors: %w", err) @@ -4931,7 +6517,8 @@ func (c Client) UpdateDBAASPostgresAllowReplication(ctx context.Context, service if err != nil { return nil, fmt.Errorf("UpdateDBAASPostgresAllowReplication: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -4985,7 +6572,8 @@ func (c Client) ResetDBAASPostgresUserPassword(ctx context.Context, serviceName if err != nil { return nil, fmt.Errorf("ResetDBAASPostgresUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5030,7 +6618,8 @@ func (c Client) RevealDBAASPostgresUserPassword(ctx context.Context, serviceName if err != nil { return nil, fmt.Errorf("RevealDBAASPostgresUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RevealDBAASPostgresUserPassword: execute request editors: %w", err) @@ -5082,7 +6671,8 @@ func (c Client) CreateDBAASPGUpgradeCheck(ctx context.Context, service 
string, r if err != nil { return nil, fmt.Errorf("CreateDBAASPGUpgradeCheck: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5127,7 +6717,8 @@ func (c Client) DeleteDBAASServiceRedis(ctx context.Context, name string) (*Oper if err != nil { return nil, fmt.Errorf("DeleteDBAASServiceRedis: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASServiceRedis: execute request editors: %w", err) @@ -5170,7 +6761,8 @@ func (c Client) GetDBAASServiceRedis(ctx context.Context, name string) (*DBAASSe if err != nil { return nil, fmt.Errorf("GetDBAASServiceRedis: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASServiceRedis: execute request editors: %w", err) @@ -5276,7 +6868,8 @@ func (c Client) CreateDBAASServiceRedis(ctx context.Context, name string, req Cr if err != nil { return nil, fmt.Errorf("CreateDBAASServiceRedis: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5381,7 +6974,8 @@ func (c Client) UpdateDBAASServiceRedis(ctx context.Context, name string, req Up if err != nil { return nil, fmt.Errorf("UpdateDBAASServiceRedis: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5426,7 +7020,8 @@ func (c Client) StartDBAASRedisMaintenance(ctx context.Context, name string) (*O if err != nil { return nil, 
fmt.Errorf("StartDBAASRedisMaintenance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StartDBAASRedisMaintenance: execute request editors: %w", err) @@ -5469,7 +7064,8 @@ func (c Client) StopDBAASRedisMigration(ctx context.Context, name string) (*Oper if err != nil { return nil, fmt.Errorf("StopDBAASRedisMigration: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StopDBAASRedisMigration: execute request editors: %w", err) @@ -5521,7 +7117,8 @@ func (c Client) CreateDBAASRedisUser(ctx context.Context, serviceName string, re if err != nil { return nil, fmt.Errorf("CreateDBAASRedisUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5566,7 +7163,8 @@ func (c Client) DeleteDBAASRedisUser(ctx context.Context, serviceName string, us if err != nil { return nil, fmt.Errorf("DeleteDBAASRedisUser: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASRedisUser: execute request editors: %w", err) @@ -5618,7 +7216,8 @@ func (c Client) ResetDBAASRedisUserPassword(ctx context.Context, serviceName str if err != nil { return nil, fmt.Errorf("ResetDBAASRedisUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5663,7 +7262,8 @@ func (c Client) RevealDBAASRedisUserPassword(ctx context.Context, 
serviceName st if err != nil { return nil, fmt.Errorf("RevealDBAASRedisUserPassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RevealDBAASRedisUserPassword: execute request editors: %w", err) @@ -5704,11 +7304,19 @@ type ListDBAASServicesResponse struct { // FindDBAASServiceCommon attempts to find an DBAASServiceCommon by name. func (l ListDBAASServicesResponse) FindDBAASServiceCommon(name string) (DBAASServiceCommon, error) { + var result []DBAASServiceCommon for i, elem := range l.DBAASServices { if string(elem.Name) == name { - return l.DBAASServices[i], nil + result = append(result, l.DBAASServices[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return DBAASServiceCommon{}, fmt.Errorf("%q too many found in ListDBAASServicesResponse: %w", name, ErrConflict) + } return DBAASServiceCommon{}, fmt.Errorf("%q not found in ListDBAASServicesResponse: %w", name, ErrNotFound) } @@ -5721,7 +7329,8 @@ func (c Client) ListDBAASServices(ctx context.Context) (*ListDBAASServicesRespon if err != nil { return nil, fmt.Errorf("ListDBAASServices: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListDBAASServices: execute request editors: %w", err) @@ -5777,7 +7386,8 @@ func (c Client) GetDBAASServiceLogs(ctx context.Context, serviceName string, req if err != nil { return nil, fmt.Errorf("GetDBAASServiceLogs: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5846,7 +7456,8 @@ func (c Client) GetDBAASServiceMetrics(ctx context.Context, serviceName string, if err != nil { 
return nil, fmt.Errorf("GetDBAASServiceMetrics: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -5889,11 +7500,19 @@ type ListDBAASServiceTypesResponse struct { // FindDBAASServiceType attempts to find an DBAASServiceType by name. func (l ListDBAASServiceTypesResponse) FindDBAASServiceType(name string) (DBAASServiceType, error) { + var result []DBAASServiceType for i, elem := range l.DBAASServiceTypes { if string(elem.Name) == name { - return l.DBAASServiceTypes[i], nil + result = append(result, l.DBAASServiceTypes[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return DBAASServiceType{}, fmt.Errorf("%q too many found in ListDBAASServiceTypesResponse: %w", name, ErrConflict) + } return DBAASServiceType{}, fmt.Errorf("%q not found in ListDBAASServiceTypesResponse: %w", name, ErrNotFound) } @@ -5906,7 +7525,8 @@ func (c Client) ListDBAASServiceTypes(ctx context.Context) (*ListDBAASServiceTyp if err != nil { return nil, fmt.Errorf("ListDBAASServiceTypes: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListDBAASServiceTypes: execute request editors: %w", err) @@ -5949,7 +7569,8 @@ func (c Client) GetDBAASServiceType(ctx context.Context, serviceTypeName string) if err != nil { return nil, fmt.Errorf("GetDBAASServiceType: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASServiceType: execute request editors: %w", err) @@ -5992,7 +7613,8 @@ func (c Client) DeleteDBAASService(ctx context.Context, name string) (*Operation if err != nil { return nil, 
fmt.Errorf("DeleteDBAASService: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDBAASService: execute request editors: %w", err) @@ -6052,7 +7674,8 @@ func (c Client) GetDBAASSettingsGrafana(ctx context.Context) (*GetDBAASSettingsG if err != nil { return nil, fmt.Errorf("GetDBAASSettingsGrafana: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASSettingsGrafana: execute request editors: %w", err) @@ -6142,7 +7765,8 @@ func (c Client) GetDBAASSettingsKafka(ctx context.Context) (*GetDBAASSettingsKaf if err != nil { return nil, fmt.Errorf("GetDBAASSettingsKafka: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASSettingsKafka: execute request editors: %w", err) @@ -6202,7 +7826,8 @@ func (c Client) GetDBAASSettingsMysql(ctx context.Context) (*GetDBAASSettingsMys if err != nil { return nil, fmt.Errorf("GetDBAASSettingsMysql: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASSettingsMysql: execute request editors: %w", err) @@ -6262,7 +7887,8 @@ func (c Client) GetDBAASSettingsOpensearch(ctx context.Context) (*GetDBAASSettin if err != nil { return nil, fmt.Errorf("GetDBAASSettingsOpensearch: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { 
return nil, fmt.Errorf("GetDBAASSettingsOpensearch: execute request editors: %w", err) @@ -6352,7 +7978,8 @@ func (c Client) GetDBAASSettingsPG(ctx context.Context) (*GetDBAASSettingsPGResp if err != nil { return nil, fmt.Errorf("GetDBAASSettingsPG: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASSettingsPG: execute request editors: %w", err) @@ -6412,7 +8039,8 @@ func (c Client) GetDBAASSettingsRedis(ctx context.Context) (*GetDBAASSettingsRed if err != nil { return nil, fmt.Errorf("GetDBAASSettingsRedis: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASSettingsRedis: execute request editors: %w", err) @@ -6468,7 +8096,8 @@ func (c Client) CreateDBAASTaskMigrationCheck(ctx context.Context, service strin if err != nil { return nil, fmt.Errorf("CreateDBAASTaskMigrationCheck: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -6513,7 +8142,8 @@ func (c Client) GetDBAASTask(ctx context.Context, service string, id UUID) (*DBA if err != nil { return nil, fmt.Errorf("GetDBAASTask: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDBAASTask: execute request editors: %w", err) @@ -6554,11 +8184,19 @@ type ListDeployTargetsResponse struct { // FindDeployTarget attempts to find an DeployTarget by nameOrID. 
func (l ListDeployTargetsResponse) FindDeployTarget(nameOrID string) (DeployTarget, error) { + var result []DeployTarget for i, elem := range l.DeployTargets { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.DeployTargets[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.DeployTargets[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return DeployTarget{}, fmt.Errorf("%q too many found in ListDeployTargetsResponse: %w", nameOrID, ErrConflict) + } return DeployTarget{}, fmt.Errorf("%q not found in ListDeployTargetsResponse: %w", nameOrID, ErrNotFound) } @@ -6571,7 +8209,8 @@ func (c Client) ListDeployTargets(ctx context.Context) (*ListDeployTargetsRespon if err != nil { return nil, fmt.Errorf("ListDeployTargets: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListDeployTargets: execute request editors: %w", err) @@ -6614,7 +8253,8 @@ func (c Client) GetDeployTarget(ctx context.Context, id UUID) (*DeployTarget, er if err != nil { return nil, fmt.Errorf("GetDeployTarget: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDeployTarget: execute request editors: %w", err) @@ -6653,15 +8293,23 @@ type ListDNSDomainsResponse struct { DNSDomains []DNSDomain `json:"dns-domains,omitempty"` } -// FindDNSDomain attempts to find an DNSDomain by ID. -func (l ListDNSDomainsResponse) FindDNSDomain(ID string) (DNSDomain, error) { +// FindDNSDomain attempts to find an DNSDomain by idOrUnicodeName. 
+func (l ListDNSDomainsResponse) FindDNSDomain(idOrUnicodeName string) (DNSDomain, error) { + var result []DNSDomain for i, elem := range l.DNSDomains { - if elem.ID.String() == ID { - return l.DNSDomains[i], nil + if string(elem.ID) == idOrUnicodeName || string(elem.UnicodeName) == idOrUnicodeName { + result = append(result, l.DNSDomains[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return DNSDomain{}, fmt.Errorf("%q too many found in ListDNSDomainsResponse: %w", idOrUnicodeName, ErrConflict) + } - return DNSDomain{}, fmt.Errorf("%q not found in ListDNSDomainsResponse: %w", ID, ErrNotFound) + return DNSDomain{}, fmt.Errorf("%q not found in ListDNSDomainsResponse: %w", idOrUnicodeName, ErrNotFound) } // List DNS domains @@ -6672,7 +8320,8 @@ func (c Client) ListDNSDomains(ctx context.Context) (*ListDNSDomainsResponse, er if err != nil { return nil, fmt.Errorf("ListDNSDomains: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListDNSDomains: execute request editors: %w", err) @@ -6726,7 +8375,8 @@ func (c Client) CreateDNSDomain(ctx context.Context, req CreateDNSDomainRequest) if err != nil { return nil, fmt.Errorf("CreateDNSDomain: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -6769,11 +8419,19 @@ type ListDNSDomainRecordsResponse struct { // FindDNSDomainRecord attempts to find an DNSDomainRecord by nameOrID. 
func (l ListDNSDomainRecordsResponse) FindDNSDomainRecord(nameOrID string) (DNSDomainRecord, error) { + var result []DNSDomainRecord for i, elem := range l.DNSDomainRecords { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.DNSDomainRecords[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.DNSDomainRecords[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return DNSDomainRecord{}, fmt.Errorf("%q too many found in ListDNSDomainRecordsResponse: %w", nameOrID, ErrConflict) + } return DNSDomainRecord{}, fmt.Errorf("%q not found in ListDNSDomainRecordsResponse: %w", nameOrID, ErrNotFound) } @@ -6786,7 +8444,8 @@ func (c Client) ListDNSDomainRecords(ctx context.Context, domainID UUID) (*ListD if err != nil { return nil, fmt.Errorf("ListDNSDomainRecords: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListDNSDomainRecords: execute request editors: %w", err) @@ -6867,7 +8526,8 @@ func (c Client) CreateDNSDomainRecord(ctx context.Context, domainID UUID, req Cr if err != nil { return nil, fmt.Errorf("CreateDNSDomainRecord: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -6912,7 +8572,8 @@ func (c Client) DeleteDNSDomainRecord(ctx context.Context, domainID UUID, record if err != nil { return nil, fmt.Errorf("DeleteDNSDomainRecord: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDNSDomainRecord: execute request editors: %w", err) @@ -6955,7 +8616,8 @@ func (c Client) 
GetDNSDomainRecord(ctx context.Context, domainID UUID, recordID if err != nil { return nil, fmt.Errorf("GetDNSDomainRecord: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDNSDomainRecord: execute request editors: %w", err) @@ -7014,7 +8676,8 @@ func (c Client) UpdateDNSDomainRecord(ctx context.Context, domainID UUID, record if err != nil { return nil, fmt.Errorf("UpdateDNSDomainRecord: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -7059,7 +8722,8 @@ func (c Client) DeleteDNSDomain(ctx context.Context, id UUID) (*Operation, error if err != nil { return nil, fmt.Errorf("DeleteDNSDomain: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteDNSDomain: execute request editors: %w", err) @@ -7102,7 +8766,8 @@ func (c Client) GetDNSDomain(ctx context.Context, id UUID) (*DNSDomain, error) { if err != nil { return nil, fmt.Errorf("GetDNSDomain: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDNSDomain: execute request editors: %w", err) @@ -7149,7 +8814,8 @@ func (c Client) GetDNSDomainZoneFile(ctx context.Context, id UUID) (*GetDNSDomai if err != nil { return nil, fmt.Errorf("GetDNSDomainZoneFile: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetDNSDomainZoneFile: 
execute request editors: %w", err) @@ -7188,15 +8854,23 @@ type ListElasticIPSResponse struct { ElasticIPS []ElasticIP `json:"elastic-ips,omitempty"` } -// FindElasticIP attempts to find an ElasticIP by ID. -func (l ListElasticIPSResponse) FindElasticIP(ID string) (ElasticIP, error) { +// FindElasticIP attempts to find an ElasticIP by idOrIP. +func (l ListElasticIPSResponse) FindElasticIP(idOrIP string) (ElasticIP, error) { + var result []ElasticIP for i, elem := range l.ElasticIPS { - if elem.ID.String() == ID { - return l.ElasticIPS[i], nil + if string(elem.ID) == idOrIP || string(elem.IP) == idOrIP { + result = append(result, l.ElasticIPS[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return ElasticIP{}, fmt.Errorf("%q too many found in ListElasticIPSResponse: %w", idOrIP, ErrConflict) + } - return ElasticIP{}, fmt.Errorf("%q not found in ListElasticIPSResponse: %w", ID, ErrNotFound) + return ElasticIP{}, fmt.Errorf("%q not found in ListElasticIPSResponse: %w", idOrIP, ErrNotFound) } // List Elastic IPs @@ -7207,7 +8881,8 @@ func (c Client) ListElasticIPS(ctx context.Context) (*ListElasticIPSResponse, er if err != nil { return nil, fmt.Errorf("ListElasticIPS: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListElasticIPS: execute request editors: %w", err) @@ -7272,7 +8947,8 @@ func (c Client) CreateElasticIP(ctx context.Context, req CreateElasticIPRequest) if err != nil { return nil, fmt.Errorf("CreateElasticIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -7317,7 +8993,8 @@ func (c Client) DeleteElasticIP(ctx context.Context, id UUID) (*Operation, error if err != nil { return nil, fmt.Errorf("DeleteElasticIP: new 
request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteElasticIP: execute request editors: %w", err) @@ -7360,7 +9037,8 @@ func (c Client) GetElasticIP(ctx context.Context, id UUID) (*ElasticIP, error) { if err != nil { return nil, fmt.Errorf("GetElasticIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetElasticIP: execute request editors: %w", err) @@ -7416,7 +9094,8 @@ func (c Client) UpdateElasticIP(ctx context.Context, id UUID, req UpdateElasticI if err != nil { return nil, fmt.Errorf("UpdateElasticIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -7467,7 +9146,8 @@ func (c Client) ResetElasticIPField(ctx context.Context, id UUID, field ResetEla if err != nil { return nil, fmt.Errorf("ResetElasticIPField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetElasticIPField: execute request editors: %w", err) @@ -7520,7 +9200,8 @@ func (c Client) AttachInstanceToElasticIP(ctx context.Context, id UUID, req Atta if err != nil { return nil, fmt.Errorf("AttachInstanceToElasticIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -7575,7 +9256,8 @@ func (c Client) DetachInstanceFromElasticIP(ctx context.Context, id UUID, req De if err != nil { return nil, fmt.Errorf("DetachInstanceFromElasticIP: new 
request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -7636,7 +9318,8 @@ func (c Client) ListEvents(ctx context.Context, opts ...ListEventsOpt) ([]Event, if err != nil { return nil, fmt.Errorf("ListEvents: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if len(opts) > 0 { q := request.URL.Query() @@ -7687,7 +9370,8 @@ func (c Client) GetIAMOrganizationPolicy(ctx context.Context) (*IAMPolicy, error if err != nil { return nil, fmt.Errorf("GetIAMOrganizationPolicy: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetIAMOrganizationPolicy: execute request editors: %w", err) @@ -7735,7 +9419,8 @@ func (c Client) UpdateIAMOrganizationPolicy(ctx context.Context, req IAMPolicy) if err != nil { return nil, fmt.Errorf("UpdateIAMOrganizationPolicy: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -7778,11 +9463,19 @@ type ListIAMRolesResponse struct { // FindIAMRole attempts to find an IAMRole by nameOrID. 
func (l ListIAMRolesResponse) FindIAMRole(nameOrID string) (IAMRole, error) { + var result []IAMRole for i, elem := range l.IAMRoles { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.IAMRoles[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.IAMRoles[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return IAMRole{}, fmt.Errorf("%q too many found in ListIAMRolesResponse: %w", nameOrID, ErrConflict) + } return IAMRole{}, fmt.Errorf("%q not found in ListIAMRolesResponse: %w", nameOrID, ErrNotFound) } @@ -7795,7 +9488,8 @@ func (c Client) ListIAMRoles(ctx context.Context) (*ListIAMRolesResponse, error) if err != nil { return nil, fmt.Errorf("ListIAMRoles: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListIAMRoles: execute request editors: %w", err) @@ -7857,7 +9551,8 @@ func (c Client) CreateIAMRole(ctx context.Context, req CreateIAMRoleRequest) (*O if err != nil { return nil, fmt.Errorf("CreateIAMRole: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -7902,7 +9597,8 @@ func (c Client) DeleteIAMRole(ctx context.Context, id UUID) (*Operation, error) if err != nil { return nil, fmt.Errorf("DeleteIAMRole: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteIAMRole: execute request editors: %w", err) @@ -7945,7 +9641,8 @@ func (c Client) GetIAMRole(ctx context.Context, id UUID) (*IAMRole, error) { if err != nil { return nil, fmt.Errorf("GetIAMRole: new request: %w", err) } - 
request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetIAMRole: execute request editors: %w", err) @@ -8001,7 +9698,8 @@ func (c Client) UpdateIAMRole(ctx context.Context, id UUID, req UpdateIAMRoleReq if err != nil { return nil, fmt.Errorf("UpdateIAMRole: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -8051,7 +9749,8 @@ func (c Client) UpdateIAMRolePolicy(ctx context.Context, id UUID, req IAMPolicy) if err != nil { return nil, fmt.Errorf("UpdateIAMRolePolicy: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -8135,11 +9834,19 @@ type ListInstancesResponse struct { // FindListInstancesResponseInstances attempts to find an ListInstancesResponseInstances by nameOrID. 
func (l ListInstancesResponse) FindListInstancesResponseInstances(nameOrID string) (ListInstancesResponseInstances, error) { + var result []ListInstancesResponseInstances for i, elem := range l.Instances { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.Instances[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.Instances[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return ListInstancesResponseInstances{}, fmt.Errorf("%q too many found in ListInstancesResponse: %w", nameOrID, ErrConflict) + } return ListInstancesResponseInstances{}, fmt.Errorf("%q not found in ListInstancesResponse: %w", nameOrID, ErrNotFound) } @@ -8178,7 +9885,8 @@ func (c Client) ListInstances(ctx context.Context, opts ...ListInstancesOpt) (*L if err != nil { return nil, fmt.Errorf("ListInstances: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if len(opts) > 0 { q := request.URL.Query() @@ -8263,7 +9971,8 @@ func (c Client) CreateInstance(ctx context.Context, req CreateInstanceRequest) ( if err != nil { return nil, fmt.Errorf("CreateInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -8306,11 +10015,19 @@ type ListInstancePoolsResponse struct { // FindInstancePool attempts to find an InstancePool by nameOrID. 
func (l ListInstancePoolsResponse) FindInstancePool(nameOrID string) (InstancePool, error) { + var result []InstancePool for i, elem := range l.InstancePools { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.InstancePools[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.InstancePools[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return InstancePool{}, fmt.Errorf("%q too many found in ListInstancePoolsResponse: %w", nameOrID, ErrConflict) + } return InstancePool{}, fmt.Errorf("%q not found in ListInstancePoolsResponse: %w", nameOrID, ErrNotFound) } @@ -8323,7 +10040,8 @@ func (c Client) ListInstancePools(ctx context.Context) (*ListInstancePoolsRespon if err != nil { return nil, fmt.Errorf("ListInstancePools: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListInstancePools: execute request editors: %w", err) @@ -8419,7 +10137,8 @@ func (c Client) CreateInstancePool(ctx context.Context, req CreateInstancePoolRe if err != nil { return nil, fmt.Errorf("CreateInstancePool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -8464,7 +10183,8 @@ func (c Client) DeleteInstancePool(ctx context.Context, id UUID) (*Operation, er if err != nil { return nil, fmt.Errorf("DeleteInstancePool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteInstancePool: execute request editors: %w", err) @@ -8507,7 +10227,8 @@ func (c Client) GetInstancePool(ctx context.Context, id UUID) (*InstancePool, 
er if err != nil { return nil, fmt.Errorf("GetInstancePool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetInstancePool: execute request editors: %w", err) @@ -8600,7 +10321,8 @@ func (c Client) UpdateInstancePool(ctx context.Context, id UUID, req UpdateInsta if err != nil { return nil, fmt.Errorf("UpdateInstancePool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -8660,7 +10382,8 @@ func (c Client) ResetInstancePoolField(ctx context.Context, id UUID, field Reset if err != nil { return nil, fmt.Errorf("ResetInstancePoolField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetInstancePoolField: execute request editors: %w", err) @@ -8712,7 +10435,8 @@ func (c Client) EvictInstancePoolMembers(ctx context.Context, id UUID, req Evict if err != nil { return nil, fmt.Errorf("EvictInstancePoolMembers: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -8767,7 +10491,8 @@ func (c Client) ScaleInstancePool(ctx context.Context, id UUID, req ScaleInstanc if err != nil { return nil, fmt.Errorf("ScaleInstancePool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -8808,15 +10533,23 @@ type ListInstanceTypesResponse struct { InstanceTypes []InstanceType `json:"instance-types,omitempty"` } -// FindInstanceType attempts to find an InstanceType by 
ID. -func (l ListInstanceTypesResponse) FindInstanceType(ID string) (InstanceType, error) { +// FindInstanceType attempts to find an InstanceType by id. +func (l ListInstanceTypesResponse) FindInstanceType(id string) (InstanceType, error) { + var result []InstanceType for i, elem := range l.InstanceTypes { - if elem.ID.String() == ID { - return l.InstanceTypes[i], nil + if string(elem.ID) == id { + result = append(result, l.InstanceTypes[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return InstanceType{}, fmt.Errorf("%q too many found in ListInstanceTypesResponse: %w", id, ErrConflict) + } - return InstanceType{}, fmt.Errorf("%q not found in ListInstanceTypesResponse: %w", ID, ErrNotFound) + return InstanceType{}, fmt.Errorf("%q not found in ListInstanceTypesResponse: %w", id, ErrNotFound) } // List Compute instance Types @@ -8827,7 +10560,8 @@ func (c Client) ListInstanceTypes(ctx context.Context) (*ListInstanceTypesRespon if err != nil { return nil, fmt.Errorf("ListInstanceTypes: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListInstanceTypes: execute request editors: %w", err) @@ -8870,7 +10604,8 @@ func (c Client) GetInstanceType(ctx context.Context, id UUID) (*InstanceType, er if err != nil { return nil, fmt.Errorf("GetInstanceType: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetInstanceType: execute request editors: %w", err) @@ -8913,7 +10648,8 @@ func (c Client) DeleteInstance(ctx context.Context, id UUID) (*Operation, error) if err != nil { return nil, fmt.Errorf("DeleteInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + 
request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteInstance: execute request editors: %w", err) @@ -8956,7 +10692,8 @@ func (c Client) GetInstance(ctx context.Context, id UUID) (*Instance, error) { if err != nil { return nil, fmt.Errorf("GetInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetInstance: execute request editors: %w", err) @@ -9013,7 +10750,8 @@ func (c Client) UpdateInstance(ctx context.Context, id UUID, req UpdateInstanceR if err != nil { return nil, fmt.Errorf("UpdateInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9064,7 +10802,8 @@ func (c Client) ResetInstanceField(ctx context.Context, id UUID, field ResetInst if err != nil { return nil, fmt.Errorf("ResetInstanceField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetInstanceField: execute request editors: %w", err) @@ -9107,7 +10846,8 @@ func (c Client) AddInstanceProtection(ctx context.Context, id UUID) (*Operation, if err != nil { return nil, fmt.Errorf("AddInstanceProtection: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("AddInstanceProtection: execute request editors: %w", err) @@ -9150,7 +10890,8 @@ func (c Client) CreateSnapshot(ctx context.Context, id UUID) (*Operation, error) if err != nil { return nil, fmt.Errorf("CreateSnapshot: new 
request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("CreateSnapshot: execute request editors: %w", err) @@ -9197,7 +10938,8 @@ func (c Client) RevealInstancePassword(ctx context.Context, id UUID) (*InstanceP if err != nil { return nil, fmt.Errorf("RevealInstancePassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RevealInstancePassword: execute request editors: %w", err) @@ -9240,7 +10982,8 @@ func (c Client) RebootInstance(ctx context.Context, id UUID) (*Operation, error) if err != nil { return nil, fmt.Errorf("RebootInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RebootInstance: execute request editors: %w", err) @@ -9283,7 +11026,8 @@ func (c Client) RemoveInstanceProtection(ctx context.Context, id UUID) (*Operati if err != nil { return nil, fmt.Errorf("RemoveInstanceProtection: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RemoveInstanceProtection: execute request editors: %w", err) @@ -9338,7 +11082,8 @@ func (c Client) ResetInstance(ctx context.Context, id UUID, req ResetInstanceReq if err != nil { return nil, fmt.Errorf("ResetInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9383,7 +11128,8 @@ func (c Client) ResetInstancePassword(ctx 
context.Context, id UUID) (*Operation, if err != nil { return nil, fmt.Errorf("ResetInstancePassword: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetInstancePassword: execute request editors: %w", err) @@ -9436,7 +11182,8 @@ func (c Client) ResizeInstanceDisk(ctx context.Context, id UUID, req ResizeInsta if err != nil { return nil, fmt.Errorf("ResizeInstanceDisk: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9491,7 +11238,8 @@ func (c Client) ScaleInstance(ctx context.Context, id UUID, req ScaleInstanceReq if err != nil { return nil, fmt.Errorf("ScaleInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9553,7 +11301,8 @@ func (c Client) StartInstance(ctx context.Context, id UUID, req StartInstanceReq if err != nil { return nil, fmt.Errorf("StartInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9598,7 +11347,8 @@ func (c Client) StopInstance(ctx context.Context, id UUID) (*Operation, error) { if err != nil { return nil, fmt.Errorf("StopInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("StopInstance: execute request editors: %w", err) @@ -9652,7 +11402,8 @@ func (c Client) RevertInstanceToSnapshot(ctx context.Context, instanceID UUID, r if err != nil { return nil, fmt.Errorf("RevertInstanceToSnapshot: new 
request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9695,11 +11446,19 @@ type ListLoadBalancersResponse struct { // FindLoadBalancer attempts to find an LoadBalancer by nameOrID. func (l ListLoadBalancersResponse) FindLoadBalancer(nameOrID string) (LoadBalancer, error) { + var result []LoadBalancer for i, elem := range l.LoadBalancers { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.LoadBalancers[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.LoadBalancers[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return LoadBalancer{}, fmt.Errorf("%q too many found in ListLoadBalancersResponse: %w", nameOrID, ErrConflict) + } return LoadBalancer{}, fmt.Errorf("%q not found in ListLoadBalancersResponse: %w", nameOrID, ErrNotFound) } @@ -9712,7 +11471,8 @@ func (c Client) ListLoadBalancers(ctx context.Context) (*ListLoadBalancersRespon if err != nil { return nil, fmt.Errorf("ListLoadBalancers: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListLoadBalancers: execute request editors: %w", err) @@ -9768,7 +11528,8 @@ func (c Client) CreateLoadBalancer(ctx context.Context, req CreateLoadBalancerRe if err != nil { return nil, fmt.Errorf("CreateLoadBalancer: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9813,7 +11574,8 @@ func (c Client) DeleteLoadBalancer(ctx context.Context, id UUID) (*Operation, er if err != nil { return nil, fmt.Errorf("DeleteLoadBalancer: new request: %w", err) } - request.Header.Add("User-Agent", 
UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteLoadBalancer: execute request editors: %w", err) @@ -9856,7 +11618,8 @@ func (c Client) GetLoadBalancer(ctx context.Context, id UUID) (*LoadBalancer, er if err != nil { return nil, fmt.Errorf("GetLoadBalancer: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetLoadBalancer: execute request editors: %w", err) @@ -9912,7 +11675,8 @@ func (c Client) UpdateLoadBalancer(ctx context.Context, id UUID, req UpdateLoadB if err != nil { return nil, fmt.Errorf("UpdateLoadBalancer: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -9996,7 +11760,8 @@ func (c Client) AddServiceToLoadBalancer(ctx context.Context, id UUID, req AddSe if err != nil { return nil, fmt.Errorf("AddServiceToLoadBalancer: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -10041,7 +11806,8 @@ func (c Client) DeleteLoadBalancerService(ctx context.Context, id UUID, serviceI if err != nil { return nil, fmt.Errorf("DeleteLoadBalancerService: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteLoadBalancerService: execute request editors: %w", err) @@ -10084,7 +11850,8 @@ func (c Client) GetLoadBalancerService(ctx context.Context, id UUID, serviceID U if err != nil { return nil, fmt.Errorf("GetLoadBalancerService: new request: %w", err) } - 
request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetLoadBalancerService: execute request editors: %w", err) @@ -10164,7 +11931,8 @@ func (c Client) UpdateLoadBalancerService(ctx context.Context, id UUID, serviceI if err != nil { return nil, fmt.Errorf("UpdateLoadBalancerService: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -10215,7 +11983,8 @@ func (c Client) ResetLoadBalancerServiceField(ctx context.Context, id UUID, serv if err != nil { return nil, fmt.Errorf("ResetLoadBalancerServiceField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetLoadBalancerServiceField: execute request editors: %w", err) @@ -10265,7 +12034,8 @@ func (c Client) ResetLoadBalancerField(ctx context.Context, id UUID, field Reset if err != nil { return nil, fmt.Errorf("ResetLoadBalancerField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetLoadBalancerField: execute request editors: %w", err) @@ -10308,7 +12078,8 @@ func (c Client) GetOperation(ctx context.Context, id UUID) (*Operation, error) { if err != nil { return nil, fmt.Errorf("GetOperation: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetOperation: execute request editors: %w", err) @@ -10351,7 +12122,8 @@ func (c Client) GetOrganization(ctx 
context.Context) (*Organization, error) { if err != nil { return nil, fmt.Errorf("GetOrganization: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetOrganization: execute request editors: %w", err) @@ -10392,11 +12164,19 @@ type ListPrivateNetworksResponse struct { // FindPrivateNetwork attempts to find an PrivateNetwork by nameOrID. func (l ListPrivateNetworksResponse) FindPrivateNetwork(nameOrID string) (PrivateNetwork, error) { + var result []PrivateNetwork for i, elem := range l.PrivateNetworks { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.PrivateNetworks[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.PrivateNetworks[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return PrivateNetwork{}, fmt.Errorf("%q too many found in ListPrivateNetworksResponse: %w", nameOrID, ErrConflict) + } return PrivateNetwork{}, fmt.Errorf("%q not found in ListPrivateNetworksResponse: %w", nameOrID, ErrNotFound) } @@ -10409,7 +12189,8 @@ func (c Client) ListPrivateNetworks(ctx context.Context) (*ListPrivateNetworksRe if err != nil { return nil, fmt.Errorf("ListPrivateNetworks: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListPrivateNetworks: execute request editors: %w", err) @@ -10454,6 +12235,8 @@ type CreatePrivateNetworkRequest struct { Name string `json:"name" validate:"required,gte=1,lte=255"` // Private Network netmask Netmask net.IP `json:"netmask,omitempty"` + // Private Network DHCP Options + Options *PrivateNetworkOptions `json:"options,omitempty"` // Private Network start IP address StartIP net.IP 
`json:"start-ip,omitempty"` } @@ -10471,7 +12254,8 @@ func (c Client) CreatePrivateNetwork(ctx context.Context, req CreatePrivateNetwo if err != nil { return nil, fmt.Errorf("CreatePrivateNetwork: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -10516,7 +12300,8 @@ func (c Client) DeletePrivateNetwork(ctx context.Context, id UUID) (*Operation, if err != nil { return nil, fmt.Errorf("DeletePrivateNetwork: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeletePrivateNetwork: execute request editors: %w", err) @@ -10559,7 +12344,8 @@ func (c Client) GetPrivateNetwork(ctx context.Context, id UUID) (*PrivateNetwork if err != nil { return nil, fmt.Errorf("GetPrivateNetwork: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetPrivateNetwork: execute request editors: %w", err) @@ -10604,6 +12390,8 @@ type UpdatePrivateNetworkRequest struct { Name string `json:"name,omitempty" validate:"omitempty,gte=1,lte=255"` // Private Network netmask Netmask net.IP `json:"netmask,omitempty"` + // Private Network DHCP Options + Options *PrivateNetworkOptions `json:"options,omitempty"` // Private Network start IP address StartIP net.IP `json:"start-ip,omitempty"` } @@ -10621,7 +12409,8 @@ func (c Client) UpdatePrivateNetwork(ctx context.Context, id UUID, req UpdatePri if err != nil { return nil, fmt.Errorf("UpdatePrivateNetwork: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -10672,7 
+12461,8 @@ func (c Client) ResetPrivateNetworkField(ctx context.Context, id UUID, field Res if err != nil { return nil, fmt.Errorf("ResetPrivateNetworkField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetPrivateNetworkField: execute request editors: %w", err) @@ -10733,7 +12523,8 @@ func (c Client) AttachInstanceToPrivateNetwork(ctx context.Context, id UUID, req if err != nil { return nil, fmt.Errorf("AttachInstanceToPrivateNetwork: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -10788,7 +12579,8 @@ func (c Client) DetachInstanceFromPrivateNetwork(ctx context.Context, id UUID, r if err != nil { return nil, fmt.Errorf("DetachInstanceFromPrivateNetwork: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -10849,7 +12641,8 @@ func (c Client) UpdatePrivateNetworkInstanceIP(ctx context.Context, id UUID, req if err != nil { return nil, fmt.Errorf("UpdatePrivateNetworkInstanceIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -10898,7 +12691,8 @@ func (c Client) ListQuotas(ctx context.Context) (*ListQuotasResponse, error) { if err != nil { return nil, fmt.Errorf("ListQuotas: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListQuotas: execute request editors: %w", err) @@ -10941,7 +12735,8 @@ func (c Client) GetQuota(ctx 
context.Context, entity string) (*Quota, error) { if err != nil { return nil, fmt.Errorf("GetQuota: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetQuota: execute request editors: %w", err) @@ -10984,7 +12779,8 @@ func (c Client) DeleteReverseDNSElasticIP(ctx context.Context, id UUID) (*Operat if err != nil { return nil, fmt.Errorf("DeleteReverseDNSElasticIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteReverseDNSElasticIP: execute request editors: %w", err) @@ -11027,7 +12823,8 @@ func (c Client) GetReverseDNSElasticIP(ctx context.Context, id UUID) (*ReverseDN if err != nil { return nil, fmt.Errorf("GetReverseDNSElasticIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetReverseDNSElasticIP: execute request editors: %w", err) @@ -11079,7 +12876,8 @@ func (c Client) UpdateReverseDNSElasticIP(ctx context.Context, id UUID, req Upda if err != nil { return nil, fmt.Errorf("UpdateReverseDNSElasticIP: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11124,7 +12922,8 @@ func (c Client) DeleteReverseDNSInstance(ctx context.Context, id UUID) (*Operati if err != nil { return nil, fmt.Errorf("DeleteReverseDNSInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, 
fmt.Errorf("DeleteReverseDNSInstance: execute request editors: %w", err) @@ -11167,7 +12966,8 @@ func (c Client) GetReverseDNSInstance(ctx context.Context, id UUID) (*ReverseDNS if err != nil { return nil, fmt.Errorf("GetReverseDNSInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetReverseDNSInstance: execute request editors: %w", err) @@ -11219,7 +13019,8 @@ func (c Client) UpdateReverseDNSInstance(ctx context.Context, id UUID, req Updat if err != nil { return nil, fmt.Errorf("UpdateReverseDNSInstance: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11262,11 +13063,19 @@ type ListSecurityGroupsResponse struct { // FindSecurityGroup attempts to find an SecurityGroup by nameOrID. 
func (l ListSecurityGroupsResponse) FindSecurityGroup(nameOrID string) (SecurityGroup, error) { + var result []SecurityGroup for i, elem := range l.SecurityGroups { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.SecurityGroups[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.SecurityGroups[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return SecurityGroup{}, fmt.Errorf("%q too many found in ListSecurityGroupsResponse: %w", nameOrID, ErrConflict) + } return SecurityGroup{}, fmt.Errorf("%q not found in ListSecurityGroupsResponse: %w", nameOrID, ErrNotFound) } @@ -11297,7 +13106,8 @@ func (c Client) ListSecurityGroups(ctx context.Context, opts ...ListSecurityGrou if err != nil { return nil, fmt.Errorf("ListSecurityGroups: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if len(opts) > 0 { q := request.URL.Query() @@ -11360,7 +13170,8 @@ func (c Client) CreateSecurityGroup(ctx context.Context, req CreateSecurityGroup if err != nil { return nil, fmt.Errorf("CreateSecurityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11405,7 +13216,8 @@ func (c Client) DeleteSecurityGroup(ctx context.Context, id UUID) (*Operation, e if err != nil { return nil, fmt.Errorf("DeleteSecurityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteSecurityGroup: execute request editors: %w", err) @@ -11448,7 +13260,8 @@ func (c Client) GetSecurityGroup(ctx context.Context, id UUID) (*SecurityGroup, if err != nil { return nil, fmt.Errorf("GetSecurityGroup: new request: %w", err) 
} - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetSecurityGroup: execute request editors: %w", err) @@ -11541,7 +13354,8 @@ func (c Client) AddRuleToSecurityGroup(ctx context.Context, id UUID, req AddRule if err != nil { return nil, fmt.Errorf("AddRuleToSecurityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11586,7 +13400,8 @@ func (c Client) DeleteRuleFromSecurityGroup(ctx context.Context, id UUID, ruleID if err != nil { return nil, fmt.Errorf("DeleteRuleFromSecurityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteRuleFromSecurityGroup: execute request editors: %w", err) @@ -11639,7 +13454,8 @@ func (c Client) AddExternalSourceToSecurityGroup(ctx context.Context, id UUID, r if err != nil { return nil, fmt.Errorf("AddExternalSourceToSecurityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11694,7 +13510,8 @@ func (c Client) AttachInstanceToSecurityGroup(ctx context.Context, id UUID, req if err != nil { return nil, fmt.Errorf("AttachInstanceToSecurityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11749,7 +13566,8 @@ func (c Client) DetachInstanceFromSecurityGroup(ctx context.Context, id UUID, re if err != nil { return nil, fmt.Errorf("DetachInstanceFromSecurityGroup: new request: %w", err) } - 
request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11804,7 +13622,8 @@ func (c Client) RemoveExternalSourceFromSecurityGroup(ctx context.Context, id UU if err != nil { return nil, fmt.Errorf("RemoveExternalSourceFromSecurityGroup: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11847,11 +13666,19 @@ type ListSKSClustersResponse struct { // FindSKSCluster attempts to find an SKSCluster by nameOrID. func (l ListSKSClustersResponse) FindSKSCluster(nameOrID string) (SKSCluster, error) { + var result []SKSCluster for i, elem := range l.SKSClusters { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.SKSClusters[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.SKSClusters[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return SKSCluster{}, fmt.Errorf("%q too many found in ListSKSClustersResponse: %w", nameOrID, ErrConflict) + } return SKSCluster{}, fmt.Errorf("%q not found in ListSKSClustersResponse: %w", nameOrID, ErrNotFound) } @@ -11864,7 +13691,8 @@ func (c Client) ListSKSClusters(ctx context.Context) (*ListSKSClustersResponse, if err != nil { return nil, fmt.Errorf("ListSKSClusters: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListSKSClusters: execute request editors: %w", err) @@ -11946,7 +13774,8 @@ func (c Client) CreateSKSCluster(ctx context.Context, req CreateSKSClusterReques if err != nil { return nil, fmt.Errorf("CreateSKSCluster: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + 
request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -11991,7 +13820,8 @@ func (c Client) ListSKSClusterDeprecatedResources(ctx context.Context, id UUID) if err != nil { return nil, fmt.Errorf("ListSKSClusterDeprecatedResources: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListSKSClusterDeprecatedResources: execute request editors: %w", err) @@ -12043,7 +13873,8 @@ func (c Client) GenerateSKSClusterKubeconfig(ctx context.Context, id UUID, req S if err != nil { return nil, fmt.Errorf("GenerateSKSClusterKubeconfig: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -12100,7 +13931,8 @@ func (c Client) ListSKSClusterVersions(ctx context.Context, opts ...ListSKSClust if err != nil { return nil, fmt.Errorf("ListSKSClusterVersions: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if len(opts) > 0 { q := request.URL.Query() @@ -12151,7 +13983,8 @@ func (c Client) DeleteSKSCluster(ctx context.Context, id UUID) (*Operation, erro if err != nil { return nil, fmt.Errorf("DeleteSKSCluster: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteSKSCluster: execute request editors: %w", err) @@ -12194,7 +14027,8 @@ func (c Client) GetSKSCluster(ctx context.Context, id UUID) (*SKSCluster, error) if err != nil { return nil, fmt.Errorf("GetSKSCluster: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := 
c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetSKSCluster: execute request editors: %w", err) @@ -12256,7 +14090,8 @@ func (c Client) UpdateSKSCluster(ctx context.Context, id UUID, req UpdateSKSClus if err != nil { return nil, fmt.Errorf("UpdateSKSCluster: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -12313,7 +14148,8 @@ func (c Client) GetSKSClusterAuthorityCert(ctx context.Context, id UUID, authori if err != nil { return nil, fmt.Errorf("GetSKSClusterAuthorityCert: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetSKSClusterAuthorityCert: execute request editors: %w", err) @@ -12358,7 +14194,8 @@ func (c Client) GetSKSClusterInspection(ctx context.Context, id UUID) (*GetSKSCl if err != nil { return nil, fmt.Errorf("GetSKSClusterInspection: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetSKSClusterInspection: execute request editors: %w", err) @@ -12393,6 +14230,13 @@ func (c Client) GetSKSClusterInspection(ctx context.Context, id UUID) (*GetSKSCl return bodyresp, nil } +type CreateSKSNodepoolRequestPublicIPAssignment string + +const ( + CreateSKSNodepoolRequestPublicIPAssignmentInet4 CreateSKSNodepoolRequestPublicIPAssignment = "inet4" + CreateSKSNodepoolRequestPublicIPAssignmentDual CreateSKSNodepoolRequestPublicIPAssignment = "dual" +) + type CreateSKSNodepoolRequest struct { // Nodepool addons Addons []string `json:"addons,omitempty"` @@ -12404,17 +14248,21 @@ type CreateSKSNodepoolRequest struct { Description string `json:"description,omitempty" 
validate:"omitempty,lte=255"` // Nodepool instances disk size in GiB DiskSize int64 `json:"disk-size" validate:"required,gte=20,lte=51200"` - // Prefix to apply to instances names (default: pool) + // Prefix to apply to instances names (default: pool), lowercase only InstancePrefix string `json:"instance-prefix,omitempty" validate:"omitempty,gte=1,lte=30"` // Compute instance type InstanceType *InstanceType `json:"instance-type" validate:"required"` // Kubelet image GC options KubeletImageGC *KubeletImageGC `json:"kubelet-image-gc,omitempty"` Labels Labels `json:"labels,omitempty"` - // Nodepool name + // Nodepool name, lowercase only Name string `json:"name" validate:"required,gte=1,lte=255"` // Nodepool Private Networks PrivateNetworks []PrivateNetwork `json:"private-networks,omitempty"` + // Configures public IP assignment of the Instances with: + // * both IPv4 and IPv6 (`dual`) addressing. + // * both IPv4 and IPv6 (`dual`) addressing. + PublicIPAssignment CreateSKSNodepoolRequestPublicIPAssignment `json:"public-ip-assignment,omitempty"` // Nodepool Security Groups SecurityGroups []SecurityGroup `json:"security-groups,omitempty"` // Number of instances @@ -12435,7 +14283,8 @@ func (c Client) CreateSKSNodepool(ctx context.Context, id UUID, req CreateSKSNod if err != nil { return nil, fmt.Errorf("CreateSKSNodepool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -12480,7 +14329,8 @@ func (c Client) DeleteSKSNodepool(ctx context.Context, id UUID, sksNodepoolID UU if err != nil { return nil, fmt.Errorf("DeleteSKSNodepool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteSKSNodepool: execute request editors: %w", err) @@ -12523,7 +14373,8 @@ func (c Client) 
GetSKSNodepool(ctx context.Context, id UUID, sksNodepoolID UUID) if err != nil { return nil, fmt.Errorf("GetSKSNodepool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetSKSNodepool: execute request editors: %w", err) @@ -12558,6 +14409,13 @@ func (c Client) GetSKSNodepool(ctx context.Context, id UUID, sksNodepoolID UUID) return bodyresp, nil } +type UpdateSKSNodepoolRequestPublicIPAssignment string + +const ( + UpdateSKSNodepoolRequestPublicIPAssignmentInet4 UpdateSKSNodepoolRequestPublicIPAssignment = "inet4" + UpdateSKSNodepoolRequestPublicIPAssignmentDual UpdateSKSNodepoolRequestPublicIPAssignment = "dual" +) + type UpdateSKSNodepoolRequest struct { // Nodepool Anti-affinity Groups AntiAffinityGroups []AntiAffinityGroup `json:"anti-affinity-groups,omitempty"` @@ -12567,15 +14425,19 @@ type UpdateSKSNodepoolRequest struct { Description string `json:"description,omitempty" validate:"omitempty,lte=255"` // Nodepool instances disk size in GiB DiskSize int64 `json:"disk-size,omitempty" validate:"omitempty,gte=20,lte=51200"` - // Prefix to apply to managed instances names (default: pool) + // Prefix to apply to managed instances names (default: pool), lowercase only InstancePrefix string `json:"instance-prefix,omitempty" validate:"omitempty,gte=1,lte=30"` // Compute instance type InstanceType *InstanceType `json:"instance-type,omitempty"` Labels Labels `json:"labels,omitempty"` - // Nodepool name + // Nodepool name, lowercase only Name string `json:"name,omitempty" validate:"omitempty,gte=1,lte=255"` // Nodepool Private Networks PrivateNetworks []PrivateNetwork `json:"private-networks,omitempty"` + // Configures public IP assignment of the Instances with: + // * both IPv4 and IPv6 (`dual`) addressing. + // * both IPv4 and IPv6 (`dual`) addressing. 
+ PublicIPAssignment UpdateSKSNodepoolRequestPublicIPAssignment `json:"public-ip-assignment,omitempty"` // Nodepool Security Groups SecurityGroups []SecurityGroup `json:"security-groups,omitempty"` Taints SKSNodepoolTaints `json:"taints,omitempty"` @@ -12594,7 +14456,8 @@ func (c Client) UpdateSKSNodepool(ctx context.Context, id UUID, sksNodepoolID UU if err != nil { return nil, fmt.Errorf("UpdateSKSNodepool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -12650,7 +14513,8 @@ func (c Client) ResetSKSNodepoolField(ctx context.Context, id UUID, sksNodepoolI if err != nil { return nil, fmt.Errorf("ResetSKSNodepoolField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetSKSNodepoolField: execute request editors: %w", err) @@ -12702,7 +14566,8 @@ func (c Client) EvictSKSNodepoolMembers(ctx context.Context, id UUID, sksNodepoo if err != nil { return nil, fmt.Errorf("EvictSKSNodepoolMembers: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -12757,7 +14622,8 @@ func (c Client) ScaleSKSNodepool(ctx context.Context, id UUID, sksNodepoolID UUI if err != nil { return nil, fmt.Errorf("ScaleSKSNodepool: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -12802,7 +14668,8 @@ func (c Client) RotateSKSCcmCredentials(ctx context.Context, id UUID) (*Operatio if err != nil { return nil, fmt.Errorf("RotateSKSCcmCredentials: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + 
request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RotateSKSCcmCredentials: execute request editors: %w", err) @@ -12845,7 +14712,8 @@ func (c Client) RotateSKSOperatorsCA(ctx context.Context, id UUID) (*Operation, if err != nil { return nil, fmt.Errorf("RotateSKSOperatorsCA: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("RotateSKSOperatorsCA: execute request editors: %w", err) @@ -12898,7 +14766,8 @@ func (c Client) UpgradeSKSCluster(ctx context.Context, id UUID, req UpgradeSKSCl if err != nil { return nil, fmt.Errorf("UpgradeSKSCluster: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -12943,7 +14812,8 @@ func (c Client) UpgradeSKSClusterServiceLevel(ctx context.Context, id UUID) (*Op if err != nil { return nil, fmt.Errorf("UpgradeSKSClusterServiceLevel: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("UpgradeSKSClusterServiceLevel: execute request editors: %w", err) @@ -12993,7 +14863,8 @@ func (c Client) ResetSKSClusterField(ctx context.Context, id UUID, field ResetSK if err != nil { return nil, fmt.Errorf("ResetSKSClusterField: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ResetSKSClusterField: execute request editors: %w", err) @@ -13034,11 +14905,19 @@ type ListSnapshotsResponse struct { // FindSnapshot attempts to find an Snapshot by 
nameOrID. func (l ListSnapshotsResponse) FindSnapshot(nameOrID string) (Snapshot, error) { + var result []Snapshot for i, elem := range l.Snapshots { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.Snapshots[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.Snapshots[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return Snapshot{}, fmt.Errorf("%q too many found in ListSnapshotsResponse: %w", nameOrID, ErrConflict) + } return Snapshot{}, fmt.Errorf("%q not found in ListSnapshotsResponse: %w", nameOrID, ErrNotFound) } @@ -13051,7 +14930,8 @@ func (c Client) ListSnapshots(ctx context.Context) (*ListSnapshotsResponse, erro if err != nil { return nil, fmt.Errorf("ListSnapshots: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListSnapshots: execute request editors: %w", err) @@ -13094,7 +14974,8 @@ func (c Client) DeleteSnapshot(ctx context.Context, id UUID) (*Operation, error) if err != nil { return nil, fmt.Errorf("DeleteSnapshot: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteSnapshot: execute request editors: %w", err) @@ -13137,7 +15018,8 @@ func (c Client) GetSnapshot(ctx context.Context, id UUID) (*Snapshot, error) { if err != nil { return nil, fmt.Errorf("GetSnapshot: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetSnapshot: execute request editors: %w", err) @@ -13180,7 +15062,8 @@ func (c Client) ExportSnapshot(ctx 
context.Context, id UUID) (*Operation, error) if err != nil { return nil, fmt.Errorf("ExportSnapshot: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ExportSnapshot: execute request editors: %w", err) @@ -13241,7 +15124,8 @@ func (c Client) PromoteSnapshotToTemplate(ctx context.Context, id UUID, req Prom if err != nil { return nil, fmt.Errorf("PromoteSnapshotToTemplate: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -13284,11 +15168,19 @@ type ListSOSBucketsUsageResponse struct { // FindSOSBucketUsage attempts to find an SOSBucketUsage by name. func (l ListSOSBucketsUsageResponse) FindSOSBucketUsage(name string) (SOSBucketUsage, error) { + var result []SOSBucketUsage for i, elem := range l.SOSBucketsUsage { if string(elem.Name) == name { - return l.SOSBucketsUsage[i], nil + result = append(result, l.SOSBucketsUsage[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return SOSBucketUsage{}, fmt.Errorf("%q too many found in ListSOSBucketsUsageResponse: %w", name, ErrConflict) + } return SOSBucketUsage{}, fmt.Errorf("%q not found in ListSOSBucketsUsageResponse: %w", name, ErrNotFound) } @@ -13301,7 +15193,8 @@ func (c Client) ListSOSBucketsUsage(ctx context.Context) (*ListSOSBucketsUsageRe if err != nil { return nil, fmt.Errorf("ListSOSBucketsUsage: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListSOSBucketsUsage: execute request editors: %w", err) @@ -13356,7 +15249,8 @@ func (c Client) GetSOSPresignedURL(ctx context.Context, bucket string, opts ...G if err != 
nil { return nil, fmt.Errorf("GetSOSPresignedURL: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if len(opts) > 0 { q := request.URL.Query() @@ -13403,15 +15297,23 @@ type ListSSHKeysResponse struct { SSHKeys []SSHKey `json:"ssh-keys,omitempty"` } -// FindSSHKey attempts to find an SSHKey by name. -func (l ListSSHKeysResponse) FindSSHKey(name string) (SSHKey, error) { +// FindSSHKey attempts to find an SSHKey by nameOrFingerprint. +func (l ListSSHKeysResponse) FindSSHKey(nameOrFingerprint string) (SSHKey, error) { + var result []SSHKey for i, elem := range l.SSHKeys { - if string(elem.Name) == name { - return l.SSHKeys[i], nil + if string(elem.Name) == nameOrFingerprint || string(elem.Fingerprint) == nameOrFingerprint { + result = append(result, l.SSHKeys[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return SSHKey{}, fmt.Errorf("%q too many found in ListSSHKeysResponse: %w", nameOrFingerprint, ErrConflict) + } - return SSHKey{}, fmt.Errorf("%q not found in ListSSHKeysResponse: %w", name, ErrNotFound) + return SSHKey{}, fmt.Errorf("%q not found in ListSSHKeysResponse: %w", nameOrFingerprint, ErrNotFound) } // List SSH keys @@ -13422,7 +15324,8 @@ func (c Client) ListSSHKeys(ctx context.Context) (*ListSSHKeysResponse, error) { if err != nil { return nil, fmt.Errorf("ListSSHKeys: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListSSHKeys: execute request editors: %w", err) @@ -13477,7 +15380,8 @@ func (c Client) RegisterSSHKey(ctx context.Context, req RegisterSSHKeyRequest) ( if err != nil { return nil, fmt.Errorf("RegisterSSHKey: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) 
request.Header.Add("Content-Type", "application/json") @@ -13522,7 +15426,8 @@ func (c Client) DeleteSSHKey(ctx context.Context, name string) (*Operation, erro if err != nil { return nil, fmt.Errorf("DeleteSSHKey: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteSSHKey: execute request editors: %w", err) @@ -13565,7 +15470,8 @@ func (c Client) GetSSHKey(ctx context.Context, name string) (*SSHKey, error) { if err != nil { return nil, fmt.Errorf("GetSSHKey: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetSSHKey: execute request editors: %w", err) @@ -13606,11 +15512,19 @@ type ListTemplatesResponse struct { // FindTemplate attempts to find an Template by nameOrID. 
func (l ListTemplatesResponse) FindTemplate(nameOrID string) (Template, error) { + var result []Template for i, elem := range l.Templates { - if string(elem.Name) == nameOrID || elem.ID.String() == nameOrID { - return l.Templates[i], nil + if string(elem.Name) == nameOrID || string(elem.ID) == nameOrID { + result = append(result, l.Templates[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return Template{}, fmt.Errorf("%q too many found in ListTemplatesResponse: %w", nameOrID, ErrConflict) + } return Template{}, fmt.Errorf("%q not found in ListTemplatesResponse: %w", nameOrID, ErrNotFound) } @@ -13644,7 +15558,8 @@ func (c Client) ListTemplates(ctx context.Context, opts ...ListTemplatesOpt) (*L if err != nil { return nil, fmt.Errorf("ListTemplates: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if len(opts) > 0 { q := request.URL.Query() @@ -13734,7 +15649,8 @@ func (c Client) RegisterTemplate(ctx context.Context, req RegisterTemplateReques if err != nil { return nil, fmt.Errorf("RegisterTemplate: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -13779,7 +15695,8 @@ func (c Client) DeleteTemplate(ctx context.Context, id UUID) (*Operation, error) if err != nil { return nil, fmt.Errorf("DeleteTemplate: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("DeleteTemplate: execute request editors: %w", err) @@ -13822,7 +15739,8 @@ func (c Client) GetTemplate(ctx context.Context, id UUID) (*Template, error) { if err != nil { return nil, fmt.Errorf("GetTemplate: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + 
request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("GetTemplate: execute request editors: %w", err) @@ -13875,7 +15793,8 @@ func (c Client) CopyTemplate(ctx context.Context, id UUID, req CopyTemplateReque if err != nil { return nil, fmt.Errorf("CopyTemplate: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -13932,7 +15851,8 @@ func (c Client) UpdateTemplate(ctx context.Context, id UUID, req UpdateTemplateR if err != nil { return nil, fmt.Errorf("UpdateTemplate: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) request.Header.Add("Content-Type", "application/json") @@ -13969,19 +15889,252 @@ func (c Client) UpdateTemplate(ctx context.Context, id UUID, req UpdateTemplateR return bodyresp, nil } +type ListUsersResponse struct { + Users []User `json:"users,omitempty"` +} + +// FindUser attempts to find an User by id. 
+func (l ListUsersResponse) FindUser(id string) (User, error) { + var result []User + for i, elem := range l.Users { + if string(elem.ID) == id { + result = append(result, l.Users[i]) + } + } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return User{}, fmt.Errorf("%q too many found in ListUsersResponse: %w", id, ErrConflict) + } + + return User{}, fmt.Errorf("%q not found in ListUsersResponse: %w", id, ErrNotFound) +} + +// List Users +func (c Client) ListUsers(ctx context.Context) (*ListUsersResponse, error) { + path := "/user" + + request, err := http.NewRequestWithContext(ctx, "GET", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("ListUsers: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("ListUsers: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("ListUsers: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "list-users") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("ListUsers: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("ListUsers: http response: %w", err) + } + + bodyresp := &ListUsersResponse{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("ListUsers: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type CreateUserRequest struct { + // User Email + Email string `json:"email" validate:"required"` + // IAM Role + Role *IAMRole `json:"role,omitempty"` +} + +// Create a User +func (c Client) CreateUser(ctx context.Context, req CreateUserRequest) (*Operation, error) { + path := "/user" + + body, err := prepareJSONBody(req) + if err != nil { + return nil, 
fmt.Errorf("CreateUser: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "POST", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("CreateUser: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("CreateUser: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("CreateUser: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "create-user") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("CreateUser: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("CreateUser: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("CreateUser: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +// Delete User +func (c Client) DeleteUser(ctx context.Context, id UUID) (*Operation, error) { + path := fmt.Sprintf("/user/%v", id) + + request, err := http.NewRequestWithContext(ctx, "DELETE", c.serverEndpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("DeleteUser: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("DeleteUser: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("DeleteUser: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "delete-user") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("DeleteUser: http client do: %w", err) + } + + if 
c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("DeleteUser: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("DeleteUser: prepare Json response: %w", err) + } + + return bodyresp, nil +} + +type UpdateUserRoleRequest struct { + // IAM Role + Role *IAMRole `json:"role,omitempty"` +} + +// Update a User's IAM role +func (c Client) UpdateUserRole(ctx context.Context, id UUID, req UpdateUserRoleRequest) (*Operation, error) { + path := fmt.Sprintf("/user/%v", id) + + body, err := prepareJSONBody(req) + if err != nil { + return nil, fmt.Errorf("UpdateUserRole: prepare Json body: %w", err) + } + + request, err := http.NewRequestWithContext(ctx, "PUT", c.serverEndpoint+path, body) + if err != nil { + return nil, fmt.Errorf("UpdateUserRole: new request: %w", err) + } + + request.Header.Add("User-Agent", c.getUserAgent()) + + request.Header.Add("Content-Type", "application/json") + + if err := c.executeRequestInterceptors(ctx, request); err != nil { + return nil, fmt.Errorf("UpdateUserRole: execute request editors: %w", err) + } + + if err := c.signRequest(request); err != nil { + return nil, fmt.Errorf("UpdateUserRole: sign request: %w", err) + } + + if c.trace { + dumpRequest(request, "update-user-role") + } + + response, err := c.httpClient.Do(request) + if err != nil { + return nil, fmt.Errorf("UpdateUserRole: http client do: %w", err) + } + + if c.trace { + dumpResponse(response) + } + + if err := handleHTTPErrorResp(response); err != nil { + return nil, fmt.Errorf("UpdateUserRole: http response: %w", err) + } + + bodyresp := &Operation{} + if err := prepareJSONResponse(response, bodyresp); err != nil { + return nil, fmt.Errorf("UpdateUserRole: prepare Json response: %w", err) + } + + return bodyresp, nil +} + type ListZonesResponse struct { Zones []Zone `json:"zones,omitempty"` } -// FindZone 
attempts to find an Zone by name. -func (l ListZonesResponse) FindZone(name string) (Zone, error) { +// FindZone attempts to find an Zone by nameOrAPIEndpoint. +func (l ListZonesResponse) FindZone(nameOrAPIEndpoint string) (Zone, error) { + var result []Zone for i, elem := range l.Zones { - if string(elem.Name) == name { - return l.Zones[i], nil + if string(elem.Name) == nameOrAPIEndpoint || string(elem.APIEndpoint) == nameOrAPIEndpoint { + result = append(result, l.Zones[i]) } } + if len(result) == 1 { + return result[0], nil + } + + if len(result) > 1 { + return Zone{}, fmt.Errorf("%q too many found in ListZonesResponse: %w", nameOrAPIEndpoint, ErrConflict) + } - return Zone{}, fmt.Errorf("%q not found in ListZonesResponse: %w", name, ErrNotFound) + return Zone{}, fmt.Errorf("%q not found in ListZonesResponse: %w", nameOrAPIEndpoint, ErrNotFound) } // List Zones @@ -13992,7 +16145,8 @@ func (c Client) ListZones(ctx context.Context) (*ListZonesResponse, error) { if err != nil { return nil, fmt.Errorf("ListZones: new request: %w", err) } - request.Header.Add("User-Agent", UserAgent) + + request.Header.Add("User-Agent", c.getUserAgent()) if err := c.executeRequestInterceptors(ctx, request); err != nil { return nil, fmt.Errorf("ListZones: execute request editors: %w", err) diff --git a/vendor/github.com/exoscale/egoscale/v3/schemas.go b/vendor/github.com/exoscale/egoscale/v3/schemas.go index 5fee1602..08cd4185 100644 --- a/vendor/github.com/exoscale/egoscale/v3/schemas.go +++ b/vendor/github.com/exoscale/egoscale/v3/schemas.go @@ -191,6 +191,416 @@ type DBAASBackupConfig struct { type DBAASDatabaseName string +type DBAASDatadogTag struct { + // Optional tag explanation + Comment string `json:"comment,omitempty" validate:"omitempty,lte=1024"` + // Tag value + Tag string `json:"tag" validate:"required,gte=1,lte=200"` +} + +type DBAASEndpointDatadogCommon struct { + // Custom tags provided by user + DatadogTags []DBAASDatadogTag `json:"datadog-tags,omitempty"` + // 
Disable kafka consumer group metrics. Applies only when attached to kafka services. + DisableConsumerStats *bool `json:"disable-consumer-stats,omitempty"` + // Number of separate instances to fetch kafka consumer statistics with. Applies only when attached to kafka services. + KafkaConsumerCheckInstances int64 `json:"kafka-consumer-check-instances,omitempty" validate:"omitempty,gte=1,lte=100"` + // Number of seconds that datadog will wait to get consumer statistics from brokers. Applies only when attached to kafka services. + KafkaConsumerStatsTimeout int64 `json:"kafka-consumer-stats-timeout,omitempty" validate:"omitempty,gte=2,lte=300"` + // Maximum number of partition contexts to send. Applies only when attached to kafka services. + MaxPartitionContexts int64 `json:"max-partition-contexts,omitempty" validate:"omitempty,gte=200,lte=200000"` +} + +type DBAASEndpointDatadogInputCreateSettings struct { + // Datadog API key + DatadogAPIKey string `json:"datadog-api-key" validate:"required,gte=1,lte=256"` + // Custom tags provided by user + DatadogTags []DBAASDatadogTag `json:"datadog-tags,omitempty"` + // Disable kafka consumer group metrics. Applies only when attached to kafka services. + DisableConsumerStats *bool `json:"disable-consumer-stats,omitempty"` + // Number of separate instances to fetch kafka consumer statistics with. Applies only when attached to kafka services. + KafkaConsumerCheckInstances int64 `json:"kafka-consumer-check-instances,omitempty" validate:"omitempty,gte=1,lte=100"` + // Number of seconds that datadog will wait to get consumer statistics from brokers. Applies only when attached to kafka services. + KafkaConsumerStatsTimeout int64 `json:"kafka-consumer-stats-timeout,omitempty" validate:"omitempty,gte=2,lte=300"` + // Maximum number of partition contexts to send. Applies only when attached to kafka services. 
+ MaxPartitionContexts int64 `json:"max-partition-contexts,omitempty" validate:"omitempty,gte=200,lte=200000"` + Site EnumDatadogSite `json:"site" validate:"required"` +} + +type DBAASEndpointDatadogInputCreate struct { + Settings *DBAASEndpointDatadogInputCreateSettings `json:"settings,omitempty"` +} + +type DBAASEndpointDatadogInputUpdateSettings struct { + // Datadog API key + DatadogAPIKey string `json:"datadog-api-key" validate:"required,gte=1,lte=256"` + // Custom tags provided by user + DatadogTags []DBAASDatadogTag `json:"datadog-tags,omitempty"` + // Disable kafka consumer group metrics. Applies only when attached to kafka services. + DisableConsumerStats *bool `json:"disable-consumer-stats,omitempty"` + // Number of separate instances to fetch kafka consumer statistics with. Applies only when attached to kafka services. + KafkaConsumerCheckInstances int64 `json:"kafka-consumer-check-instances,omitempty" validate:"omitempty,gte=1,lte=100"` + // Number of seconds that datadog will wait to get consumer statistics from brokers. Applies only when attached to kafka services. + KafkaConsumerStatsTimeout int64 `json:"kafka-consumer-stats-timeout,omitempty" validate:"omitempty,gte=2,lte=300"` + // Maximum number of partition contexts to send. Applies only when attached to kafka services. + MaxPartitionContexts int64 `json:"max-partition-contexts,omitempty" validate:"omitempty,gte=200,lte=200000"` + Site EnumDatadogSite `json:"site,omitempty"` +} + +type DBAASEndpointDatadogInputUpdate struct { + Settings *DBAASEndpointDatadogInputUpdateSettings `json:"settings,omitempty"` +} + +// External integration DataDog configuration +type DBAASEndpointDatadogSettingsSettings struct { + // Custom tags provided by user + DatadogTags []DBAASDatadogTag `json:"datadog-tags,omitempty"` + // Disable kafka consumer group metrics. Applies only when attached to kafka services. 
+ DisableConsumerStats *bool `json:"disable-consumer-stats,omitempty"` + // Number of separate instances to fetch kafka consumer statistics with. Applies only when attached to kafka services. + KafkaConsumerCheckInstances int64 `json:"kafka-consumer-check-instances,omitempty" validate:"omitempty,gte=1,lte=100"` + // Number of seconds that datadog will wait to get consumer statistics from brokers. Applies only when attached to kafka services. + KafkaConsumerStatsTimeout int64 `json:"kafka-consumer-stats-timeout,omitempty" validate:"omitempty,gte=2,lte=300"` + // Maximum number of partition contexts to send. Applies only when attached to kafka services. + MaxPartitionContexts int64 `json:"max-partition-contexts,omitempty" validate:"omitempty,gte=200,lte=200000"` + Site EnumDatadogSite `json:"site,omitempty"` +} + +type DBAASEndpointDatadogSettings struct { + // External integration DataDog configuration + Settings *DBAASEndpointDatadogSettingsSettings `json:"settings,omitempty"` +} + +type DBAASEndpointElasticsearch struct { + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // Elasticsearch index prefix + IndexPrefix string `json:"index-prefix" validate:"required,gte=1,lte=1000"` + // Elasticsearch request timeout limit + Timeout int64 `json:"timeout,omitempty" validate:"omitempty,gte=10,lte=120"` + // Elasticsearch connection URL + URL string `json:"url" validate:"required,gte=12,lte=2048"` +} + +type DBAASEndpointElasticsearchInputCreateSettings struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // Elasticsearch index prefix + IndexPrefix string `json:"index-prefix" validate:"required,gte=1,lte=1000"` + // Elasticsearch request timeout limit + Timeout int64 `json:"timeout,omitempty" 
validate:"omitempty,gte=10,lte=120"` + // Elasticsearch connection URL + URL string `json:"url" validate:"required,gte=12,lte=2048"` +} + +type DBAASEndpointElasticsearchInputCreate struct { + Settings *DBAASEndpointElasticsearchInputCreateSettings `json:"settings,omitempty"` +} + +type DBAASEndpointElasticsearchInputUpdateSettings struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // Elasticsearch index prefix + IndexPrefix string `json:"index-prefix,omitempty" validate:"omitempty,gte=1,lte=1000"` + // Elasticsearch request timeout limit + Timeout int64 `json:"timeout,omitempty" validate:"omitempty,gte=10,lte=120"` + // Elasticsearch connection URL + URL string `json:"url,omitempty" validate:"omitempty,gte=12,lte=2048"` +} + +type DBAASEndpointElasticsearchInputUpdate struct { + Settings *DBAASEndpointElasticsearchInputUpdateSettings `json:"settings,omitempty"` +} + +type DBAASEndpointElasticsearchOptionalFields struct { + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // Elasticsearch index prefix + IndexPrefix string `json:"index-prefix,omitempty" validate:"omitempty,gte=1,lte=1000"` + // Elasticsearch request timeout limit + Timeout int64 `json:"timeout,omitempty" validate:"omitempty,gte=10,lte=120"` + // Elasticsearch connection URL + URL string `json:"url,omitempty" validate:"omitempty,gte=12,lte=2048"` +} + +type DBAASEndpointElasticsearchOutput struct { + // External integration endpoint id + ID UUID `json:"id,omitempty"` + // External integration endpoint name + Name string `json:"name,omitempty"` + Settings *DBAASEndpointElasticsearchOptionalFields `json:"settings,omitempty"` + Type EnumExternalEndpointTypes `json:"type,omitempty"` +} + +type DBAASEndpointElasticsearchSecrets 
struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` +} + +// External integration Prometheus configuration +type DBAASEndpointExternalPrometheusOutputSettings struct { + // Prometheus basic authentication username + BasicAuthUsername string `json:"basic-auth-username,omitempty" validate:"omitempty,gte=5,lte=32"` +} + +type DBAASEndpointExternalPrometheusOutput struct { + // External integration endpoint id + ID UUID `json:"id,omitempty"` + // External integration endpoint name + Name string `json:"name,omitempty"` + // External integration Prometheus configuration + Settings *DBAASEndpointExternalPrometheusOutputSettings `json:"settings,omitempty"` + Type EnumExternalEndpointTypes `json:"type,omitempty"` +} + +type DBAASEndpointOpensearch struct { + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // OpenSearch index prefix + IndexPrefix string `json:"index-prefix" validate:"required,gte=1,lte=1000"` + // OpenSearch request timeout limit + Timeout int64 `json:"timeout,omitempty" validate:"omitempty,gte=10,lte=120"` + // OpenSearch connection URL + URL string `json:"url" validate:"required,gte=12,lte=2048"` +} + +type DBAASEndpointOpensearchInputCreateSettings struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // OpenSearch index prefix + IndexPrefix string `json:"index-prefix" validate:"required,gte=1,lte=1000"` + // OpenSearch request timeout limit + Timeout int64 `json:"timeout,omitempty" validate:"omitempty,gte=10,lte=120"` + // OpenSearch connection URL + URL string `json:"url" validate:"required,gte=12,lte=2048"` +} + +type DBAASEndpointOpensearchInputCreate struct { + Settings *DBAASEndpointOpensearchInputCreateSettings 
`json:"settings,omitempty"` +} + +type DBAASEndpointOpensearchInputUpdateSettings struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // OpenSearch index prefix + IndexPrefix string `json:"index-prefix,omitempty" validate:"omitempty,gte=1,lte=1000"` + // OpenSearch request timeout limit + Timeout int64 `json:"timeout,omitempty" validate:"omitempty,gte=10,lte=120"` + // OpenSearch connection URL + URL string `json:"url,omitempty" validate:"omitempty,gte=12,lte=2048"` +} + +type DBAASEndpointOpensearchInputUpdate struct { + Settings *DBAASEndpointOpensearchInputUpdateSettings `json:"settings,omitempty"` +} + +type DBAASEndpointOpensearchOptionalFields struct { + // Maximum number of days of logs to keep + IndexDaysMax int64 `json:"index-days-max,omitempty" validate:"omitempty,gte=1,lte=10000"` + // OpenSearch index prefix + IndexPrefix string `json:"index-prefix,omitempty" validate:"omitempty,gte=1,lte=1000"` + // OpenSearch request timeout limit + Timeout int64 `json:"timeout,omitempty" validate:"omitempty,gte=10,lte=120"` + // OpenSearch connection URL + URL string `json:"url,omitempty" validate:"omitempty,gte=12,lte=2048"` +} + +type DBAASEndpointOpensearchOutput struct { + // External integration endpoint id + ID UUID `json:"id,omitempty"` + // External integration endpoint name + Name string `json:"name,omitempty"` + Settings *DBAASEndpointOpensearchOptionalFields `json:"settings,omitempty"` + Type EnumExternalEndpointTypes `json:"type,omitempty"` +} + +type DBAASEndpointOpensearchSecrets struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` +} + +type DBAASEndpointPrometheus struct { + // Prometheus basic authentication username + BasicAuthUsername string `json:"basic-auth-username,omitempty" 
validate:"omitempty,gte=5,lte=32"` +} + +type DBAASEndpointPrometheusPayloadSettings struct { + // Prometheus basic authentication password + BasicAuthPassword string `json:"basic-auth-password,omitempty" validate:"omitempty,gte=8,lte=64"` + // Prometheus basic authentication username + BasicAuthUsername string `json:"basic-auth-username,omitempty" validate:"omitempty,gte=5,lte=32"` +} + +type DBAASEndpointPrometheusPayload struct { + Settings *DBAASEndpointPrometheusPayloadSettings `json:"settings,omitempty"` +} + +type DBAASEndpointPrometheusSecrets struct { + // Prometheus basic authentication password + BasicAuthPassword string `json:"basic-auth-password,omitempty" validate:"omitempty,gte=8,lte=64"` +} + +type DBAASEndpointRsyslog struct { + Format EnumRsyslogFormat `json:"format" validate:"required"` + // Custom syslog message format + Logline string `json:"logline,omitempty" validate:"omitempty,gte=1,lte=512"` + // Rsyslog max message size + MaxMessageSize int64 `json:"max-message-size,omitempty" validate:"omitempty,gte=2048,lte=2.147483647e+09"` + // Rsyslog server port + Port int64 `json:"port" validate:"required,gte=1,lte=65535"` + // Structured data block for log message + SD string `json:"sd,omitempty" validate:"omitempty,lte=1024"` + // Rsyslog server IP address or hostname + Server string `json:"server" validate:"required,gte=4,lte=255"` + // Require TLS + Tls *bool `json:"tls" validate:"required"` +} + +type DBAASEndpointRsyslogInputCreateSettings struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` + // PEM encoded client certificate + Cert string `json:"cert,omitempty" validate:"omitempty,lte=16384"` + Format EnumRsyslogFormat `json:"format" validate:"required"` + // PEM encoded client key + Key string `json:"key,omitempty" validate:"omitempty,lte=16384"` + // Custom syslog message format + Logline string `json:"logline,omitempty" validate:"omitempty,gte=1,lte=512"` + // Rsyslog max message size + 
MaxMessageSize int64 `json:"max-message-size,omitempty" validate:"omitempty,gte=2048,lte=2.147483647e+09"` + // Rsyslog server port + Port int64 `json:"port" validate:"required,gte=1,lte=65535"` + // Structured data block for log message + SD string `json:"sd,omitempty" validate:"omitempty,lte=1024"` + // Rsyslog server IP address or hostname + Server string `json:"server" validate:"required,gte=4,lte=255"` + // Require TLS + Tls *bool `json:"tls" validate:"required"` +} + +type DBAASEndpointRsyslogInputCreate struct { + Settings *DBAASEndpointRsyslogInputCreateSettings `json:"settings,omitempty"` +} + +type DBAASEndpointRsyslogInputUpdateSettings struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` + // PEM encoded client certificate + Cert string `json:"cert,omitempty" validate:"omitempty,lte=16384"` + Format EnumRsyslogFormat `json:"format,omitempty"` + // PEM encoded client key + Key string `json:"key,omitempty" validate:"omitempty,lte=16384"` + // Custom syslog message format + Logline string `json:"logline,omitempty" validate:"omitempty,gte=1,lte=512"` + // Rsyslog max message size + MaxMessageSize int64 `json:"max-message-size,omitempty" validate:"omitempty,gte=2048,lte=2.147483647e+09"` + // Rsyslog server port + Port int64 `json:"port,omitempty" validate:"omitempty,gte=1,lte=65535"` + // Structured data block for log message + SD string `json:"sd,omitempty" validate:"omitempty,lte=1024"` + // Rsyslog server IP address or hostname + Server string `json:"server,omitempty" validate:"omitempty,gte=4,lte=255"` + // Require TLS + Tls *bool `json:"tls,omitempty"` +} + +type DBAASEndpointRsyslogInputUpdate struct { + Settings *DBAASEndpointRsyslogInputUpdateSettings `json:"settings,omitempty"` +} + +type DBAASEndpointRsyslogOptionalFields struct { + Format EnumRsyslogFormat `json:"format,omitempty"` + // Custom syslog message format + Logline string `json:"logline,omitempty" validate:"omitempty,gte=1,lte=512"` + 
// Rsyslog max message size + MaxMessageSize int64 `json:"max-message-size,omitempty" validate:"omitempty,gte=2048,lte=2.147483647e+09"` + // Rsyslog server port + Port int64 `json:"port,omitempty" validate:"omitempty,gte=1,lte=65535"` + // Structured data block for log message + SD string `json:"sd,omitempty" validate:"omitempty,lte=1024"` + // Rsyslog server IP address or hostname + Server string `json:"server,omitempty" validate:"omitempty,gte=4,lte=255"` + // Require TLS + Tls *bool `json:"tls,omitempty"` +} + +type DBAASEndpointRsyslogSecrets struct { + // PEM encoded CA certificate + CA string `json:"ca,omitempty" validate:"omitempty,lte=16384"` + // PEM encoded client certificate + Cert string `json:"cert,omitempty" validate:"omitempty,lte=16384"` + // PEM encoded client key + Key string `json:"key,omitempty" validate:"omitempty,lte=16384"` +} + +type DBAASExternalEndpoint struct { + // External integration endpoint id + ID UUID `json:"id,omitempty"` + // External integration endpoint name + Name string `json:"name,omitempty"` + Type EnumExternalEndpointTypes `json:"type,omitempty"` +} + +// External integration DataDog configuration +type DBAASExternalEndpointDatadogOutputSettings struct { + // Custom tags provided by user + DatadogTags []DBAASDatadogTag `json:"datadog-tags,omitempty"` + // Disable kafka consumer group metrics. Applies only when attached to kafka services. + DisableConsumerStats *bool `json:"disable-consumer-stats,omitempty"` + // Number of separate instances to fetch kafka consumer statistics with. Applies only when attached to kafka services. + KafkaConsumerCheckInstances int64 `json:"kafka-consumer-check-instances,omitempty" validate:"omitempty,gte=1,lte=100"` + // Number of seconds that datadog will wait to get consumer statistics from brokers. Applies only when attached to kafka services. 
+ KafkaConsumerStatsTimeout int64 `json:"kafka-consumer-stats-timeout,omitempty" validate:"omitempty,gte=2,lte=300"` + // Maximum number of partition contexts to send. Applies only when attached to kafka services. + MaxPartitionContexts int64 `json:"max-partition-contexts,omitempty" validate:"omitempty,gte=200,lte=200000"` + Site EnumDatadogSite `json:"site,omitempty"` +} + +type DBAASExternalEndpointDatadogOutput struct { + // External integration endpoint id + ID UUID `json:"id,omitempty"` + // External integration endpoint name + Name string `json:"name,omitempty"` + // External integration DataDog configuration + Settings *DBAASExternalEndpointDatadogOutputSettings `json:"settings,omitempty"` + Type EnumExternalEndpointTypes `json:"type,omitempty"` +} + +type DBAASExternalEndpointRsyslogOutput struct { + // External integration endpoint id + ID UUID `json:"id,omitempty"` + // External integration endpoint name + Name string `json:"name,omitempty"` + Settings *DBAASEndpointRsyslogOptionalFields `json:"settings,omitempty"` + Type EnumExternalEndpointTypes `json:"type,omitempty"` +} + +// Integrations with other services +type DBAASExternalIntegration struct { + // Description of the integration + Description string `json:"description" validate:"required"` + // External destination endpoint id + DestEndpointID string `json:"dest-endpoint-id,omitempty"` + // External destination endpoint name + DestEndpointName string `json:"dest-endpoint-name,omitempty"` + // Endpoint integration UUID + IntegrationID UUID `json:"integration-id,omitempty"` + // DBaaS source service name + SourceServiceName string `json:"source-service-name" validate:"required"` + SourceServiceType DBAASServiceTypeName `json:"source-service-type" validate:"required,gte=0,lte=64"` + // Integration status + Status string `json:"status,omitempty"` + Type EnumExternalEndpointTypes `json:"type" validate:"required"` +} + type DBAASIntegration struct { // Description of the integration Description string 
`json:"description,omitempty"` @@ -212,6 +622,13 @@ type DBAASIntegration struct { Type string `json:"type,omitempty"` } +type DBAASIntegrationSettingsDatadog struct { + // Database monitoring: view query metrics, explain plans, and execution details. Correlate queries with host metrics. + DatadogDbmEnabled *bool `json:"datadog-dbm-enabled,omitempty"` + // Integrate PgBouncer with Datadog to track connection pool metrics and monitor application traffic. + DatadogPgbouncerEnabled *bool `json:"datadog-pgbouncer-enabled,omitempty"` +} + // A JSON schema of additional settings of the integration. type DBAASIntegrationTypeSettings struct { AdditionalProperties *bool `json:"additionalProperties,omitempty"` @@ -392,8 +809,8 @@ type DBAASPGTargetVersions string const ( DBAASPGTargetVersions14 DBAASPGTargetVersions = "14" + DBAASPGTargetVersions17 DBAASPGTargetVersions = "17" DBAASPGTargetVersions15 DBAASPGTargetVersions = "15" - DBAASPGTargetVersions12 DBAASPGTargetVersions = "12" DBAASPGTargetVersions13 DBAASPGTargetVersions = "13" DBAASPGTargetVersions16 DBAASPGTargetVersions = "16" ) @@ -406,6 +823,8 @@ type DBAASPlan struct { BackupConfig *DBAASBackupConfig `json:"backup-config,omitempty"` // DBaaS plan disk space DiskSpace int64 `json:"disk-space,omitempty"` + // Instance family subset which the service can use + Family string `json:"family,omitempty"` // DBaaS plan max memory allocated percentage MaxMemoryPercent int64 `json:"max-memory-percent,omitempty" validate:"omitempty,gt=0"` // DBaaS plan name @@ -416,6 +835,8 @@ type DBAASPlan struct { NodeCPUCount int64 `json:"node-cpu-count,omitempty" validate:"omitempty,gt=0"` // DBaaS plan memory count per node NodeMemory int64 `json:"node-memory,omitempty" validate:"omitempty,gt=0"` + // Zones where the plan is available + Zones []string `json:"zones,omitempty"` } type DBAASPostgresUsersUsers struct { @@ -578,38 +999,6 @@ type DBAASServiceGrafana struct { Zone string `json:"zone,omitempty"` } -// Integrations with other 
services -type DBAASServiceIntegration struct { - // True when integration is active - Active *bool `json:"active" validate:"required"` - // Description of the integration - Description string `json:"description" validate:"required"` - // Destination endpoint name - DestEndpoint string `json:"dest-endpoint,omitempty"` - // Destination endpoint id - DestEndpointID string `json:"dest-endpoint-id,omitempty"` - // Destination service name - DestService string `json:"dest-service" validate:"required"` - DestServiceType DBAASServiceTypeName `json:"dest-service-type" validate:"required,gte=0,lte=64"` - // True when integration is enabled - Enabled *bool `json:"enabled" validate:"required"` - // Integration status - IntegrationStatus map[string]any `json:"integration-status,omitempty"` - // Type of the integration - IntegrationType string `json:"integration-type" validate:"required"` - // Integration ID - ServiceIntegrationID string `json:"service-integration-id" validate:"required"` - // Source endpoint name - SourceEndpoint string `json:"source-endpoint,omitempty"` - // Source endpoint ID - SourceEndpointID string `json:"source-endpoint-id,omitempty"` - // Source service name - SourceService string `json:"source-service" validate:"required"` - SourceServiceType DBAASServiceTypeName `json:"source-service-type" validate:"required,gte=0,lte=64"` - // Service integration settings - UserConfig map[string]any `json:"user-config,omitempty"` -} - // Kafka authentication methods type DBAASServiceKafkaAuthenticationMethods struct { // Whether certificate/SSL authentication is enabled @@ -1239,6 +1628,14 @@ type DBAASUserGrafanaSecrets struct { Username string `json:"username,omitempty"` } +// Kafka Connect secrets +type DBAASUserKafkaConnectSecrets struct { + // Kafka Connect password + Password string `json:"password,omitempty"` + // Kafka Connect username + Username string `json:"username,omitempty"` +} + // Kafka User secrets type DBAASUserKafkaSecrets struct { // Kafka 
certificate @@ -1430,6 +1827,27 @@ const ( EnumComponentUsageReplica EnumComponentUsage = "replica" ) +type EnumDatadogSite string + +const ( + EnumDatadogSiteUs3DatadoghqCom EnumDatadogSite = "us3.datadoghq.com" + EnumDatadogSiteDdogGovCom EnumDatadogSite = "ddog-gov.com" + EnumDatadogSiteDatadoghqEU EnumDatadogSite = "datadoghq.eu" + EnumDatadogSiteUs5DatadoghqCom EnumDatadogSite = "us5.datadoghq.com" + EnumDatadogSiteAp1DatadoghqCom EnumDatadogSite = "ap1.datadoghq.com" + EnumDatadogSiteDatadoghqCom EnumDatadogSite = "datadoghq.com" +) + +type EnumExternalEndpointTypes string + +const ( + EnumExternalEndpointTypesPrometheus EnumExternalEndpointTypes = "prometheus" + EnumExternalEndpointTypesOpensearch EnumExternalEndpointTypes = "opensearch" + EnumExternalEndpointTypesRsyslog EnumExternalEndpointTypes = "rsyslog" + EnumExternalEndpointTypesDatadog EnumExternalEndpointTypes = "datadog" + EnumExternalEndpointTypesElasticsearch EnumExternalEndpointTypes = "elasticsearch" +) + type EnumIntegrationTypes string const ( @@ -1507,6 +1925,14 @@ const ( EnumPGVariantAiven EnumPGVariant = "aiven" ) +type EnumRsyslogFormat string + +const ( + EnumRsyslogFormatCustom EnumRsyslogFormat = "custom" + EnumRsyslogFormatRfc3164 EnumRsyslogFormat = "rfc3164" + EnumRsyslogFormatRfc5424 EnumRsyslogFormat = "rfc5424" +) + type EnumServiceState string const ( @@ -1779,14 +2205,16 @@ type InstanceTarget struct { type InstanceTypeFamily string const ( - InstanceTypeFamilyGpu3 InstanceTypeFamily = "gpu3" - InstanceTypeFamilyGpu2 InstanceTypeFamily = "gpu2" - InstanceTypeFamilyGpu InstanceTypeFamily = "gpu" - InstanceTypeFamilyMemory InstanceTypeFamily = "memory" - InstanceTypeFamilyStorage InstanceTypeFamily = "storage" - InstanceTypeFamilyStandard InstanceTypeFamily = "standard" - InstanceTypeFamilyColossus InstanceTypeFamily = "colossus" - InstanceTypeFamilyCPU InstanceTypeFamily = "cpu" + InstanceTypeFamilyGpu3 InstanceTypeFamily = "gpu3" + InstanceTypeFamilyGpu3080ti 
InstanceTypeFamily = "gpu3080ti" + InstanceTypeFamilyGpu2 InstanceTypeFamily = "gpu2" + InstanceTypeFamilyGpu InstanceTypeFamily = "gpu" + InstanceTypeFamilyMemory InstanceTypeFamily = "memory" + InstanceTypeFamilyGpua5000 InstanceTypeFamily = "gpua5000" + InstanceTypeFamilyStorage InstanceTypeFamily = "storage" + InstanceTypeFamilyStandard InstanceTypeFamily = "standard" + InstanceTypeFamilyColossus InstanceTypeFamily = "colossus" + InstanceTypeFamilyCPU InstanceTypeFamily = "cpu" ) type InstanceTypeSize string @@ -1883,6 +2311,8 @@ type JSONSchemaGrafana struct { AuthGoogle map[string]any `json:"auth_google,omitempty"` // Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value. CookieSamesite JSONSchemaGrafanaCookieSamesite `json:"cookie_samesite,omitempty"` + // Serve the web frontend using a custom CNAME pointing to the Aiven DNS name + CustomDomain *string `json:"custom_domain,omitempty" validate:"omitempty,lte=255"` // This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering. DashboardPreviewsEnabled *bool `json:"dashboard_previews_enabled,omitempty"` // Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h @@ -1917,25 +2347,592 @@ type JSONSchemaGrafana struct { UserAutoAssignOrgRole JSONSchemaGrafanaUserAutoAssignOrgRole `json:"user_auto_assign_org_role,omitempty"` // Users with view-only permission can edit but not save dashboards ViewersCanEdit *bool `json:"viewers_can_edit,omitempty"` + // Setting to enable/disable Write-Ahead Logging. The default value is false (disabled). 
+ Wal *bool `json:"wal,omitempty"` +} + +type JSONSchemaKafkaCompressionType string + +const ( + JSONSchemaKafkaCompressionTypeGzip JSONSchemaKafkaCompressionType = "gzip" + JSONSchemaKafkaCompressionTypeSnappy JSONSchemaKafkaCompressionType = "snappy" + JSONSchemaKafkaCompressionTypeLz4 JSONSchemaKafkaCompressionType = "lz4" + JSONSchemaKafkaCompressionTypeZstd JSONSchemaKafkaCompressionType = "zstd" + JSONSchemaKafkaCompressionTypeUncompressed JSONSchemaKafkaCompressionType = "uncompressed" + JSONSchemaKafkaCompressionTypeProducer JSONSchemaKafkaCompressionType = "producer" +) + +// Configure log cleaner for topic compaction +type JSONSchemaKafkaLogCleanupAndCompaction struct { + // How long are delete records retained? + LogCleanerDeleteRetentionMS int `json:"log_cleaner_delete_retention_ms,omitempty" validate:"omitempty,gte=0,lte=3.1556926e+11"` + // The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted + LogCleanerMaxCompactionLagMS int `json:"log_cleaner_max_compaction_lag_ms,omitempty" validate:"omitempty,gte=30000,lte=9.223372036854776e+18"` + // Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. + LogCleanerMinCleanableRatio float64 `json:"log_cleaner_min_cleanable_ratio,omitempty" validate:"omitempty,gte=0.2,lte=0.9"` + // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. 
+ LogCleanerMinCompactionLagMS int `json:"log_cleaner_min_compaction_lag_ms,omitempty" validate:"omitempty,gte=0,lte=9.223372036854776e+18"` + // The default cleanup policy for segments beyond the retention window + LogCleanupPolicy string `json:"log_cleanup_policy,omitempty"` } +type JSONSchemaKafkaLogMessageTimestampType string + +const ( + JSONSchemaKafkaLogMessageTimestampTypeCreateTime JSONSchemaKafkaLogMessageTimestampType = "CreateTime" + JSONSchemaKafkaLogMessageTimestampTypeLogAppendTime JSONSchemaKafkaLogMessageTimestampType = "LogAppendTime" +) + // Kafka broker configuration values -type JSONSchemaKafka map[string]any +type JSONSchemaKafka struct { + // Enable auto creation of topics + AutoCreateTopicsEnable *bool `json:"auto_create_topics_enable,omitempty"` + // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer. + CompressionType JSONSchemaKafkaCompressionType `json:"compression_type,omitempty"` + // Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. + ConnectionsMaxIdleMS int `json:"connections_max_idle_ms,omitempty" validate:"omitempty,gte=1000,lte=3.6e+06"` + // Replication factor for autocreated topics + DefaultReplicationFactor int `json:"default_replication_factor,omitempty" validate:"omitempty,gte=1,lte=10"` + // The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. 
+ GroupInitialRebalanceDelayMS int `json:"group_initial_rebalance_delay_ms,omitempty" validate:"omitempty,gte=0,lte=300000"` + // The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMaxSessionTimeoutMS int `json:"group_max_session_timeout_ms,omitempty" validate:"omitempty,gte=0,lte=1.8e+06"` + // The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + GroupMinSessionTimeoutMS int `json:"group_min_session_timeout_ms,omitempty" validate:"omitempty,gte=0,lte=60000"` + // Configure log cleaner for topic compaction + LogCleanupAndCompaction *JSONSchemaKafkaLogCleanupAndCompaction `json:"log-cleanup-and-compaction,omitempty"` + // The number of messages accumulated on a log partition before messages are flushed to disk + LogFlushIntervalMessages int `json:"log_flush_interval_messages,omitempty" validate:"omitempty,gte=1,lte=9.223372036854776e+18"` + // The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used + LogFlushIntervalMS int `json:"log_flush_interval_ms,omitempty" validate:"omitempty,gte=0,lte=9.223372036854776e+18"` + // The interval with which Kafka adds an entry to the offset index + LogIndexIntervalBytes int `json:"log_index_interval_bytes,omitempty" validate:"omitempty,gte=0,lte=1.048576e+08"` + // The maximum size in bytes of the offset index + LogIndexSizeMaxBytes int `json:"log_index_size_max_bytes,omitempty" validate:"omitempty,gte=1.048576e+06,lte=1.048576e+08"` + // The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. 
The effective value should always be less than or equal to log.retention.bytes value. + LogLocalRetentionBytes int `json:"log_local_retention_bytes,omitempty" validate:"omitempty,gte=-2,lte=9.223372036854776e+18"` + // The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. + LogLocalRetentionMS int `json:"log_local_retention_ms,omitempty" validate:"omitempty,gte=-2,lte=9.223372036854776e+18"` + // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. + LogMessageDownconversionEnable *bool `json:"log_message_downconversion_enable,omitempty"` + // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message + LogMessageTimestampDifferenceMaxMS int `json:"log_message_timestamp_difference_max_ms,omitempty" validate:"omitempty,gte=0,lte=9.223372036854776e+18"` + // Define whether the timestamp in the message is message create time or log append time. + LogMessageTimestampType JSONSchemaKafkaLogMessageTimestampType `json:"log_message_timestamp_type,omitempty"` + // Should pre allocate file when create new segment? + LogPreallocate *bool `json:"log_preallocate,omitempty"` + // The maximum size of the log before deleting messages + LogRetentionBytes int `json:"log_retention_bytes,omitempty" validate:"omitempty,gte=-1,lte=9.223372036854776e+18"` + // The number of hours to keep a log file before deleting it + LogRetentionHours int `json:"log_retention_hours,omitempty" validate:"omitempty,gte=-1,lte=2.147483647e+09"` + // The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. 
+ LogRetentionMS int `json:"log_retention_ms,omitempty" validate:"omitempty,gte=-1,lte=9.223372036854776e+18"` + // The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used + LogRollJitterMS int `json:"log_roll_jitter_ms,omitempty" validate:"omitempty,gte=0,lte=9.223372036854776e+18"` + // The maximum time before a new log segment is rolled out (in milliseconds). + LogRollMS int `json:"log_roll_ms,omitempty" validate:"omitempty,gte=1,lte=9.223372036854776e+18"` + // The maximum size of a single log file + LogSegmentBytes int `json:"log_segment_bytes,omitempty" validate:"omitempty,gte=1.048576e+07,lte=1.073741824e+09"` + // The amount of time to wait before deleting a file from the filesystem + LogSegmentDeleteDelayMS int `json:"log_segment_delete_delay_ms,omitempty" validate:"omitempty,gte=0,lte=3.6e+06"` + // The maximum number of connections allowed from each ip address (defaults to 2147483647). + MaxConnectionsPerIP int `json:"max_connections_per_ip,omitempty" validate:"omitempty,gte=256,lte=2.147483647e+09"` + // The maximum number of incremental fetch sessions that the broker will maintain. + MaxIncrementalFetchSessionCacheSlots int `json:"max_incremental_fetch_session_cache_slots,omitempty" validate:"omitempty,gte=1000,lte=10000"` + // The maximum size of message that the server can receive. + MessageMaxBytes int `json:"message_max_bytes,omitempty" validate:"omitempty,gte=0,lte=1.000012e+08"` + // When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. 
+ MinInsyncReplicas int `json:"min_insync_replicas,omitempty" validate:"omitempty,gte=1,lte=7"` + // Number of partitions for autocreated topics + NumPartitions int `json:"num_partitions,omitempty" validate:"omitempty,gte=1,lte=1000"` + // Log retention window in minutes for offsets topic + OffsetsRetentionMinutes int `json:"offsets_retention_minutes,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` + // The purge interval (in number of requests) of the producer request purgatory(defaults to 1000). + ProducerPurgatoryPurgeIntervalRequests int `json:"producer_purgatory_purge_interval_requests,omitempty" validate:"omitempty,gte=10,lte=10000"` + // The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. + ReplicaFetchMaxBytes int `json:"replica_fetch_max_bytes,omitempty" validate:"omitempty,gte=1.048576e+06,lte=1.048576e+08"` + // Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. + ReplicaFetchResponseMaxBytes int `json:"replica_fetch_response_max_bytes,omitempty" validate:"omitempty,gte=1.048576e+07,lte=1.048576e+09"` + // The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. + SaslOauthbearerExpectedAudience string `json:"sasl_oauthbearer_expected_audience,omitempty" validate:"omitempty,lte=128"` + // Optional setting for the broker to use to verify that the JWT was created by the expected issuer. 
+ SaslOauthbearerExpectedIssuer string `json:"sasl_oauthbearer_expected_issuer,omitempty" validate:"omitempty,lte=128"` + // OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. + SaslOauthbearerJwksEndpointURL string `json:"sasl_oauthbearer_jwks_endpoint_url,omitempty" validate:"omitempty,lte=2048"` + // Name of the scope from which to extract the subject claim from the JWT. Defaults to sub. + SaslOauthbearerSubClaimName string `json:"sasl_oauthbearer_sub_claim_name,omitempty" validate:"omitempty,lte=128"` + // The maximum number of bytes in a socket request (defaults to 104857600). + SocketRequestMaxBytes int `json:"socket_request_max_bytes,omitempty" validate:"omitempty,gte=1.048576e+07,lte=2.097152e+08"` + // Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition + TransactionPartitionVerificationEnable *bool `json:"transaction_partition_verification_enable,omitempty"` + // The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)). + TransactionRemoveExpiredTransactionCleanupIntervalMS int `json:"transaction_remove_expired_transaction_cleanup_interval_ms,omitempty" validate:"omitempty,gte=600000,lte=3.6e+06"` + // The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)). 
+ TransactionStateLogSegmentBytes int `json:"transaction_state_log_segment_bytes,omitempty" validate:"omitempty,gte=1.048576e+06,lte=2.147483647e+09"` +} + +type JSONSchemaKafkaConnectConnectorClientConfigOverridePolicy string + +const ( + JSONSchemaKafkaConnectConnectorClientConfigOverridePolicyNone JSONSchemaKafkaConnectConnectorClientConfigOverridePolicy = "None" + JSONSchemaKafkaConnectConnectorClientConfigOverridePolicyAll JSONSchemaKafkaConnectConnectorClientConfigOverridePolicy = "All" +) + +type JSONSchemaKafkaConnectConsumerAutoOffsetReset string + +const ( + JSONSchemaKafkaConnectConsumerAutoOffsetResetEarliest JSONSchemaKafkaConnectConsumerAutoOffsetReset = "earliest" + JSONSchemaKafkaConnectConsumerAutoOffsetResetLatest JSONSchemaKafkaConnectConsumerAutoOffsetReset = "latest" +) + +type JSONSchemaKafkaConnectConsumerIsolationLevel string + +const ( + JSONSchemaKafkaConnectConsumerIsolationLevelReadUncommitted JSONSchemaKafkaConnectConsumerIsolationLevel = "read_uncommitted" + JSONSchemaKafkaConnectConsumerIsolationLevelReadCommitted JSONSchemaKafkaConnectConsumerIsolationLevel = "read_committed" +) + +type JSONSchemaKafkaConnectProducerCompressionType string + +const ( + JSONSchemaKafkaConnectProducerCompressionTypeGzip JSONSchemaKafkaConnectProducerCompressionType = "gzip" + JSONSchemaKafkaConnectProducerCompressionTypeSnappy JSONSchemaKafkaConnectProducerCompressionType = "snappy" + JSONSchemaKafkaConnectProducerCompressionTypeLz4 JSONSchemaKafkaConnectProducerCompressionType = "lz4" + JSONSchemaKafkaConnectProducerCompressionTypeZstd JSONSchemaKafkaConnectProducerCompressionType = "zstd" + JSONSchemaKafkaConnectProducerCompressionTypeNone JSONSchemaKafkaConnectProducerCompressionType = "none" +) // Kafka Connect configuration values -type JSONSchemaKafkaConnect map[string]any +type JSONSchemaKafkaConnect struct { + // Defines what client configurations can be overridden by the connector. 
Default is None + ConnectorClientConfigOverridePolicy JSONSchemaKafkaConnectConnectorClientConfigOverridePolicy `json:"connector_client_config_override_policy,omitempty"` + // What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest + ConsumerAutoOffsetReset JSONSchemaKafkaConnectConsumerAutoOffsetReset `json:"consumer_auto_offset_reset,omitempty"` + // Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. + ConsumerFetchMaxBytes int `json:"consumer_fetch_max_bytes,omitempty" validate:"omitempty,gte=1.048576e+06,lte=1.048576e+08"` + // Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. + ConsumerIsolationLevel JSONSchemaKafkaConnectConsumerIsolationLevel `json:"consumer_isolation_level,omitempty"` + // Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. + ConsumerMaxPartitionFetchBytes int `json:"consumer_max_partition_fetch_bytes,omitempty" validate:"omitempty,gte=1.048576e+06,lte=1.048576e+08"` + // The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000). + ConsumerMaxPollIntervalMS int `json:"consumer_max_poll_interval_ms,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` + // The maximum number of records returned in a single call to poll() (defaults to 500). 
+ ConsumerMaxPollRecords int `json:"consumer_max_poll_records,omitempty" validate:"omitempty,gte=1,lte=10000"` + // The interval at which to try committing offsets for tasks (defaults to 60000). + OffsetFlushIntervalMS int `json:"offset_flush_interval_ms,omitempty" validate:"omitempty,gte=1,lte=1e+08"` + // Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000). + OffsetFlushTimeoutMS int `json:"offset_flush_timeout_ms,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` + // This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384). + ProducerBatchSize int `json:"producer_batch_size,omitempty" validate:"omitempty,gte=0,lte=5.24288e+06"` + // The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432). + ProducerBufferMemory int `json:"producer_buffer_memory,omitempty" validate:"omitempty,gte=5.24288e+06,lte=1.34217728e+08"` + // Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression. 
+ ProducerCompressionType JSONSchemaKafkaConnectProducerCompressionType `json:"producer_compression_type,omitempty"` + // This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0. + ProducerLingerMS int `json:"producer_linger_ms,omitempty" validate:"omitempty,gte=0,lte=5000"` + // This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. + ProducerMaxRequestSize int `json:"producer_max_request_size,omitempty" validate:"omitempty,gte=131072,lte=6.7108864e+07"` + // The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes. + ScheduledRebalanceMaxDelayMS int `json:"scheduled_rebalance_max_delay_ms,omitempty" validate:"omitempty,gte=0,lte=600000"` + // The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000). 
+ SessionTimeoutMS int `json:"session_timeout_ms,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` +} + +type JSONSchemaKafkaRestConsumerRequestTimeoutMS int + +const ( + JSONSchemaKafkaRestConsumerRequestTimeoutMS1000 JSONSchemaKafkaRestConsumerRequestTimeoutMS = 1000 + JSONSchemaKafkaRestConsumerRequestTimeoutMS15000 JSONSchemaKafkaRestConsumerRequestTimeoutMS = 15000 + JSONSchemaKafkaRestConsumerRequestTimeoutMS30000 JSONSchemaKafkaRestConsumerRequestTimeoutMS = 30000 +) + +type JSONSchemaKafkaRestNameStrategy string + +const ( + JSONSchemaKafkaRestNameStrategyTopicName JSONSchemaKafkaRestNameStrategy = "topic_name" + JSONSchemaKafkaRestNameStrategyRecordName JSONSchemaKafkaRestNameStrategy = "record_name" + JSONSchemaKafkaRestNameStrategyTopicRecordName JSONSchemaKafkaRestNameStrategy = "topic_record_name" +) + +type JSONSchemaKafkaRestProducerCompressionType string + +const ( + JSONSchemaKafkaRestProducerCompressionTypeGzip JSONSchemaKafkaRestProducerCompressionType = "gzip" + JSONSchemaKafkaRestProducerCompressionTypeSnappy JSONSchemaKafkaRestProducerCompressionType = "snappy" + JSONSchemaKafkaRestProducerCompressionTypeLz4 JSONSchemaKafkaRestProducerCompressionType = "lz4" + JSONSchemaKafkaRestProducerCompressionTypeZstd JSONSchemaKafkaRestProducerCompressionType = "zstd" + JSONSchemaKafkaRestProducerCompressionTypeNone JSONSchemaKafkaRestProducerCompressionType = "none" +) // Kafka REST configuration -type JSONSchemaKafkaRest map[string]any +type JSONSchemaKafkaRest struct { + // If true the consumer's offset will be periodically committed to Kafka in the background + ConsumerEnableAutoCommit *bool `json:"consumer_enable_auto_commit,omitempty"` + // Maximum number of bytes in unencoded message keys and values by a single request + ConsumerRequestMaxBytes int `json:"consumer_request_max_bytes,omitempty" validate:"omitempty,gte=0,lte=6.7108864e+08"` + // The maximum total time to wait for messages for a request if the maximum number of messages has 
not yet been reached + ConsumerRequestTimeoutMS JSONSchemaKafkaRestConsumerRequestTimeoutMS `json:"consumer_request_timeout_ms,omitempty"` + // Name strategy to use when selecting subject for storing schemas + NameStrategy JSONSchemaKafkaRestNameStrategy `json:"name_strategy,omitempty"` + // If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. + NameStrategyValidation *bool `json:"name_strategy_validation,omitempty"` + // The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. + ProducerAcks string `json:"producer_acks,omitempty"` + // Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression. + ProducerCompressionType JSONSchemaKafkaRestProducerCompressionType `json:"producer_compression_type,omitempty"` + // Wait for up to the given delay to allow batching records together + ProducerLingerMS int `json:"producer_linger_ms,omitempty" validate:"omitempty,gte=0,lte=5000"` + // The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. 
+ ProducerMaxRequestSize int `json:"producer_max_request_size,omitempty" validate:"omitempty,gte=0,lte=2.147483647e+09"` + // Maximum number of SimpleConsumers that can be instantiated per broker + SimpleconsumerPoolSizeMax int `json:"simpleconsumer_pool_size_max,omitempty" validate:"omitempty,gte=10,lte=250"` +} + +type JSONSchemaMysqlInternalTmpMemStorageEngine string + +const ( + JSONSchemaMysqlInternalTmpMemStorageEngineTempTable JSONSchemaMysqlInternalTmpMemStorageEngine = "TempTable" + JSONSchemaMysqlInternalTmpMemStorageEngineMEMORY JSONSchemaMysqlInternalTmpMemStorageEngine = "MEMORY" +) // mysql.conf configuration values -type JSONSchemaMysql map[string]any +type JSONSchemaMysql struct { + // The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake + ConnectTimeout int `json:"connect_timeout,omitempty" validate:"omitempty,gte=2,lte=3600"` + // Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default. + DefaultTimeZone string `json:"default_time_zone,omitempty" validate:"omitempty,gte=2,lte=100"` + // The maximum permitted result length in bytes for the GROUP_CONCAT() function. + GroupConcatMaxLen int `json:"group_concat_max_len,omitempty" validate:"omitempty,gte=4,lte=1.8446744073709552e+19"` + // The time, in seconds, before cached statistics expire + InformationSchemaStatsExpiry int `json:"information_schema_stats_expiry,omitempty" validate:"omitempty,gte=900,lte=3.1536e+07"` + // Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. 
Default is 25 + InnodbChangeBufferMaxSize int `json:"innodb_change_buffer_max_size,omitempty" validate:"omitempty,gte=0,lte=50"` + // Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent + InnodbFlushNeighbors int `json:"innodb_flush_neighbors,omitempty" validate:"omitempty,gte=0,lte=2"` + // Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service. + InnodbFTMinTokenSize int `json:"innodb_ft_min_token_size,omitempty" validate:"omitempty,gte=0,lte=16"` + // This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables. + InnodbFTServerStopwordTable *string `json:"innodb_ft_server_stopword_table,omitempty" validate:"omitempty,lte=1024"` + // The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120. + InnodbLockWaitTimeout int `json:"innodb_lock_wait_timeout,omitempty" validate:"omitempty,gte=1,lte=3600"` + // The size in bytes of the buffer that InnoDB uses to write to the log files on disk. + InnodbLogBufferSize int `json:"innodb_log_buffer_size,omitempty" validate:"omitempty,gte=1.048576e+06,lte=4.294967295e+09"` + // The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables. + InnodbOnlineAlterLogMaxSize int `json:"innodb_online_alter_log_max_size,omitempty" validate:"omitempty,gte=65536,lte=1.099511627776e+12"` + // When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default. + InnodbPrintAllDeadlocks *bool `json:"innodb_print_all_deadlocks,omitempty"` + // The number of I/O threads for read operations in InnoDB. Default is 4. 
Changing this parameter will lead to a restart of the MySQL service. + InnodbReadIoThreads int `json:"innodb_read_io_threads,omitempty" validate:"omitempty,gte=1,lte=64"` + // When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service. + InnodbRollbackOnTimeout *bool `json:"innodb_rollback_on_timeout,omitempty"` + // Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit) + InnodbThreadConcurrency int `json:"innodb_thread_concurrency,omitempty" validate:"omitempty,gte=0,lte=1000"` + // The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. + InnodbWriteIoThreads int `json:"innodb_write_io_threads,omitempty" validate:"omitempty,gte=1,lte=64"` + // The number of seconds the server waits for activity on an interactive connection before closing it. + InteractiveTimeout int `json:"interactive_timeout,omitempty" validate:"omitempty,gte=30,lte=604800"` + // The storage engine for in-memory internal temporary tables. + InternalTmpMemStorageEngine JSONSchemaMysqlInternalTmpMemStorageEngine `json:"internal_tmp_mem_storage_engine,omitempty"` + // The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE. + LogOutput string `json:"log_output,omitempty"` + // The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s + LongQueryTime float64 `json:"long_query_time,omitempty" validate:"omitempty,gte=0,lte=3600"` + // Size of the largest message in bytes that can be received by the server. 
Default is 67108864 (64M) + MaxAllowedPacket int `json:"max_allowed_packet,omitempty" validate:"omitempty,gte=102400,lte=1.073741824e+09"` + // Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M) + MaxHeapTableSize int `json:"max_heap_table_size,omitempty" validate:"omitempty,gte=1.048576e+06,lte=1.073741824e+09"` + // Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. + NetBufferLength int `json:"net_buffer_length,omitempty" validate:"omitempty,gte=1024,lte=1.048576e+06"` + // The number of seconds to wait for more data from a connection before aborting the read. + NetReadTimeout int `json:"net_read_timeout,omitempty" validate:"omitempty,gte=1,lte=3600"` + // The number of seconds to wait for a block to be written to a connection before aborting the write. + NetWriteTimeout int `json:"net_write_timeout,omitempty" validate:"omitempty,gte=1,lte=3600"` + // Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off + SlowQueryLog *bool `json:"slow_query_log,omitempty"` + // Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K) + SortBufferSize int `json:"sort_buffer_size,omitempty" validate:"omitempty,gte=32768,lte=1.073741824e+09"` + // Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned. + SQLMode string `json:"sql_mode,omitempty" validate:"omitempty,lte=1024"` + // Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them. 
+ SQLRequirePrimaryKey *bool `json:"sql_require_primary_key,omitempty"` + // Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M) + TmpTableSize int `json:"tmp_table_size,omitempty" validate:"omitempty,gte=1.048576e+06,lte=1.073741824e+09"` + // The number of seconds the server waits for activity on a noninteractive connection before closing it. + WaitTimeout int `json:"wait_timeout,omitempty" validate:"omitempty,gte=1,lte=2.147483e+06"` +} + +// Opensearch Email Sender Settings +type JSONSchemaOpensearchEmailSender struct { + // This should be identical to the Sender name defined in Opensearch dashboards + EmailSenderName string `json:"email_sender_name" validate:"required,lte=40"` + // Sender password for Opensearch alerts to authenticate with SMTP server + EmailSenderPassword string `json:"email_sender_password" validate:"required,lte=1024"` + // Sender username for Opensearch alerts + EmailSenderUsername string `json:"email_sender_username" validate:"required,lte=320"` +} + +// Opensearch ISM History Settings +type JSONSchemaOpensearchIsmHistory struct { + // Specifies whether ISM is enabled or not + IsmEnabled *bool `json:"ism_enabled" validate:"required"` + // Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. + IsmHistoryEnabled *bool `json:"ism_history_enabled,omitempty"` + // The maximum age before rolling over the audit history index in hours + IsmHistoryMaxAge int `json:"ism_history_max_age,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` + // The maximum number of documents before rolling over the audit history index. + IsmHistoryMaxDocs int `json:"ism_history_max_docs,omitempty" validate:"omitempty,gte=1,lte=9.223372036854776e+18"` + // The time between rollover checks for the audit history index in hours. 
+ IsmHistoryRolloverCheckPeriod int `json:"ism_history_rollover_check_period,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` + // How long audit history indices are kept in days. + IsmHistoryRolloverRetentionPeriod int `json:"ism_history_rollover_retention_period,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` +} // OpenSearch settings -type JSONSchemaOpensearch map[string]any +type JSONSchemaOpensearch struct { + // Explicitly allow or block automatic creation of indices. Defaults to true + ActionAutoCreateIndexEnabled *bool `json:"action_auto_create_index_enabled,omitempty"` + // Require explicit index names when deleting + ActionDestructiveRequiresName *bool `json:"action_destructive_requires_name,omitempty"` + // Opensearch Security Plugin Settings + AuthFailureListeners map[string]any `json:"auth_failure_listeners,omitempty"` + // Controls the number of shards allowed in the cluster per data node + ClusterMaxShardsPerNode int `json:"cluster_max_shards_per_node,omitempty" validate:"omitempty,gte=100,lte=10000"` + // How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2. + ClusterRoutingAllocationNodeConcurrentRecoveries int `json:"cluster_routing_allocation_node_concurrent_recoveries,omitempty" validate:"omitempty,gte=2,lte=16"` + // Opensearch Email Sender Settings + EmailSender *JSONSchemaOpensearchEmailSender `json:"email-sender,omitempty"` + // Enable/Disable security audit + EnableSecurityAudit *bool `json:"enable_security_audit,omitempty"` + // Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes. 
+ HTTPMaxContentLength int `json:"http_max_content_length,omitempty" validate:"omitempty,gte=1,lte=2.147483647e+09"` + // The max size of allowed headers, in bytes + HTTPMaxHeaderSize int `json:"http_max_header_size,omitempty" validate:"omitempty,gte=1024,lte=262144"` + // The max length of an HTTP URL, in bytes + HTTPMaxInitialLineLength int `json:"http_max_initial_line_length,omitempty" validate:"omitempty,gte=1024,lte=65536"` + // Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations. + IndicesFielddataCacheSize *int `json:"indices_fielddata_cache_size,omitempty" validate:"omitempty,gte=3,lte=100"` + // Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance. + IndicesMemoryIndexBufferSize int `json:"indices_memory_index_buffer_size,omitempty" validate:"omitempty,gte=3,lte=40"` + // Absolute value. Default is unbound. Doesn't work without indices.memory.index_buffer_size. Maximum amount of heap used for query cache, an absolute indices.memory.index_buffer_size maximum hard limit. + IndicesMemoryMaxIndexBufferSize int `json:"indices_memory_max_index_buffer_size,omitempty" validate:"omitempty,gte=3,lte=2048"` + // Absolute value. Default is 48mb. Doesn't work without indices.memory.index_buffer_size. Minimum amount of heap used for query cache, an absolute indices.memory.index_buffer_size minimal hard limit. + IndicesMemoryMinIndexBufferSize int `json:"indices_memory_min_index_buffer_size,omitempty" validate:"omitempty,gte=3,lte=2048"` + // Percentage value. Default is 10%. Maximum amount of heap used for query cache. 
This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality. + IndicesQueriesCacheSize int `json:"indices_queries_cache_size,omitempty" validate:"omitempty,gte=3,lte=40"` + // Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value. + IndicesQueryBoolMaxClauseCount int `json:"indices_query_bool_max_clause_count,omitempty" validate:"omitempty,gte=64,lte=4096"` + // Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb + IndicesRecoveryMaxBytesPerSec int `json:"indices_recovery_max_bytes_per_sec,omitempty" validate:"omitempty,gte=40,lte=400"` + // Number of file chunks sent in parallel for each recovery. Defaults to 2. + IndicesRecoveryMaxConcurrentFileChunks int `json:"indices_recovery_max_concurrent_file_chunks,omitempty" validate:"omitempty,gte=2,lte=5"` + // Opensearch ISM History Settings + IsmHistory *JSONSchemaOpensearchIsmHistory `json:"ism-history,omitempty"` + // Enable or disable KNN memory circuit breaker. Defaults to true. + KnnMemoryCircuitBreakerEnabled *bool `json:"knn_memory_circuit_breaker_enabled,omitempty"` + // Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size. + KnnMemoryCircuitBreakerLimit int `json:"knn_memory_circuit_breaker_limit,omitempty" validate:"omitempty,gte=3,lte=100"` + // Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false + OverrideMainResponseVersion *bool `json:"override_main_response_version,omitempty"` + // Enable or disable filtering of alerting by backend roles. Requires Security plugin. 
Defaults to false + PluginsAlertingFilterByBackendRoles *bool `json:"plugins_alerting_filter_by_backend_roles,omitempty"` + // Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart. + ReindexRemoteWhitelist []string `json:"reindex_remote_whitelist"` + // Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context + ScriptMaxCompilationsRate string `json:"script_max_compilations_rate,omitempty" validate:"omitempty,lte=1024"` + // Search Backpressure Settings + SearchBackpressure map[string]any `json:"search_backpressure,omitempty"` + // Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined. + SearchMaxBuckets *int `json:"search_max_buckets,omitempty" validate:"omitempty,gte=1,lte=1e+06"` + // Shard indexing back pressure settings + ShardIndexingPressure map[string]any `json:"shard_indexing_pressure,omitempty"` + // Size for the thread pool queue. See documentation for exact details. + ThreadPoolAnalyzeQueueSize int `json:"thread_pool_analyze_queue_size,omitempty" validate:"omitempty,gte=10,lte=2000"` + // Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. + ThreadPoolAnalyzeSize int `json:"thread_pool_analyze_size,omitempty" validate:"omitempty,gte=1,lte=128"` + // Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. + ThreadPoolForceMergeSize int `json:"thread_pool_force_merge_size,omitempty" validate:"omitempty,gte=1,lte=128"` + // Size for the thread pool queue. See documentation for exact details. 
+ ThreadPoolGetQueueSize int `json:"thread_pool_get_queue_size,omitempty" validate:"omitempty,gte=10,lte=2000"` + // Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. + ThreadPoolGetSize int `json:"thread_pool_get_size,omitempty" validate:"omitempty,gte=1,lte=128"` + // Size for the thread pool queue. See documentation for exact details. + ThreadPoolSearchQueueSize int `json:"thread_pool_search_queue_size,omitempty" validate:"omitempty,gte=10,lte=2000"` + // Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. + ThreadPoolSearchSize int `json:"thread_pool_search_size,omitempty" validate:"omitempty,gte=1,lte=128"` + // Size for the thread pool queue. See documentation for exact details. + ThreadPoolSearchThrottledQueueSize int `json:"thread_pool_search_throttled_queue_size,omitempty" validate:"omitempty,gte=10,lte=2000"` + // Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. + ThreadPoolSearchThrottledSize int `json:"thread_pool_search_throttled_size,omitempty" validate:"omitempty,gte=1,lte=128"` + // Size for the thread pool queue. See documentation for exact details. + ThreadPoolWriteQueueSize int `json:"thread_pool_write_queue_size,omitempty" validate:"omitempty,gte=10,lte=2000"` + // Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. 
+ ThreadPoolWriteSize int `json:"thread_pool_write_size,omitempty" validate:"omitempty,gte=1,lte=128"` +} + +// Autovacuum settings +type JSONSchemaPGAutovacuum struct { + // Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size) + AutovacuumAnalyzeScaleFactor float64 `json:"autovacuum_analyze_scale_factor,omitempty" validate:"omitempty,gte=0,lte=1"` + // Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples. + AutovacuumAnalyzeThreshold int `json:"autovacuum_analyze_threshold,omitempty" validate:"omitempty,gte=0,lte=2.147483647e+09"` + // Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted. + AutovacuumFreezeMaxAge int `json:"autovacuum_freeze_max_age,omitempty" validate:"omitempty,gte=2e+08,lte=1.5e+09"` + // Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start. + AutovacuumMaxWorkers int `json:"autovacuum_max_workers,omitempty" validate:"omitempty,gte=1,lte=20"` + // Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute + AutovacuumNaptime int `json:"autovacuum_naptime,omitempty" validate:"omitempty,gte=1,lte=86400"` + // Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. 
The default value is 20 milliseconds + AutovacuumVacuumCostDelay int `json:"autovacuum_vacuum_cost_delay,omitempty" validate:"omitempty,gte=-1,lte=100"` + // Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used. + AutovacuumVacuumCostLimit int `json:"autovacuum_vacuum_cost_limit,omitempty" validate:"omitempty,gte=-1,lte=10000"` + // Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size) + AutovacuumVacuumScaleFactor float64 `json:"autovacuum_vacuum_scale_factor,omitempty" validate:"omitempty,gte=0,lte=1"` + // Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples + AutovacuumVacuumThreshold int `json:"autovacuum_vacuum_threshold,omitempty" validate:"omitempty,gte=0,lte=2.147483647e+09"` + // Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions. + LogAutovacuumMinDuration int `json:"log_autovacuum_min_duration,omitempty" validate:"omitempty,gte=-1,lte=2.147483647e+09"` +} + +// Background (BG) writer settings +type JSONSchemaPGBGWriter struct { + // Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200. + BgwriterDelay int `json:"bgwriter_delay,omitempty" validate:"omitempty,gte=10,lte=10000"` + // Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback. 
+ BgwriterFlushAfter int `json:"bgwriter_flush_after,omitempty" validate:"omitempty,gte=0,lte=2048"` + // In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100. + BgwriterLruMaxpages int `json:"bgwriter_lru_maxpages,omitempty" validate:"omitempty,gte=0,lte=1.073741823e+09"` + // The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0. + BgwriterLruMultiplier float64 `json:"bgwriter_lru_multiplier,omitempty" validate:"omitempty,gte=0,lte=10"` +} + +type JSONSchemaPGDefaultToastCompression string + +const ( + JSONSchemaPGDefaultToastCompressionLz4 JSONSchemaPGDefaultToastCompression = "lz4" + JSONSchemaPGDefaultToastCompressionPglz JSONSchemaPGDefaultToastCompression = "pglz" +) + +type JSONSchemaPGLogErrorVerbosity string + +const ( + JSONSchemaPGLogErrorVerbosityTERSE JSONSchemaPGLogErrorVerbosity = "TERSE" + JSONSchemaPGLogErrorVerbosityDEFAULT JSONSchemaPGLogErrorVerbosity = "DEFAULT" + JSONSchemaPGLogErrorVerbosityVERBOSE JSONSchemaPGLogErrorVerbosity = "VERBOSE" +) + +type JSONSchemaPGPGStatStatementsTrack string + +const ( + JSONSchemaPGPGStatStatementsTrackAll JSONSchemaPGPGStatStatementsTrack = "all" + JSONSchemaPGPGStatStatementsTrackTop JSONSchemaPGPGStatStatementsTrack = "top" + JSONSchemaPGPGStatStatementsTrackNone JSONSchemaPGPGStatStatementsTrack = "none" +) + +type JSONSchemaPGTrackCommitTimestamp string + +const ( + JSONSchemaPGTrackCommitTimestampOff JSONSchemaPGTrackCommitTimestamp = "off" + JSONSchemaPGTrackCommitTimestampOn 
JSONSchemaPGTrackCommitTimestamp = "on" +) + +type JSONSchemaPGTrackFunctions string + +const ( + JSONSchemaPGTrackFunctionsAll JSONSchemaPGTrackFunctions = "all" + JSONSchemaPGTrackFunctionsPL JSONSchemaPGTrackFunctions = "pl" + JSONSchemaPGTrackFunctionsNone JSONSchemaPGTrackFunctions = "none" +) + +type JSONSchemaPGTrackIoTiming string + +const ( + JSONSchemaPGTrackIoTimingOff JSONSchemaPGTrackIoTiming = "off" + JSONSchemaPGTrackIoTimingOn JSONSchemaPGTrackIoTiming = "on" +) + +// Write-ahead log (WAL) settings +type JSONSchemaPGWal struct { + // PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). wal_keep_size minimum WAL size setting takes precedence over this. + MaxSlotWalKeepSize int `json:"max_slot_wal_keep_size,omitempty" validate:"omitempty,gte=-1,lte=2.147483647e+09"` + // PostgreSQL maximum WAL senders + MaxWalSenders int `json:"max_wal_senders,omitempty" validate:"omitempty,gte=20,lte=64"` + // Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. + WalSenderTimeout int `json:"wal_sender_timeout,omitempty" validate:"omitempty,gte=0,lte=1.08e+07"` + // WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance + WalWriterDelay int `json:"wal_writer_delay,omitempty" validate:"omitempty,gte=10,lte=200"` +} // postgresql.conf configuration values -type JSONSchemaPG map[string]any +type JSONSchemaPG struct { + // Autovacuum settings + Autovacuum *JSONSchemaPGAutovacuum `json:"autovacuum,omitempty"` + // Background (BG) writer settings + BGWriter *JSONSchemaPGBGWriter `json:"bg-writer,omitempty"` + // This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition. 
+ DeadlockTimeout int `json:"deadlock_timeout,omitempty" validate:"omitempty,gte=500,lte=1.8e+06"` + // Specifies the default TOAST compression method for values of compressible columns (the default is lz4). + DefaultToastCompression JSONSchemaPGDefaultToastCompression `json:"default_toast_compression,omitempty"` + // Time out sessions with open transactions after this number of milliseconds + IdleInTransactionSessionTimeout int `json:"idle_in_transaction_session_timeout,omitempty" validate:"omitempty,gte=0,lte=6.048e+08"` + // Controls system-wide use of Just-in-Time Compilation (JIT). + Jit *bool `json:"jit,omitempty"` + // Controls the amount of detail written in the server log for each message that is logged. + LogErrorVerbosity JSONSchemaPGLogErrorVerbosity `json:"log_error_verbosity,omitempty"` + // Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc. + LogLinePrefix string `json:"log_line_prefix,omitempty"` + // Log statements that take more than this number of milliseconds to run, -1 disables + LogMinDurationStatement int `json:"log_min_duration_statement,omitempty" validate:"omitempty,gte=-1,lte=8.64e+07"` + // Log statements for each temporary file created larger than this number of kilobytes, -1 disables + LogTempFiles int `json:"log_temp_files,omitempty" validate:"omitempty,gte=-1,lte=2.147483647e+09"` + // PostgreSQL maximum number of files that can be open per process + MaxFilesPerProcess int `json:"max_files_per_process,omitempty" validate:"omitempty,gte=1000,lte=4096"` + // PostgreSQL maximum locks per transaction + MaxLocksPerTransaction int `json:"max_locks_per_transaction,omitempty" validate:"omitempty,gte=64,lte=6400"` + // PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers) + MaxLogicalReplicationWorkers int `json:"max_logical_replication_workers,omitempty" validate:"omitempty,gte=4,lte=64"` + // Sets the maximum number of workers that the 
system can support for parallel queries + MaxParallelWorkers int `json:"max_parallel_workers,omitempty" validate:"omitempty,gte=0,lte=96"` + // Sets the maximum number of workers that can be started by a single Gather or Gather Merge node + MaxParallelWorkersPerGather int `json:"max_parallel_workers_per_gather,omitempty" validate:"omitempty,gte=0,lte=96"` + // PostgreSQL maximum predicate locks per transaction + MaxPredLocksPerTransaction int `json:"max_pred_locks_per_transaction,omitempty" validate:"omitempty,gte=64,lte=5120"` + // PostgreSQL maximum prepared transactions + MaxPreparedTransactions int `json:"max_prepared_transactions,omitempty" validate:"omitempty,gte=0,lte=10000"` + // PostgreSQL maximum replication slots + MaxReplicationSlots int `json:"max_replication_slots,omitempty" validate:"omitempty,gte=8,lte=64"` + // Maximum depth of the stack in bytes + MaxStackDepth int `json:"max_stack_depth,omitempty" validate:"omitempty,gte=2.097152e+06,lte=6.291456e+06"` + // Max standby archive delay in milliseconds + MaxStandbyArchiveDelay int `json:"max_standby_archive_delay,omitempty" validate:"omitempty,gte=1,lte=4.32e+07"` + // Max standby streaming delay in milliseconds + MaxStandbyStreamingDelay int `json:"max_standby_streaming_delay,omitempty" validate:"omitempty,gte=1,lte=4.32e+07"` + // Sets the maximum number of background processes that the system can support + MaxWorkerProcesses int `json:"max_worker_processes,omitempty" validate:"omitempty,gte=8,lte=96"` + // Sets the time interval to run pg_partman's scheduled tasks + PGPartmanBgwInterval int `json:"pg_partman_bgw.interval,omitempty" validate:"omitempty,gte=3600,lte=604800"` + // Controls which role to use for pg_partman's scheduled background tasks. 
+ PGPartmanBgwRole string `json:"pg_partman_bgw.role,omitempty" validate:"omitempty,lte=64"` + // Enables or disables query plan monitoring + PGStatMonitorPgsmEnableQueryPlan *bool `json:"pg_stat_monitor.pgsm_enable_query_plan,omitempty"` + // Sets the maximum number of buckets + PGStatMonitorPgsmMaxBuckets int `json:"pg_stat_monitor.pgsm_max_buckets,omitempty" validate:"omitempty,gte=1,lte=10"` + // Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top. + PGStatStatementsTrack JSONSchemaPGPGStatStatementsTrack `json:"pg_stat_statements.track,omitempty"` + // PostgreSQL temporary file limit in KiB, -1 for unlimited + TempFileLimit int `json:"temp_file_limit,omitempty" validate:"omitempty,gte=-1,lte=2.147483647e+09"` + // PostgreSQL service timezone + Timezone string `json:"timezone,omitempty" validate:"omitempty,lte=64"` + // Specifies the number of bytes reserved to track the currently executing command for each active session. + TrackActivityQuerySize int `json:"track_activity_query_size,omitempty" validate:"omitempty,gte=1024,lte=10240"` + // Record commit time of transactions. + TrackCommitTimestamp JSONSchemaPGTrackCommitTimestamp `json:"track_commit_timestamp,omitempty"` + // Enables tracking of function call counts and time used. + TrackFunctions JSONSchemaPGTrackFunctions `json:"track_functions,omitempty"` + // Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms. 
+ TrackIoTiming JSONSchemaPGTrackIoTiming `json:"track_io_timing,omitempty"` + // Write-ahead log (WAL) settings + Wal *JSONSchemaPGWal `json:"wal,omitempty"` +} type JSONSchemaPgbouncerAutodbPoolMode string @@ -1957,6 +2954,8 @@ type JSONSchemaPgbouncer struct { AutodbPoolSize int `json:"autodb_pool_size,omitempty" validate:"omitempty,gte=0,lte=10000"` // List of parameters to ignore when given in startup packet IgnoreStartupParameters []string `json:"ignore_startup_parameters,omitempty"` + // PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. + MaxPreparedStatements int `json:"max_prepared_statements,omitempty" validate:"omitempty,gte=0,lte=3000"` // Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. MinPoolSize int `json:"min_pool_size,omitempty" validate:"omitempty,gte=0,lte=10000"` // If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. [seconds] @@ -2027,7 +3026,12 @@ type JSONSchemaRedis struct { } // Schema Registry configuration -type JSONSchemaSchemaRegistry map[string]any +type JSONSchemaSchemaRegistry struct { + // If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to `true`. + LeaderEligibility *bool `json:"leader_eligibility,omitempty"` + // The durable single partition topic that acts as the durable log for the data. 
This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to `_schemas`. + TopicName string `json:"topic_name,omitempty" validate:"omitempty,gte=1,lte=249"` +} // System-wide settings for the timescaledb extension type JSONSchemaTimescaledb struct { @@ -2265,6 +3269,8 @@ type PrivateNetwork struct { Name string `json:"name,omitempty" validate:"omitempty,gte=1,lte=255"` // Private Network netmask Netmask net.IP `json:"netmask,omitempty"` + // Private Network DHCP Options + Options *PrivateNetworkOptions `json:"options,omitempty"` // Private Network start IP address StartIP net.IP `json:"start-ip,omitempty"` // Private Network VXLAN ID @@ -2279,6 +3285,18 @@ type PrivateNetworkLease struct { IP net.IP `json:"ip,omitempty"` } +// Private Network DHCP Options +type PrivateNetworkOptions struct { + // DNS Servers + DNSServers []net.IP `json:"dns-servers,omitempty"` + // Domain search list, limited to 255 octets + DomainSearch []string `json:"domain-search,omitempty"` + // NTP Servers + NtpServers []net.IP `json:"ntp-servers,omitempty"` + // Routers + Routers []net.IP `json:"routers,omitempty"` +} + type PublicIPAssignment string const ( @@ -2457,6 +3475,13 @@ type SKSKubeconfigRequest struct { User string `json:"user,omitempty"` } +type SKSNodepoolPublicIPAssignment string + +const ( + SKSNodepoolPublicIPAssignmentInet4 SKSNodepoolPublicIPAssignment = "inet4" + SKSNodepoolPublicIPAssignmentDual SKSNodepoolPublicIPAssignment = "dual" +) + type SKSNodepoolState string const ( @@ -2498,6 +3523,10 @@ type SKSNodepool struct { Name string `json:"name,omitempty" validate:"omitempty,gte=1,lte=255"` // Nodepool Private Networks PrivateNetworks 
[]PrivateNetwork `json:"private-networks,omitempty"` + // Nodepool public IP assignment of the Instances: + // * IPv4 (`inet4`) addressing only. + // * IPv4 and IPv6 (`dual`) addressing. + PublicIPAssignment SKSNodepoolPublicIPAssignment `json:"public-ip-assignment,omitempty"` // Nodepool Security Groups SecurityGroups []SecurityGroup `json:"security-groups,omitempty"` // Number of instances diff --git a/vendor/github.com/exoscale/egoscale/v3/version.go b/vendor/github.com/exoscale/egoscale/v3/version.go index 909a039a..152bce86 100644 --- a/vendor/github.com/exoscale/egoscale/v3/version.go +++ b/vendor/github.com/exoscale/egoscale/v3/version.go @@ -1,4 +1,4 @@ package v3 // Version represents the current egoscale v3 version. -const Version = "v3.1.0" +const Version = "v3.1.8" diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index 7e023090..bbf391fe 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -5,6 +5,10 @@ // Package sha3 implements the SHA-3 fixed-output-length hash functions and // the SHAKE variable-output-length hash functions defined by FIPS-202. // +// All types in this package also implement [encoding.BinaryMarshaler], +// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and +// unmarshal the internal state of the hash. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. 
For a detailed specification see http://keccak.noekeon.org/ // diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index c544b29e..31fffbe0 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -48,33 +48,52 @@ func init() { crypto.RegisterHash(crypto.SHA3_512, New512) } +const ( + dsbyteSHA3 = 0b00000110 + dsbyteKeccak = 0b00000001 + dsbyteShake = 0b00011111 + dsbyteCShake = 0b00000100 + + // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in + // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. + rateK256 = (1600 - 256) / 8 + rateK448 = (1600 - 448) / 8 + rateK512 = (1600 - 512) / 8 + rateK768 = (1600 - 768) / 8 + rateK1024 = (1600 - 1024) / 8 +) + func new224Generic() *state { - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} + return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3} } func new256Generic() *state { - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3} } func new384Generic() *state { - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} + return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3} } func new512Generic() *state { - return &state{rate: 72, outputLen: 64, dsbyte: 0x06} + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} } // NewLegacyKeccak256 creates a new Keccak-256 hash. // // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New256 instead. -func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } +func NewLegacyKeccak256() hash.Hash { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} +} // NewLegacyKeccak512 creates a new Keccak-512 hash. 
// // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New512 instead. -func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } +func NewLegacyKeccak512() hash.Hash { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} +} // Sum224 returns the SHA3-224 digest of the data. func Sum224(data []byte) (digest [28]byte) { diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index afedde5a..6658c444 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -4,6 +4,15 @@ package sha3 +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "unsafe" + + "golang.org/x/sys/cpu" +) + // spongeDirection indicates the direction bytes are flowing through the sponge. type spongeDirection int @@ -14,16 +23,13 @@ const ( spongeSqueezing ) -const ( - // maxRate is the maximum size of the internal buffer. SHAKE-256 - // currently needs the largest buffer. - maxRate = 168 -) - type state struct { - // Generic sponge components. - a [25]uint64 // main state of the hash - rate int // the number of bytes of state to use + a [1600 / 8]byte // main state of the hash + + // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR + // into before running the permutation. If squeezing, it's the remaining + // output to produce before running the permutation. + n, rate int // dsbyte contains the "domain separation" bits and the first bit of // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the @@ -39,10 +45,6 @@ type state struct { // Extendable-Output Functions (May 2014)" dsbyte byte - i, n int // storage[i:n] is the buffer, i is only used while squeezing - storage [maxRate]byte - - // Specific to SHA-3 and SHAKE. 
outputLen int // the default output size in bytes state spongeDirection // whether the sponge is absorbing or squeezing } @@ -61,7 +63,7 @@ func (d *state) Reset() { d.a[i] = 0 } d.state = spongeAbsorbing - d.i, d.n = 0, 0 + d.n = 0 } func (d *state) clone() *state { @@ -69,22 +71,25 @@ func (d *state) clone() *state { return &ret } -// permute applies the KeccakF-1600 permutation. It handles -// any input-output buffering. +// permute applies the KeccakF-1600 permutation. func (d *state) permute() { - switch d.state { - case spongeAbsorbing: - // If we're absorbing, we need to xor the input into the state - // before applying the permutation. - xorIn(d, d.storage[:d.rate]) - d.n = 0 - keccakF1600(&d.a) - case spongeSqueezing: - // If we're squeezing, we need to apply the permutation before - // copying more output. - keccakF1600(&d.a) - d.i = 0 - copyOut(d, d.storage[:d.rate]) + var a *[25]uint64 + if cpu.IsBigEndian { + a = new([25]uint64) + for i := range a { + a[i] = binary.LittleEndian.Uint64(d.a[i*8:]) + } + } else { + a = (*[25]uint64)(unsafe.Pointer(&d.a)) + } + + keccakF1600(a) + d.n = 0 + + if cpu.IsBigEndian { + for i := range a { + binary.LittleEndian.PutUint64(d.a[i*8:], a[i]) + } } } @@ -92,53 +97,36 @@ func (d *state) permute() { // the multi-bitrate 10..1 padding rule, and permutes the state. func (d *state) padAndPermute() { // Pad with this instance's domain-separator bits. We know that there's - // at least one byte of space in d.buf because, if it were full, + // at least one byte of space in the sponge because, if it were full, // permute would have been called to empty it. dsbyte also contains the // first one bit for the padding. See the comment in the state struct. - d.storage[d.n] = d.dsbyte - d.n++ - for d.n < d.rate { - d.storage[d.n] = 0 - d.n++ - } + d.a[d.n] ^= d.dsbyte // This adds the final one bit for the padding. Because of the way that // bits are numbered from the LSB upwards, the final bit is the MSB of // the last byte. 
- d.storage[d.rate-1] ^= 0x80 + d.a[d.rate-1] ^= 0x80 // Apply the permutation d.permute() d.state = spongeSqueezing - d.n = d.rate - copyOut(d, d.storage[:d.rate]) } // Write absorbs more data into the hash's state. It panics if any // output has already been read. -func (d *state) Write(p []byte) (written int, err error) { +func (d *state) Write(p []byte) (n int, err error) { if d.state != spongeAbsorbing { panic("sha3: Write after Read") } - written = len(p) + + n = len(p) for len(p) > 0 { - if d.n == 0 && len(p) >= d.rate { - // The fast path; absorb a full "rate" bytes of input and apply the permutation. - xorIn(d, p[:d.rate]) - p = p[d.rate:] - keccakF1600(&d.a) - } else { - // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - d.n - if todo > len(p) { - todo = len(p) - } - d.n += copy(d.storage[d.n:], p[:todo]) - p = p[todo:] - - // If the sponge is full, apply the permutation. - if d.n == d.rate { - d.permute() - } + x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p) + d.n += x + p = p[x:] + + // If the sponge is full, apply the permutation. + if d.n == d.rate { + d.permute() } } @@ -156,14 +144,14 @@ func (d *state) Read(out []byte) (n int, err error) { // Now, do the squeezing. for len(out) > 0 { - n := copy(out, d.storage[d.i:d.n]) - d.i += n - out = out[n:] - // Apply the permutation if we've squeezed the sponge dry. - if d.i == d.rate { + if d.n == d.rate { d.permute() } + + x := copy(out, d.a[d.n:d.rate]) + d.n += x + out = out[x:] } return @@ -183,3 +171,74 @@ func (d *state) Sum(in []byte) []byte { dup.Read(hash) return append(in, hash...) 
} + +const ( + magicSHA3 = "sha\x08" + magicShake = "sha\x09" + magicCShake = "sha\x0a" + magicKeccak = "sha\x0b" + // magic || rate || main state || n || sponge direction + marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1 +) + +func (d *state) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) +} + +func (d *state) AppendBinary(b []byte) ([]byte, error) { + switch d.dsbyte { + case dsbyteSHA3: + b = append(b, magicSHA3...) + case dsbyteShake: + b = append(b, magicShake...) + case dsbyteCShake: + b = append(b, magicCShake...) + case dsbyteKeccak: + b = append(b, magicKeccak...) + default: + panic("unknown dsbyte") + } + // rate is at most 168, and n is at most rate. + b = append(b, byte(d.rate)) + b = append(b, d.a[:]...) + b = append(b, byte(d.n), byte(d.state)) + return b, nil +} + +func (d *state) UnmarshalBinary(b []byte) error { + if len(b) != marshaledSize { + return errors.New("sha3: invalid hash state") + } + + magic := string(b[:len(magicSHA3)]) + b = b[len(magicSHA3):] + switch { + case magic == magicSHA3 && d.dsbyte == dsbyteSHA3: + case magic == magicShake && d.dsbyte == dsbyteShake: + case magic == magicCShake && d.dsbyte == dsbyteCShake: + case magic == magicKeccak && d.dsbyte == dsbyteKeccak: + default: + return errors.New("sha3: invalid hash state identifier") + } + + rate := int(b[0]) + b = b[1:] + if rate != d.rate { + return errors.New("sha3: invalid hash state function") + } + + copy(d.a[:], b) + b = b[len(d.a):] + + n, state := int(b[0]), spongeDirection(b[1]) + if n > d.rate { + return errors.New("sha3: invalid hash state") + } + d.n = n + if state != spongeAbsorbing && state != spongeSqueezing { + return errors.New("sha3: invalid hash state") + } + d.state = state + + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index a01ef435..a6b3a428 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ 
-16,9 +16,12 @@ package sha3 // [2] https://doi.org/10.6028/NIST.SP.800-185 import ( + "bytes" "encoding/binary" + "errors" "hash" "io" + "math/bits" ) // ShakeHash defines the interface to hash functions that support @@ -50,41 +53,33 @@ type cshakeState struct { initBlock []byte } -// Consts for configuring initial SHA-3 state -const ( - dsbyteShake = 0x1f - dsbyteCShake = 0x04 - rate128 = 168 - rate256 = 136 -) +func bytepad(data []byte, rate int) []byte { + out := make([]byte, 0, 9+len(data)+rate-1) + out = append(out, leftEncode(uint64(rate))...) + out = append(out, data...) + if padlen := rate - len(out)%rate; padlen < rate { + out = append(out, make([]byte, padlen)...) + } + return out +} -func bytepad(input []byte, w int) []byte { - // leftEncode always returns max 9 bytes - buf := make([]byte, 0, 9+len(input)+w) - buf = append(buf, leftEncode(uint64(w))...) - buf = append(buf, input...) - padlen := w - (len(buf) % w) - return append(buf, make([]byte, padlen)...) -} - -func leftEncode(value uint64) []byte { - var b [9]byte - binary.BigEndian.PutUint64(b[1:], value) - // Trim all but last leading zero bytes - i := byte(1) - for i < 8 && b[i] == 0 { - i++ +func leftEncode(x uint64) []byte { + // Let n be the smallest positive integer for which 2^(8n) > x. + n := (bits.Len64(x) + 7) / 8 + if n == 0 { + n = 1 } - // Prepend number of encoded bytes - b[i-1] = 9 - i - return b[i-1:] + // Return n || x with n as a byte and x an n bytes in big-endian order. + b := make([]byte, 9) + binary.BigEndian.PutUint64(b[1:], x) + b = b[9-n-1:] + b[0] = byte(n) + return b } func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} - - // leftEncode returns max 9 bytes - c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) 
c.initBlock = append(c.initBlock, N...) c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) @@ -111,6 +106,30 @@ func (c *state) Clone() ShakeHash { return c.clone() } +func (c *cshakeState) MarshalBinary() ([]byte, error) { + return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock))) +} + +func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) { + b, err := c.state.AppendBinary(b) + if err != nil { + return nil, err + } + b = append(b, c.initBlock...) + return b, nil +} + +func (c *cshakeState) UnmarshalBinary(b []byte) error { + if len(b) <= marshaledSize { + return errors.New("sha3: invalid hash state") + } + if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil { + return err + } + c.initBlock = bytes.Clone(b[marshaledSize:]) + return nil +} + // NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. @@ -126,11 +145,11 @@ func NewShake256() ShakeHash { } func newShake128Generic() *state { - return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} + return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake} } func newShake256Generic() *state { - return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} + return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake} } // NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, @@ -143,7 +162,7 @@ func NewCShake128(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake128() } - return newCShake(N, S, rate128, 32, dsbyteCShake) + return newCShake(N, S, rateK256, 32, dsbyteCShake) } // NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, @@ -156,7 +175,7 @@ func NewCShake256(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake256() } - return newCShake(N, S, rate256, 64, dsbyteCShake) + return newCShake(N, S, rateK512, 64, 
dsbyteCShake) } // ShakeSum128 writes an arbitrary-length digest of data into hash. diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go deleted file mode 100644 index 6ada5c95..00000000 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import ( - "crypto/subtle" - "encoding/binary" - "unsafe" - - "golang.org/x/sys/cpu" -) - -// xorIn xors the bytes in buf into the state. -func xorIn(d *state, buf []byte) { - if cpu.IsBigEndian { - for i := 0; len(buf) >= 8; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } - } else { - ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) - subtle.XORBytes(ab[:], ab[:], buf) - } -} - -// copyOut copies uint64s to a byte buffer. -func copyOut(d *state, b []byte) { - if cpu.IsBigEndian { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } - } else { - ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) - copy(b, ab[:]) - } -} diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md index d03d0aef..05ff623f 100644 --- a/vendor/golang.org/x/term/README.md +++ b/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. 
The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. diff --git a/vendor/modules.txt b/vendor/modules.txt index 718df110..f99bd71a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -23,8 +23,8 @@ github.com/diskfs/go-diskfs/util # github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab ## explicit; go 1.11 github.com/elliotwutingfeng/asciiset -# github.com/exoscale/egoscale/v3 v3.1.1 -## explicit; go 1.22 +# github.com/exoscale/egoscale/v3 v3.1.8 +## explicit; go 1.22.0 github.com/exoscale/egoscale/v3 github.com/exoscale/egoscale/v3/credentials github.com/exoscale/egoscale/v3/metadata @@ -189,7 +189,7 @@ go.uber.org/atomic # go.uber.org/multierr v1.9.0 ## explicit; go 1.19 go.uber.org/multierr -# golang.org/x/crypto v0.28.0 +# golang.org/x/crypto v0.31.0 ## explicit; go 1.20 golang.org/x/crypto/sha3 # golang.org/x/exp v0.0.0-20240213143201-ec583247a57a @@ -217,10 +217,10 @@ golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.25.0 +# golang.org/x/term v0.27.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.19.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/internal