diff --git a/.build/build.yaml b/.build/build.yaml index e7090626..ec7be18b 100644 --- a/.build/build.yaml +++ b/.build/build.yaml @@ -40,7 +40,7 @@ spec: memory: "4G" limits: cpu: "6" - memory: "4G" + memory: "4G" script: | #!/bin/sh scripts/go-test.sh @@ -188,14 +188,45 @@ spec: memory: "4G" limits: cpu: "4" - memory: "4G" + memory: "4G" script: | #!/bin/sh ./scripts/nginx-test.sh - - name: build-alb-chart + - name: update-module-plugin runAfter: - nginx-build - alb-build + workspaces: + - name: source + workspace: source + params: + - name: build_git_version_shart + value: $(build.git.version.short) + taskSpec: + description: | + set the version + workspaces: + - name: source + workspace: source + params: + - name: build_git_version_shart + type: string + steps: + - name: set-version + image: build-harbor.alauda.cn/ops/alpine:latest + imagePullPolicy: IfNotPresent + workingDir: $(workspaces.source.path) + script: | + #!/bin/sh + sed -i "s|version: .*|version: $(params.build_git_version_shart)|g" ./deploy/chart/alb/module-plugin.yaml + cat ./deploy/chart/alb/module-plugin.yaml + resources: + requests: + cpu: 100m + memory: 100Mi + - name: build-alb-chart + runAfter: + - update-module-plugin timeout: 30m retries: 0 taskRef: @@ -221,7 +252,7 @@ spec: - name: values value: - .global.images.alb2.tag=$(build.git.version.docker) - - .global.images.nginx.tag=$(build.git.version.docker) + - .global.images.nginx.tag=$(build.git.version.docker) - name: commit-push taskRef: name: alauda-git-commit-push diff --git a/.cspell.json b/.cspell.json index 1287550b..1ba9e385 100644 --- a/.cspell.json +++ b/.cspell.json @@ -3,6 +3,33 @@ "caseSensitive": false, "words": [ //proper noun + "testlink", + "rshift", + "restry", + "combind", + "divmod", + "lshift", + "rshift", + "flamegraph", + "urlsafe", + "urandom", + "pborman", + "hostport", + "jsondiff", + "AUTHFAIL", + "joxi", + "ncaptures", + "ingng", // ingress-nginx + "nameserver", + "resolvconf", + "resolv", + "authtypes", + "httpbin", + "signinurl", + "ingressnginx", + "varstring", + "extctl", + "sortbean", "luacov", "strftime", "ncruces", diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..e3d0fb92 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,168 @@ + +# see https://github.com/CppCXY/EmmyLuaCodeStyle +[*.lua] +# [basic] + +# optional space/tab +indent_style = space +# if indent_style is space, this is valid +indent_size = 4 +# if indent_style is tab, this is valid +tab_width = 4 +# none/single/double +quote_style = none + +continuation_indent = 4 +## extend option +# continuation_indent.before_block = 4 +# continuation_indent.in_expr = 4 +# continuation_indent.in_table = 4 + +# this mean utf8 length , if this is 'unset' then the line width is no longer checked +# this option decides when to chopdown the code +max_line_length = 120 + +# optional crlf/lf/cr/auto, if it is 'auto', in windows it is crlf other platforms are lf +# in neovim the value 'auto' is not a valid option, please use 'unset' +end_of_line = auto + +# none/ comma / semicolon / only_kv_colon +table_separator_style = none + +#optional keep/never/always/smart +trailing_table_separator = keep + +# keep/remove/remove_table_only/remove_string_only +call_arg_parentheses = keep + +detect_end_of_line = false + +# this will check text end with new line +insert_final_newline = true + +# [space] +space_around_table_field_list = true + +space_before_attribute = true + +space_before_function_open_parenthesis = false + +space_before_function_call_open_parenthesis = false + 
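+# Illustrative example (hedged; the function below is made up, not from this
+# repo): with indent_style = space, indent_size = 4 and max_line_length = 120,
+# a Lua chunk formatted under these settings would look roughly like:
+#
+#   local function sum(list)
+#       local total = 0
+#       for _, v in ipairs(list) do
+#           total = total + v
+#       end
+#       return total
+#   end
+#
+# i.e. four-space indentation, with lines chopped down only once they exceed
+# 120 characters.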
+space_before_closure_open_parenthesis = true + +# optional always/only_string/only_table/none +# or true/false +space_before_function_call_single_arg = always +## extend option +## always/keep/none +# space_before_function_call_single_arg.table = always +## always/keep/none +# space_before_function_call_single_arg.string = always + +space_before_open_square_bracket = false + +space_inside_function_call_parentheses = false + +space_inside_function_param_list_parentheses = false + +space_inside_square_brackets = false + +# like t[#t+1] = 1 +space_around_table_append_operator = false + +ignore_spaces_inside_function_call = false + +# detail number or 'keep' +space_before_inline_comment = 1 + +# convert '---' to '--- ' or '--' to '-- ' +space_after_comment_dash = false + +# [operator space] +space_around_math_operator = true +# space_around_math_operator.exponent = false + +space_after_comma = true + +space_after_comma_in_for_statement = true + +space_around_concat_operator = true + +space_around_logical_operator = true + +space_around_assign_operator = true + +# [align] + +align_call_args = false + +align_function_params = true + +align_continuous_assign_statement = true + +align_continuous_rect_table_field = true + +align_continuous_line_space = 2 + +align_if_branch = false + +# option none / always / contain_curly/ +align_array_table = true + +align_continuous_similar_call_args = false + +align_continuous_inline_comment = true +# option none / always / only_call_stmt +align_chain_expr = none + +# [indent] + +never_indent_before_if_condition = false + +never_indent_comment_on_if_branch = false + +keep_indents_on_empty_lines = false + +allow_non_indented_comments = false +# [line space] + +# The following configuration supports four expressions +# keep +# fixed(n) +# min(n) +# max(n) +# for eg. 
min(2) + +line_space_after_if_statement = keep + +line_space_after_do_statement = keep + +line_space_after_while_statement = keep + +line_space_after_repeat_statement = keep + +line_space_after_for_statement = keep + +line_space_after_local_or_assign_statement = keep + +line_space_after_function_statement = fixed(2) + +line_space_after_expression_statement = keep + +line_space_after_comment = keep + +line_space_around_block = fixed(1) +# [line break] +break_all_list_when_line_exceed = false + +auto_collapse_lines = false + +break_before_braces = false + +# [preference] +ignore_space_after_colon = true + +remove_call_expression_list_finish_comma = false +# keep / always / same_line / replace_with_newline / never +end_statement_with_semicolon = keep diff --git a/.gitignore b/.gitignore index 8504eb33..055c97ef 100644 --- a/.gitignore +++ b/.gitignore @@ -61,3 +61,8 @@ docs/.obsidian template/share/dhparam.pem luacov-html/ luacov.stats.out +luacov.report.out +luacov.report.out.index +luacov.summary +.kube +.fg diff --git a/Dockerfile b/Dockerfile index 7dd1cbae..f2ed1574 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ ARG OPENRESTY_BASE=build-harbor.alauda.cn/3rdparty/alb-nginx:v1.25.3 FROM ${GO_BUILD_BASE} AS go_builder ENV GO111MODULE=on -ENV GOPROXY=https://goproxy.cn,direct +ENV GOPROXY=https://goproxy.cn,https://build-nexus.alauda.cn/repository/golang,direct COPY ./ /alb/ WORKDIR /alb ENV GOFLAGS=-buildvcs=false @@ -15,6 +15,8 @@ RUN go build -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,- RUN go build -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-static' -v -o /out/albctl alauda.io/alb2/cmd/utils/albctl RUN go build -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now' -v -o /out/tweak_gen alauda.io/alb2/cmd/utils/tweak_gen RUN go build -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now' -v -o /out/ngx_gen alauda.io/alb2/cmd/utils/ngx_gen +RUN go build -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now' -v -o /out/dirhash alauda.io/alb2/cmd/utils/dirhash +RUN go install github.com/mccutchen/go-httpbin/v2/cmd/go-httpbin@latest && cp /go/bin/go-httpbin /out/ RUN ldd /out/albctl || true FROM ${OPENRESTY_BASE} AS base @@ -38,11 +40,13 @@ COPY ./pkg/controller/ngxconf/nginx.tmpl /alb/ctl/template/nginx/nginx.tmpl COPY run-alb.sh /alb/ctl/run-alb.sh COPY --from=go_builder /out/tweak_gen /alb/tools/tweak_gen COPY --from=go_builder /out/ngx_gen /alb/tools/ngx_gen +COPY --from=go_builder /out/dirhash /alb/tools/dirhash COPY --from=go_builder /out/alb /alb/ctl/alb COPY --from=go_builder /out/migrate /alb/ctl/tools/ COPY --from=go_builder /out/operator /alb/ctl/operator COPY --from=go_builder /alb/migrate/backup /alb/ctl/tools/backup COPY --from=go_builder /out/albctl /alb/ctl/tools/albctl +COPY --from=go_builder /out/go-httpbin /alb/tools/go-httpbin ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin:/usr/local/openresty/openssl/bin/ ENV NGINX_TEMPLATE_PATH /alb/ctl/template/nginx/nginx.tmpl diff --git a/cmd/utils/albctl/cmd/rule.go b/cmd/utils/albctl/cmd/rule.go index a1118816..d7a68a8e 100644 --- a/cmd/utils/albctl/cmd/rule.go +++ b/cmd/utils/albctl/cmd/rule.go @@ -210,6 +210,12 @@ func listRule(ctx Ctx) error { if err != nil { return err } + rule_maps := map[string]InternalRule{} + for _, ft := range lb.Frontends { + for _, r := range ft.Rules { + rule_maps[r.RuleID] = *r + } + } err = 
pcli.FillUpBackends(lb) if err != nil { return err @@ -238,7 +244,7 @@ func listRule(ctx Ctx) error { Name: p.Rule, Port: int(port), RealPriority: p.ComplexPriority, - Host: parse_host(p.DSLX), + Host: parse_host(rule_maps[p.Rule].DSLX), Matches: PrettyCompactJson(p.InternalDSL), Policy: *p, Upstream: backens_maps[p.Rule], diff --git a/cmd/utils/dirhash/main.go b/cmd/utils/dirhash/main.go new file mode 100644 index 00000000..f64e3e13 --- /dev/null +++ b/cmd/utils/dirhash/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" + "os" + + "alauda.io/alb2/utils/dirhash" +) + +func main() { + out, err := dirhash.HashDir(os.Args[1], "", dirhash.DefaultHash) + if err != nil { + panic(err) + } + fmt.Printf("%s\n", out) +} diff --git a/cmd/utils/map_gen/main.go b/cmd/utils/map_gen/main.go new file mode 100644 index 00000000..87ffb690 --- /dev/null +++ b/cmd/utils/map_gen/main.go @@ -0,0 +1,207 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "reflect" + "strings" + "text/template" + + . "alauda.io/alb2/pkg/controller/ext/auth/types" +) + +func main() { + type MapCfg struct { + base string + mapping []struct { + from reflect.Type + to reflect.Type + } + } + cfg := MapCfg{ + base: "./pkg/controller/ext/auth/types/", + mapping: []struct { + from reflect.Type + to reflect.Type + }{ + { + from: reflect.TypeOf((*AuthIngressForward)(nil)).Elem(), + to: reflect.TypeOf((*ForwardAuthInCr)(nil)).Elem(), + }, + { + from: reflect.TypeOf((*ForwardAuthInCr)(nil)).Elem(), + to: reflect.TypeOf((*ForwardAuthPolicy)(nil)).Elem(), + }, + { + from: reflect.TypeOf((*AuthIngressBasic)(nil)).Elem(), + to: reflect.TypeOf((*BasicAuthInCr)(nil)).Elem(), + }, + { + from: reflect.TypeOf((*BasicAuthInCr)(nil)).Elem(), + to: reflect.TypeOf((*BasicAuthPolicy)(nil)).Elem(), + }, + }, + } + for _, m := range cfg.mapping { + lt := m.from + rt := m.to + base := cfg.base + f := fmt.Sprintf(base+"codegen_mapping_%s_%s.go", strings.ToLower(lt.Name()), strings.ToLower(rt.Name())) + out, err := trans(lt, rt) + if err != nil { + panic(err) + } + err = os.WriteFile(f, []byte(out), 0o644) + if err != nil { + panic(err) + } + } +} + +func upperFirst(s string) string { + if s == "" { + return s + } + return strings.ToUpper(s[:1]) + s[1:] +} + +func find_field_via_key(t reflect.Type, key string) *reflect.StructField { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("key") == key { + return &f + } + } + return nil +} + +func trans(lt reflect.Type, rt reflect.Type) (string, error) { + TEMPLATE := ` +package {{.pkg}} + +import ( + "strings" +) + +func init() { + // make go happy + _ = strings.Clone("") +} + +type ReAssign{{.From}}To{{.To}}Opt struct { + {{ range $trans_name, $trans_cfg := .trans_map }} + {{$trans_name}} func({{$trans_cfg.trans_from_type}}) ({{$trans_cfg.trans_to_type}}, error) + {{end}} +} + +func ReAssign{{.From}}To{{.To}}(lt *{{.From}}, rt *{{.To}}, opt *ReAssign{{.From}}To{{.To}}Opt) error { + trans := map[string]func(lt *{{.From}}, rt *{{.To}}) error{ + {{ range $name, $field_cfg := .field_map }} + {{ if $field_cfg.trans_name -}} + "{{$name}}": func(lt *{{$.From}}, rt *{{$.To}}) error { + {{if eq $field_cfg.trans_name "From_bool"}} + ret := strings.ToLower(lt{{$field_cfg.l_access}}) == "true" + rt{{$field_cfg.r_access}} = ret + return nil + {{else}} + ret, err := opt.{{$field_cfg.trans_name}}(lt{{$field_cfg.l_access}}) + if err != nil { + return err + } + rt{{$field_cfg.r_access}} = ret + return nil + {{end}} + }, + {{ end }} + {{end}} + } + {{ range $name, $field_cfg := .field_map }} + {{ if 
not $field_cfg.trans_name -}} + rt{{$field_cfg.r_access}} = lt{{$field_cfg.l_access}} + {{- end -}} + {{- end }} + for _, m := range trans { + err := m(lt, rt) + if err != nil { + return err + } + } + return nil +} +` + pkg_path := strings.Split(lt.PkgPath(), "/") + pkg := pkg_path[len(pkg_path)-1] + // prepare the template data + data := map[string]interface{}{ + "From": lt.Name(), + "pkg": pkg, + "To": rt.Name(), + "field_map": make(map[string]map[string]string), + "trans_map": make(map[string]map[string]string), + } + + fieldMap := make(map[string]map[string]string) + transMap := make(map[string]map[string]string) + + // iterate over the fields of the right-hand (target) type + for i := 0; i < rt.NumField(); i++ { + rf := rt.Field(i) + key := rf.Tag.Get("key") + if key == "" { + continue + } + lf_or_null := find_field_via_key(lt, key) + if lf_or_null == nil { + return "", fmt.Errorf("%s key not found in left type", key) + } + lf := *lf_or_null + // find the field with the same key in the left-hand type + fieldCfg := make(map[string]string) + fieldCfg["l_access"] = "." + lf.Name + fieldCfg["r_access"] = "." + rf.Name + + // check whether a special conversion is needed + trans := upperFirst(rf.Tag.Get("trans")) + + if trans != "" { + if trans == "from_bool" { + continue + } + fieldCfg["trans_name"] = trans + // add it to the conversion function map + transCfg := make(map[string]string) + transCfg["trans_from_type"] = lf.Type.String() + transCfg["trans_to_type"] = strings.ReplaceAll(rf.Type.String(), pkg+".", "") + fmt.Printf("%s xx %v\n", trans, rf.Type.String()) + // Check for duplicate trans names before registering this one + if t, exists := transMap[trans]; exists && !reflect.DeepEqual(t, transCfg) { + return "", fmt.Errorf("conflicting trans name '%s': %v vs %v", trans, t, transCfg) + } + transMap[trans] = transCfg + fieldMap[key] = fieldCfg + continue + } + + if lf.Type == rf.Type { + fieldMap[key] = fieldCfg + continue + } + return "", fmt.Errorf("could not find trans from %s to %s for %s", lf.Type.String(), rf.Type.String(), key) + } + + data["field_map"] = fieldMap + data["trans_map"] = transMap + + tmpl, err := template.New("trans").Parse(TEMPLATE) + if err != nil { + return "", err + } + + var buf bytes.Buffer + err = tmpl.Execute(&buf, data) + if err != nil { + return "", err + } + return buf.String(), nil +} diff --git a/cmd/utils/tylua/go.mod b/cmd/utils/tylua/go.mod deleted file mode 100644 index 0f4ef278..00000000 --- a/cmd/utils/tylua/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module tylua - -go 1.22.4 - -require ( - github.com/fatih/structtag v1.2.0 - golang.org/x/tools v0.23.0 -) - -require ( - golang.org/x/mod v0.19.0 // indirect - golang.org/x/sync v0.7.0 // indirect -) diff --git a/cmd/utils/tylua/go.sum b/cmd/utils/tylua/go.sum deleted file mode 100644 index 9d41ec51..00000000 --- a/cmd/utils/tylua/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= diff --git a/cmd/utils/tylua/main.go b/cmd/utils/tylua/main.go index 8f3ba28e..cc281f1c 100644 --- a/cmd/utils/tylua/main.go +++ b/cmd/utils/tylua/main.go @@ -1,26 +1,161 @@ package main -// copy from
https://github.com/gzuidhof/tygo.git - import ( - "log" + "fmt" "os" + "reflect" + "sort" "strings" - t "tylua/tylua" + ct "alauda.io/alb2/controller/types" + "golang.org/x/exp/maps" ) +type LuaHint struct { + Kind string // struct or alias + Fields map[string]string + EmbedFields []string + Alias string + Name string + Order int +} + +type LuaHintMap map[string]LuaHint + func main() { - t := t.New(&t.Config{ - Packages: []*t.PackageConfig{ - { - Path: os.Args[1], - OutputPath: os.Args[2], - }, - }, + hm := LuaHintMap{} + t := reflect.TypeFor[ct.NgxPolicy]() + resolve(t, hm, "", 0) + hints := maps.Values(hm) + sort.Slice(hints, func(i, j int) bool { + if hints[i].Order != hints[j].Order { + return hints[i].Order < hints[j].Order + } + return hints[i].Name < hints[j].Name }) - err := t.Generate(strings.Split(os.Args[1], ","), os.Args[2], os.Args[3]) - if err != nil { - log.Fatalf("Tylua failed: %v", err) + s := "" + s += "--- @alias CJSON_NULL userdata\n" + for _, h := range hints { + if h.Kind == "struct" { + s += fmt.Sprintf("--- @class %s\n", h.Name) + fs := maps.Keys(h.Fields) + sort.Strings(fs) + for _, k := range fs { + v := h.Fields[k] + s += fmt.Sprintf("--- @field %s %s\n", k, v) + } + for _, v := range h.EmbedFields { + s += resolve_embed(hm, v) + } + s += "\n\n" + } + } + fmt.Printf("-----") + fmt.Printf("%s", s) + fmt.Printf("write to %s", os.Args[1]) + _ = os.WriteFile(os.Args[1], []byte(s), 0o644) +} + +func resolve_embed(hm LuaHintMap, key string) string { + h := hm[key] + s := "" + if h.Kind == "struct" { + fs := maps.Keys(h.Fields) + sort.Strings(fs) + for _, k := range fs { + v := h.Fields[k] + s += fmt.Sprintf("--- @field %s %s\n", k, v) + } + ef := h.EmbedFields + sort.Strings(ef) + for _, v := range ef { + s += resolve_embed(hm, v) + } + } + return s +} + +func resolve(f reflect.Type, hm LuaHintMap, tag reflect.StructTag, order int) string { + fmt.Println("resolve type", f.Kind(), f.Name()) + omit_empty := strings.Contains(tag.Get("json"), "omitempty") + or_empty := func(s string, empty bool) string { + if empty { + return fmt.Sprintf("%s?", s) + } + return fmt.Sprintf("(%s|CJSON_NULL)", s) + } + if f.Kind() == reflect.Map { + k := resolve(f.Key(), hm, "", order+1) + v := resolve(f.Elem(), hm, "", order+1) + return fmt.Sprintf("table<%s, %s>", k, v) + } + if f.Kind() == reflect.Slice { + k := resolve(f.Elem(), hm, "", order+1) + if f.Elem().Kind() == reflect.Ptr { + k = resolve(f.Elem().Elem(), hm, "", order+1) + } + return fmt.Sprintf("%s[]", k) + } + if f.Kind() == reflect.Ptr { + fmt.Println("resolve ptr", f) + k := resolve(f.Elem(), hm, "", order+1) + return or_empty(k, omit_empty) + } + if f.Kind() == reflect.Array { + k := resolve(f.Elem(), hm, "", order+1) + return fmt.Sprintf("%s[]", k) + } + if f.Kind() == reflect.Struct { + name := f.Name() + hm[name] = LuaHint{ + Kind: "struct", + Alias: "", + Order: order, + Fields: map[string]string{}, + EmbedFields: []string{}, + Name: name, + } + for i := 0; i < f.NumField(); i++ { + ff := f.Field(i) + fmt.Println("handle field", ff.Name, ff.Type, ff.Anonymous) + + field_name := strings.Split(ff.Tag.Get("json"), ",")[0] + if field_name == "-" { + continue + } + if ff.Anonymous { + hint := resolve(ff.Type, hm, ff.Tag, order+1) + lh := hm[name] + lh.EmbedFields = append(lh.EmbedFields, hint) + hm[name] = lh + continue + } + hint := resolve(ff.Type, hm, ff.Tag, order+1) + hm[name].Fields[field_name] = hint + } + return name + } + return default_lua_type(f.Kind().String()) +} + +func default_lua_type(s string) string { + _, 
ret := default_type_map(s) + return ret +} + +func default_type_map(s string) (bool, string) { + switch s { + case "interface": + return true, "any" + case "bool": + return true, "boolean" + case "string": + return true, "string" + case "int", "int8", "int16", "int32", "int64", + "uint", "uint8", "uint16", "uint32", "uint64", + "float32", "float64", + "complex64", "complex128": + return true, "number" } + return false, s } diff --git a/cmd/utils/tylua/tylua/config.go b/cmd/utils/tylua/tylua/config.go deleted file mode 100644 index 50c9f597..00000000 --- a/cmd/utils/tylua/tylua/config.go +++ /dev/null @@ -1,138 +0,0 @@ -package tylua - -import ( - "fmt" - "log" - "path/filepath" -) - -const defaultOutputFilename = "type.lua" - -type PackageConfig struct { - // The package path just like you would import it in Go - Path string `yaml:"path"` - - // Where this output should be written to. - // If you specify a folder it will be written to a file `index.ts` within that folder. By default it is written into the Golang package folder. - OutputPath string `yaml:"output_path"` - - // Customize the indentation (use \t if you want tabs) - Indent string `yaml:"indent"` - - // Specify your own custom type translations, useful for custom types, `time.Time` and `null.String`. - // Be default unrecognized types will be output as `any /* name */`. - TypeMappings map[string]string `yaml:"type_mappings"` - - // This content will be put at the top of the output Typescript file. - // You would generally use this to import custom types. - Frontmatter string `yaml:"frontmatter"` - - // Filenames of Go source files that should not be included in the Typescript output. - ExcludeFiles []string `yaml:"exclude_files"` - - // Filenames of Go source files that should be included in the Typescript output. - IncludeFiles []string `yaml:"include_files"` - - // FallbackType defines the Typescript type used as a fallback for unknown Go types. - FallbackType string `yaml:"fallback_type"` - - // Flavor defines what the key names of the output types will look like. - // Supported values: "default", "" (same as "default"), "yaml". - // In "default" mode, `json` and `yaml` tags are respected, but otherwise keys are unchanged. - // In "yaml" mode, keys are lowercased to emulate gopkg.in/yaml.v2. - Flavor string `yaml:"flavor"` - - // PreserveComments is an option to preserve comments in the generated TypeScript output. - // Supported values: "default", "" (same as "default"), "types", "none". - // By "default", package-level comments as well as type comments are - // preserved. - // In "types" mode, only type comments are preserved. - // If "none" is supplied, no comments are preserved. 
- PreserveComments string `yaml:"preserve_comments"` -} - -type Config struct { - Packages []*PackageConfig `yaml:"packages"` -} - -func (c Config) PackageNames() []string { - names := make([]string, len(c.Packages)) - - for i, p := range c.Packages { - names[i] = p.Path - } - return names -} - -func (c Config) PackageConfig(packagePath string) *PackageConfig { - for _, pc := range c.Packages { - if pc.Path == packagePath { - if pc.Indent == "" { - pc.Indent = " " - } - - var err error - pc.Flavor, err = normalizeFlavor(pc.Flavor) - if err != nil { - log.Fatalf("Invalid config for package %s: %s", packagePath, err) - } - - pc.PreserveComments, err = normalizePreserveComments(pc.PreserveComments) - if err != nil { - log.Fatalf("Invalid config for package %s: %s", packagePath, err) - } - return pc - } - } - log.Fatalf("Config not found for package %s", packagePath) - return nil -} - -func normalizeFlavor(flavor string) (string, error) { - switch flavor { - case "", "default": - return "default", nil - case "yaml": - return "yaml", nil - default: - return "", fmt.Errorf("unsupported flavor: %s", flavor) - } -} - -func normalizePreserveComments(preserveComments string) (string, error) { - switch preserveComments { - case "", "default": - return "default", nil - case "types": - return "types", nil - case "none": - return "none", nil - default: - return "", fmt.Errorf("unsupported preserve_comments: %s", preserveComments) - } -} - -func (c PackageConfig) IsFileIgnored(pathToFile string) bool { - basename := filepath.Base(pathToFile) - for _, ef := range c.ExcludeFiles { - if basename == ef { - return true - } - } - - // if defined, only included files are allowed - if len(c.IncludeFiles) > 0 { - for _, include := range c.IncludeFiles { - if basename == include { - return false - } - } - return true - } - - return false -} - -func (c PackageConfig) ResolvedOutputPath(packageDir string) string { - return c.OutputPath -} diff --git a/cmd/utils/tylua/tylua/generator.go b/cmd/utils/tylua/tylua/generator.go deleted file mode 100644 index c96b0086..00000000 --- a/cmd/utils/tylua/tylua/generator.go +++ /dev/null @@ -1,101 +0,0 @@ -package tylua - -import ( - "fmt" - "log" - "os" - "strings" - - "golang.org/x/tools/go/packages" -) - -// Generator for one or more input packages, responsible for linking -// them together if necessary. -type TyLua struct { - conf *Config - - packageGenerators map[string]*PackageGenerator -} - -// Responsible for generating the code for an input package -type PackageGenerator struct { - conf *PackageConfig - pkg *packages.Package - GoFiles []string - types map[string]string - pendingTypes map[string]bool - typesOrder map[string]int -} - -func New(config *Config) *TyLua { - return &TyLua{ - conf: config, - packageGenerators: make(map[string]*PackageGenerator), - } -} - -func (g *TyLua) SetTypeMapping(goType string, tsType string) { - for _, p := range g.conf.Packages { - p.TypeMappings[goType] = tsType - } -} - -func (g *TyLua) Generate(pkg_names []string, boot string, out string) error { - log.Printf("Generating for packages: %v", pkg_names) - pkgs, err := packages.Load(&packages.Config{ - Mode: packages.NeedSyntax | packages.NeedFiles, - }, pkg_names...) 
- if err != nil { - return err - } - know_types := map[string]string{} - types_order := map[string]int{} - pending_types := map[string]bool{boot: true} - for i := 0; i < 10; i++ { - allok := true - pd := "" - for k, v := range pending_types { - if v == true { - pd = pd + " " + k - allok = false - } - } - if allok { - break - } - log.Printf("pending %v", pd) - for i, pkg := range pkgs { - if len(pkg.GoFiles) == 0 { - log.Printf("no input go files for package index %d", i) - continue - } - if len(pkg.Errors) > 0 { - return fmt.Errorf("err %+v", pkg.Errors) - } - - // log.Printf("pkg x %v", pkg.Name) - pkgConfig := &PackageConfig{ - TypeMappings: map[string]string{ - "albv1.PortNumber": "number", - }, - } - pkgGen := &PackageGenerator{ - conf: pkgConfig, - GoFiles: pkg.GoFiles, - - pkg: pkg, - types: know_types, - pendingTypes: pending_types, - typesOrder: types_order, - } - g.packageGenerators[pkg.PkgPath] = pkgGen - pkgGen.Resolve() - } - } - codes := make([]string, len(know_types)+1) - for k, v := range know_types { - codes[types_order[k]] = v - } - log.Printf("resolve ok %v", out) - return os.WriteFile(out, []byte(strings.Join(codes, "")), os.FileMode(0o644)) -} diff --git a/cmd/utils/tylua/tylua/iota.go b/cmd/utils/tylua/tylua/iota.go deleted file mode 100644 index 1901ac52..00000000 --- a/cmd/utils/tylua/tylua/iota.go +++ /dev/null @@ -1,17 +0,0 @@ -package tylua - -import ( - "regexp" - "strconv" - "strings" -) - -var iotaRegexp = regexp.MustCompile(`\biota\b`) - -func isProbablyIotaType(valueString string) bool { - return !strings.ContainsAny(valueString, "\"`") && iotaRegexp.MatchString(valueString) -} - -func replaceIotaValue(valueString string, iotaValue int) string { - return iotaRegexp.ReplaceAllLiteralString(valueString, strconv.Itoa(iotaValue)) -} diff --git a/cmd/utils/tylua/tylua/package_generator.go b/cmd/utils/tylua/tylua/package_generator.go deleted file mode 100644 index 263d72b9..00000000 --- a/cmd/utils/tylua/tylua/package_generator.go +++ /dev/null @@ -1,83 +0,0 @@ -package tylua - -import ( - "go/ast" - "go/token" - "log" - "strings" -) - -func (g *PackageGenerator) Resolve() { - for _, file := range g.pkg.Syntax { - // log.Printf("gen for file %v", file.Name) - ast.Inspect(file, func(n ast.Node) bool { - switch x := n.(type) { - // GenDecl can be an import, type, var, or const expression - case *ast.GenDecl: - if x.Tok == token.IMPORT { - return false - } - g.ResolveFile(x) - return false - } - return true - }) - } -} - -func (g *PackageGenerator) IsPending(name string) bool { - return g.pendingTypes[name] -} - -func (g *PackageGenerator) AddPending(name string) { - kn := map[string]bool{ - "int": true, - "string": true, - "bool": true, - "uint": true, - } - if _, ok := kn[name]; ok { - return - } - if g.types[name] != "" { - return - } - g.pendingTypes[name] = true -} - -func (g *PackageGenerator) ResolveFile(decl *ast.GenDecl) { - for _, spec := range decl.Specs { - ts, ok := spec.(*ast.TypeSpec) - if ok && ts.Name.IsExported() { - if g.IsPending(ts.Name.Name) { - log.Printf("resolve %v", ts.Name.Name) - g.ResolveTypeSpec(ts) - } - } - } -} - -func (g *PackageGenerator) ResolveTypeSpec(spec *ast.TypeSpec) { - name := spec.Name.Name - s := strings.Builder{} - ok := g.writeTypeSpec(&s, spec) - if !ok { - return - } - g.types[name] = s.String() - log.Printf("resolved %v", name) - if g.typesOrder[name] == 0 { - g.typesOrder[name] = len(g.typesOrder) + 1 - } - g.pendingTypes[name] = false -} - -func (g *PackageGenerator) ShowPend() string { - out := "" - for k, v := 
range g.pendingTypes { - if v { - out += k - } - } - return out -} diff --git a/cmd/utils/tylua/tylua/write.go b/cmd/utils/tylua/tylua/write.go deleted file mode 100644 index c23b0359..00000000 --- a/cmd/utils/tylua/tylua/write.go +++ /dev/null @@ -1,241 +0,0 @@ -package tylua - -import ( - "fmt" - "go/ast" - "go/token" - "log" - "reflect" - "regexp" - "strings" - - "github.com/fatih/structtag" -) - -var ( - validJSNameRegexp = regexp.MustCompile(`(?m)^[\pL_][\pL\pN_]*$`) - octalPrefixRegexp = regexp.MustCompile(`^0[0-7]`) - unicode8Regexp = regexp.MustCompile(`\\\\|\\U[\da-fA-F]{8}`) -) - -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_precedence#table -var jsNumberOperatorPrecedence = map[token.Token]int{ - token.MUL: 6, - token.QUO: 6, - token.REM: 6, - token.ADD: 5, - token.SUB: 5, - token.SHL: 4, - token.SHR: 4, - token.AND: 3, - token.AND_NOT: 3, - token.OR: 2, - token.XOR: 1, -} - -func validJSName(n string) bool { - return validJSNameRegexp.MatchString(n) -} - -func getIdent(s string) string { - switch s { - case "bool": - return "boolean" - case "int", "int8", "int16", "int32", "int64", - "uint", "uint8", "uint16", "uint32", "uint64", - "float32", "float64", - "complex64", "complex128": - return "number /* " + s + " */" - } - return s -} - -func (g *PackageGenerator) writeIndent(s *strings.Builder, depth int) { - for i := 0; i < depth; i++ { - s.WriteString(g.conf.Indent) - } -} - -func (g *PackageGenerator) writeType( - s *strings.Builder, - t ast.Expr, - p ast.Expr, - depth int, - optionalParens bool, -) { - log.Println("writeType:", reflect.TypeOf(t), t) - switch t := t.(type) { - case *ast.StarExpr: - if optionalParens { - s.WriteByte('(') - } - g.writeType(s, t.X, t, depth, false) - s.WriteString(" | nil") - if optionalParens { - s.WriteByte(')') - } - case *ast.ArrayType: - if v, ok := t.Elt.(*ast.Ident); ok && v.String() == "byte" { - s.WriteString("string") - break - } - case *ast.InterfaceType: - s.WriteString(" table ") - case *ast.StructType: - s.WriteString("{\n") - g.writeStructFields(s, t.Fields.List, depth+1) - g.writeIndent(s, depth+1) - s.WriteByte('}') - case *ast.Ident: - if t.String() == "any" { - s.WriteString(getIdent(g.conf.FallbackType)) - } else { - log.Printf("check pending %v", t) - if _, has := g.types[t.String()]; !has { - log.Printf("add pending %v", t) - g.AddPending(t.String()) - } - s.WriteString(getIdent(t.String())) - } - case *ast.SelectorExpr: - longType := fmt.Sprintf("%s.%s", t.X, t.Sel) - log.Printf("SelectorExpr %v", longType) - mappedTsType, ok := g.conf.TypeMappings[longType] - if ok { - s.WriteString(mappedTsType) - } else { // For unknown types we use the fallback type - log.Printf("import x.x %v", t.Sel.String()) - s.WriteString(t.Sel.String()) - if _, has := g.types[t.Sel.String()]; !has { - log.Printf("add pending %v", t) - g.AddPending(t.Sel.String()) - } - } - case *ast.MapType: - s.WriteString("table<") - g.writeType(s, t.Key, t, depth, false) - s.WriteString(",") - g.writeType(s, t.Value, t, depth, false) - s.WriteString(">") - case *ast.ParenExpr: - s.WriteByte('(') - g.writeType(s, t.X, t, depth, false) - s.WriteByte(')') - default: - err := fmt.Errorf("unhandled: %s\n %T", t, t) - fmt.Println(err) - panic(err) - } -} - -func (g *PackageGenerator) writeStructFields(s *strings.Builder, fields []*ast.Field, depth int) { - for _, f := range fields { - // fmt.Println(f.Type) - optional := false - required := false - readonly := false - - var fieldName string - if len(f.Names) == 0 { // 
anonymous field - if name, valid := getAnonymousFieldName(f.Type); valid { - fieldName = name - } - } - if len(f.Names) != 0 && f.Names[0] != nil && len(f.Names[0].Name) != 0 { - fieldName = f.Names[0].Name - } - if len(fieldName) == 0 || 'A' > fieldName[0] || fieldName[0] > 'Z' { - continue - } - - var name string - var tstype string - if f.Tag != nil { - tags, err := structtag.Parse(f.Tag.Value[1 : len(f.Tag.Value)-1]) - if err != nil { - panic(err) - } - - jsonTag, err := tags.Get("json") - if err == nil { - name = jsonTag.Name - if name == "-" { - continue - } - - optional = jsonTag.HasOption("omitempty") - } - yamlTag, err := tags.Get("yaml") - if err == nil { - name = yamlTag.Name - if name == "-" { - continue - } - - optional = yamlTag.HasOption("omitempty") - } - - tstypeTag, err := tags.Get("tstype") - if err == nil { - tstype = tstypeTag.Name - if tstype == "-" || tstypeTag.HasOption("extends") { - continue - } - required = tstypeTag.HasOption("required") - readonly = tstypeTag.HasOption("readonly") - } - } - - if len(name) == 0 { - if g.conf.Flavor == "yaml" { - name = strings.ToLower(fieldName) - } else { - name = fieldName - } - } - - if g.PreserveTypeComments() { - g.writeCommentGroupIfNotNil(s, f.Doc, depth+1) - } - - quoted := !validJSName(name) - if quoted { - s.WriteByte('\'') - } - if readonly { - s.WriteString("readonly ") - } - log.Printf("field %v", name) - s.WriteString("---@field " + name) - if quoted { - s.WriteByte('\'') - } - - switch t := f.Type.(type) { - case *ast.StarExpr: - optional = !required - f.Type = t.X - } - - if optional { - s.WriteByte('?') - } - - s.WriteString(" ") - - if tstype == "" { - g.writeType(s, f.Type, nil, depth, false) - } else { - s.WriteString(tstype) - } - - if f.Comment != nil && g.PreserveTypeComments() { - // Line comment is present, that means a comment after the field. 
- s.WriteString(" // ") - s.WriteString(f.Comment.Text()) - } else { - s.WriteByte('\n') - } - - } -} diff --git a/cmd/utils/tylua/tylua/write_comment.go b/cmd/utils/tylua/tylua/write_comment.go deleted file mode 100644 index ff56c71e..00000000 --- a/cmd/utils/tylua/tylua/write_comment.go +++ /dev/null @@ -1,74 +0,0 @@ -package tylua - -import ( - "go/ast" - "strings" -) - -func (g *PackageGenerator) PreserveDocComments() bool { - return g.conf.PreserveComments == "default" -} - -func (g *PackageGenerator) PreserveTypeComments() bool { - return g.conf.PreserveComments == "types" || g.conf.PreserveComments == "default" -} - -func (g *PackageGenerator) writeCommentGroupIfNotNil(s *strings.Builder, f *ast.CommentGroup, depth int) { - if f != nil { - g.writeCommentGroup(s, f, depth) - } -} - -func (c *PackageGenerator) writeDirective(s *strings.Builder, cg *ast.CommentGroup) { - for _, cm := range cg.List { - if strings.HasPrefix(cm.Text, "//tylua:emit") { - // remove the separator whitespace but leave extra whitespace for indentation - s.WriteString(strings.TrimPrefix(cm.Text, "//tylua:emit")[1:]) - s.WriteString("\n") - } - } -} - -func (g *PackageGenerator) writeCommentGroup(s *strings.Builder, cg *ast.CommentGroup, depth int) { - docLines := strings.Split(cg.Text(), "\n") - - g.writeDirective(s, cg) - if len(cg.List) > 0 && cg.Text() == "" { // This is a directive comment like //go:embed - s.WriteByte('\n') - return - } - - if depth != 0 { - g.writeIndent(s, depth) - } - s.WriteString("--- \n") - - for _, c := range docLines { - if len(strings.TrimSpace(c)) == 0 { - continue - } - g.writeIndent(s, depth) - s.WriteString("---") - s.WriteString(c) - s.WriteByte('\n') - } - g.writeIndent(s, depth) - s.WriteString("---\n") -} - -// Outputs a comment like // hello world -func (g *PackageGenerator) writeSingleLineComment(s *strings.Builder, cg *ast.CommentGroup) { - text := cg.Text() - - if len(cg.List) > 0 && cg.Text() == "" { // This is a directive comment like //go:embed - s.WriteByte('\n') - return - } - - s.WriteString(" // " + text) - - if len(text) == 0 { - // This is an empty comment like // - s.WriteByte('\n') - } -} diff --git a/cmd/utils/tylua/tylua/write_headers.go b/cmd/utils/tylua/tylua/write_headers.go deleted file mode 100644 index 0f8bd67a..00000000 --- a/cmd/utils/tylua/tylua/write_headers.go +++ /dev/null @@ -1,30 +0,0 @@ -package tylua - -import ( - "go/ast" - "path/filepath" - "strings" -) - -func (g *PackageGenerator) writeFileCodegenHeader(w *strings.Builder) { - w.WriteString("-- Code generated by tylua. 
DO NOT EDIT.\n") -} - -func (g *PackageGenerator) writeFileFrontmatter(w *strings.Builder) { - if g.conf.Frontmatter != "" { - w.WriteString(g.conf.Frontmatter) - } -} - -func (g *PackageGenerator) writeFileSourceHeader(w *strings.Builder, path string, file *ast.File) { - w.WriteString("\n//////////\n// source: ") - w.WriteString(filepath.Base(path)) - w.WriteString("\n") - - if file.Doc != nil && g.PreserveDocComments() { - w.WriteString("/*\n") - w.WriteString(file.Doc.Text()) - w.WriteString("*/\n") - } - w.WriteString("\n") -} diff --git a/cmd/utils/tylua/tylua/write_toplevel.go b/cmd/utils/tylua/tylua/write_toplevel.go deleted file mode 100644 index a87a9414..00000000 --- a/cmd/utils/tylua/tylua/write_toplevel.go +++ /dev/null @@ -1,154 +0,0 @@ -package tylua - -import ( - "fmt" - "go/ast" - "go/token" - "log" - "strings" -) - -type groupContext struct { - isGroupedDeclaration bool - doc *ast.CommentGroup - groupValue string - groupType string - iotaValue int -} - -func (g *PackageGenerator) writeGroupDecl(s *strings.Builder, decl *ast.GenDecl) { - // This checks whether the declaration is a group declaration like: - // const ( - // X = 3 - // Y = "abc" - // ) - isGroupedDeclaration := len(decl.Specs) > 1 - - if !isGroupedDeclaration && g.PreserveTypeComments() { - g.writeCommentGroupIfNotNil(s, decl.Doc, 0) - } - - // We need a bit of state to handle syntax like - // const ( - // X SomeType = iota - // _ - // Y - // Foo string = "Foo" - // _ - // AlsoFoo - // ) - group := &groupContext{ - isGroupedDeclaration: len(decl.Specs) > 1, - doc: decl.Doc, - groupType: "", - groupValue: "", - iotaValue: -1, - } - - for _, spec := range decl.Specs { - g.writeSpec(s, spec, group) - } -} - -func (g *PackageGenerator) writeSpec(s *strings.Builder, spec ast.Spec, group *groupContext) { - // e.g. "type Foo struct {}" or "type Bar = string" - ts, ok := spec.(*ast.TypeSpec) - if ok && ts.Name.IsExported() { - g.writeTypeSpec(s, ts) - } -} - -func showType(t ast.Expr) string { - switch t := t.(type) { - case *ast.Ident: - return fmt.Sprintf("ident %v", t) - case *ast.ArrayType: - return fmt.Sprintf("array %v", t) - case *ast.StructType: - return fmt.Sprintf("struct %v", t) - case *ast.SelectorExpr: - return fmt.Sprintf("selector %v", t) - case *ast.StarExpr: - return fmt.Sprintf("star %v", t) - case *ast.MapType: - return fmt.Sprintf("map %v", t) - } - return "unknown" -} - -// Writing of type specs, which are expressions like -// `type X struct { ... }` -// or -// `type Bar = string` -func (g *PackageGenerator) writeTypeSpec( - s *strings.Builder, - ts *ast.TypeSpec, -) bool { - if ts.Doc != nil && - g.PreserveTypeComments() { // The spec has its own comment, which overrules the grouped comment. 
- g.writeCommentGroup(s, ts.Doc, 0) - } - isok := false - log.Printf("type %s %s ", ts.Name, showType(ts.Type)) - switch at := ts.Type.(type) { - case *ast.Ident: - log.Printf("ident %v", at.Name) - s.WriteString(at.Name) - g.AddPending(at.Name) - isok = true - case *ast.ArrayType: - log.Printf("array %v", ts.Name.Name) - s.WriteString(fmt.Sprintf("---@alias %s ", ts.Name.Name)) - g.writeType(s, at.Elt, at, 0, true) - s.WriteString("[]") - isok = true - case *ast.StructType: - s.WriteString("---@class ") - s.WriteString(ts.Name.Name) - s.WriteString("\n") - g.writeStructFields(s, at.Fields.List, 0) - s.WriteString("\n") - isok = true - case *ast.StarExpr: - g.writeType(s, at.X, at, 0, true) - isok = true - case *ast.MapType: - log.Printf("maptype %s %s", ts.Name, at.Value) - s.WriteString(fmt.Sprintf("---@alias %s table\n", ts.Name.Name, at.Value)) - g.AddPending(fmt.Sprintf("%s", at.Value)) - isok = true - } - if ts.Comment != nil && g.PreserveTypeComments() { - g.writeSingleLineComment(s, ts.Comment) - } else { - s.WriteString("\n") - } - return isok -} - -func getAnonymousFieldName(f ast.Expr) (name string, valid bool) { - switch ft := f.(type) { - case *ast.Ident: - name = ft.Name - if ft.Obj != nil && ft.Obj.Decl != nil { - dcl, ok := ft.Obj.Decl.(*ast.TypeSpec) - if ok { - valid = dcl.Name.IsExported() - } - } else { - // Types defined in the Go file after the parsed file in the same package - valid = token.IsExported(name) - } - case *ast.IndexExpr: - return getAnonymousFieldName(ft.X) - case *ast.IndexListExpr: - return getAnonymousFieldName(ft.X) - case *ast.SelectorExpr: - valid = ft.Sel.IsExported() - name = ft.Sel.String() - case *ast.StarExpr: - return getAnonymousFieldName(ft.X) - } - - return -} diff --git a/config/config.go b/config/config.go index de394d75..2621b396 100644 --- a/config/config.go +++ b/config/config.go @@ -361,7 +361,6 @@ func (c *Config) GetNetworkMode() ControllerNetWorkMode { return ControllerNetWorkMode(c.Controller.NetworkMode) } -// TODO a better name func (c *Config) IsEnableAlb() bool { return c.Controller.Flags.EnableAlb } diff --git a/controller/alb/alb.go b/controller/alb/alb.go index 8d43226b..4c738771 100644 --- a/controller/alb/alb.go +++ b/controller/alb/alb.go @@ -14,6 +14,7 @@ import ( "alauda.io/alb2/driver" gctl "alauda.io/alb2/gateway/ctl" "alauda.io/alb2/ingress" + pm "alauda.io/alb2/pkg/utils/metrics" "alauda.io/alb2/utils" "alauda.io/alb2/utils/log" "github.com/go-logr/logr" @@ -136,6 +137,8 @@ func (a *Alb) StartReloadLoadBalancerLoop(drv *driver.KubernetesDriver, ctx cont nctl := ctl.NewNginxController(drv, ctx, a.albCfg, l.WithName("nginx"), a.le) nctl.PortProber = a.portProbe + + l.Info("reload: ctl init", "cost", time.Since(startTime)) // do leader stuff if a.le.AmILeader() { if a.portProbe != nil { @@ -145,6 +148,7 @@ func (a *Alb) StartReloadLoadBalancerLoop(drv *driver.KubernetesDriver, ctx cont } } } + l.Info("reload: leader update", "cost", time.Since(startTime)) if a.albCfg.GetFlags().DisablePeriodGenNginxConfig { l.Info("reload: period regenerated config disabled") @@ -155,6 +159,7 @@ func (a *Alb) StartReloadLoadBalancerLoop(drv *driver.KubernetesDriver, ctx cont l.Error(err, "generate conf failed") return } + l.Info("time", "policy-gen", pm.Read()) if err := nctl.ReloadLoadBalancer(); err != nil { l.Error(err, "reload load balancer failed") @@ -175,7 +180,6 @@ func (a *Alb) StartReloadLoadBalancerLoop(drv *driver.KubernetesDriver, ctx cont } func (a *Alb) StartGoMonitorLoop(ctx context.Context) { - // TODO fixme // TODO 
how to stop it? use http server with ctx. log := a.log.WithName("monitor") flags := a.albCfg.GetFlags() diff --git a/controller/cli/alb.go b/controller/cli/alb.go index c4feb57b..bcabf870 100644 --- a/controller/cli/alb.go +++ b/controller/cli/alb.go @@ -2,43 +2,96 @@ package cli import ( "strings" + "time" + "alauda.io/alb2/controller/modules" . "alauda.io/alb2/controller/types" "alauda.io/alb2/driver" albv1 "alauda.io/alb2/pkg/apis/alauda/v1" - cus "alauda.io/alb2/pkg/controller/custom_config" + ext "alauda.io/alb2/pkg/controller/extctl" + pm "alauda.io/alb2/pkg/utils/metrics" "github.com/go-logr/logr" - "k8s.io/klog/v2" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) // cli to fetch loadbalancer from alb/ft/rule type AlbCli struct { drv *driver.KubernetesDriver log logr.Logger - cus cus.CustomCfgCtl + cus ext.ExtCtl } func NewAlbCli(drv *driver.KubernetesDriver, log logr.Logger) AlbCli { return AlbCli{ drv: drv, log: log, - cus: cus.NewCustomCfgCtl(cus.CustomCfgOpt{ + cus: ext.NewExtensionCtl(ext.ExtCtlCfgOpt{ Log: log, Domain: drv.Opt.Domain, }), } } +func (c *AlbCli) RuleToInternalRule(mr *modules.Rule, ir *InternalRule) { + mrs := mr.Spec + + // rule-meta + meta := RuleMeta{} + meta.RuleID = mr.Name + meta.Type = mrs.Type + meta.Source = mrs.Source + meta.Priority = mrs.Priority + ir.RuleMeta = meta + + // rule-match + match := RuleMatch{} + match.DSLX = mrs.DSLX + ir.RuleMatch = match + + // rule-cert + cert := RuleCert{} + cert.CertificateName = mrs.CertificateName + cert.Domain = mrs.Domain + ir.RuleCert = cert + + // rule-upstream + // 在redirect的情况下,service group为null 是正常的 + if mrs.ServiceGroup != nil { + up := RuleUpstream{} + up.BackendProtocol = strings.ToLower(mrs.BackendProtocol) + up.SessionAffinityPolicy = mrs.ServiceGroup.SessionAffinityPolicy + up.SessionAffinityAttr = mrs.ServiceGroup.SessionAffinityAttribute + if up.Services == nil { + up.Services = []*BackendService{} + } + for _, svc := range mrs.ServiceGroup.Services { + up.Services = append(up.Services, &BackendService{ + ServiceNs: svc.Namespace, + ServiceName: svc.Name, + ServicePort: svc.Port, + Weight: svc.Weight, + }) + } + // will be init in fillup backend phase + up.BackendGroup = &BackendGroup{} + ir.RuleUpstream = up + } + ext := RuleExt{} + ir.Config = ext + // rule-ext + c.cus.ToInternalRule(mr, ir) +} + func (c *AlbCli) GetLBConfig(ns string, name string) (*LoadBalancer, error) { - // TODO we should merge mAlb and cAlb to one struct. - // mAlb LoadBalancer struct from modules package used in driver. - // cAlb LoadBalancer struct from controller package. kd := *c.drv + s := time.Now() mAlb, err := kd.LoadALBbyName(ns, name) if err != nil { - klog.Error("load mAlb fail", err) + c.log.Error(err, "load m-alb fail") return nil, err } + pm.Write("load-m-alb", float64(time.Since(s).Milliseconds())) cAlb := &LoadBalancer{ Name: mAlb.Alb.Name, @@ -47,6 +100,7 @@ func (c *AlbCli) GetLBConfig(ns string, name string) (*LoadBalancer, error) { Labels: mAlb.Alb.Labels, } + c.log.Info("ft len", "alb", name, "ft", len(mAlb.Frontends)) // mft frontend struct from modules package. 
for _, mft := range mAlb.Frontends { ft := &Frontend{ @@ -56,61 +110,25 @@ func (c *AlbCli) GetLBConfig(ns string, name string) (*LoadBalancer, error) { Protocol: mft.Spec.Protocol, Rules: RuleList{}, CertificateName: mft.Spec.CertificateName, - BackendProtocol: mft.Spec.BackendProtocol, + BackendProtocol: strings.ToLower(mft.Spec.BackendProtocol), Labels: mft.Labels, } if !ft.IsValidProtocol() { - klog.Errorf("frontend %s %s has no valid protocol", ft.FtName, ft.Protocol) + c.log.Info("frontend has no valid protocol", "ft", ft.FtName, "protocol", ft.Protocol) ft.Protocol = albv1.FtProtocolTCP } if ft.Port <= 0 { - klog.Errorf("frontend %s has an invalid port %d", ft.FtName, ft.Port) + c.log.Info("frontend has an invalid port ", "ft", ft.FtName, "port", ft.Port) continue } + c.log.Info("rule", "ft", ft.FtName, "rule", len(mft.Rules)) // translate rule cr to our rule struct for _, marl := range mft.Rules { - arl := marl.Spec - rule := &Rule{ - Config: &RuleConfigInPolicy{}, - RuleID: marl.Name, - SameInRuleCr: SameInRuleCr{ - Priority: arl.Priority, - DSLX: arl.DSLX, - URL: arl.URL, - RewriteBase: arl.RewriteBase, - RewriteTarget: arl.RewriteTarget, - EnableCORS: arl.EnableCORS, - CORSAllowHeaders: arl.CORSAllowHeaders, - CORSAllowOrigin: arl.CORSAllowOrigin, - BackendProtocol: arl.BackendProtocol, - RedirectURL: arl.RedirectURL, - VHost: arl.VHost, - Source: arl.Source, - RedirectCode: arl.RedirectCode, - }, - Type: arl.Type, - Domain: strings.ToLower(arl.Domain), - Description: arl.Description, - CertificateName: arl.CertificateName, - } - if arl.ServiceGroup != nil { - rule.SessionAffinityPolicy = arl.ServiceGroup.SessionAffinityPolicy - rule.SessionAffinityAttr = arl.ServiceGroup.SessionAffinityAttribute - if rule.Services == nil { - rule.Services = []*BackendService{} - } - for _, svc := range arl.ServiceGroup.Services { - rule.Services = append(rule.Services, &BackendService{ - ServiceNs: svc.Namespace, - ServiceName: svc.Name, - ServicePort: svc.Port, - Weight: svc.Weight, - }) - } - } - c.cus.FromRuleCr(marl, rule) + // arl := marl.Spec + rule := &InternalRule{} + c.RuleToInternalRule(marl, rule) ft.Rules = append(ft.Rules, rule) } @@ -131,8 +149,46 @@ func (c *AlbCli) GetLBConfig(ns string, name string) (*LoadBalancer, error) { }) } } - cAlb.Frontends = append(cAlb.Frontends, ft) } + cAlb.Refs = RefMap{ + ConfigMap: map[client.ObjectKey]*corev1.ConfigMap{}, + Secret: map[client.ObjectKey]*corev1.Secret{}, + } return cAlb, nil } + +func (c *AlbCli) CollectAndFetchRefs(lb *LoadBalancer) { + s := time.Now() + defer func() { + e := time.Now() + pm.Write("collect-refs", float64(e.UnixMilli())-float64(s.UnixMilli())) + }() + s_pick_refs := time.Now() + for _, ft := range lb.Frontends { + for _, rule := range ft.Rules { + c.cus.CollectRefs(rule, lb.Refs) + } + } + pm.Write("collect-refs/pick-refs", float64(time.Since(s_pick_refs).Milliseconds())) + for k := range lb.Refs.ConfigMap { + cm := &corev1.ConfigMap{} + err := c.drv.Cli.Get(c.drv.Ctx, k, cm) + if err != nil { + c.log.Error(err, "get cm fail", "cm", k) + delete(lb.Refs.ConfigMap, k) + continue + } + lb.Refs.ConfigMap[k] = cm + } + for k := range lb.Refs.Secret { + secret := &corev1.Secret{} + err := c.drv.Cli.Get(c.drv.Ctx, k, secret) + if err != nil { + c.log.Error(err, "get secret fail", "secret", k) + delete(lb.Refs.Secret, k) + continue + } + lb.Refs.Secret[k] = secret + } +} diff --git a/controller/cli/cert.go b/controller/cli/cert.go index c02c6112..4808f2a0 100644 --- a/controller/cli/cert.go +++ b/controller/cli/cert.go @@ 
-5,26 +5,35 @@ import ( "crypto/tls" "errors" "fmt" + "net" "sort" "strconv" "strings" + "sync" + "time" - "alauda.io/alb2/controller/types" + . "alauda.io/alb2/controller/types" "alauda.io/alb2/driver" albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + pm "alauda.io/alb2/pkg/utils/metrics" "github.com/go-logr/logr" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + certutil "k8s.io/client-go/util/cert" "sigs.k8s.io/controller-runtime/pkg/client" ) -func getCertMap(alb *types.LoadBalancer, d *driver.KubernetesDriver) map[string]types.Certificate { +func getCertMap(alb *LoadBalancer, d *driver.KubernetesDriver) map[string]Certificate { + s := time.Now() + defer func() { + pm.Write("gen-cert", float64(time.Since(s).Milliseconds())) + }() certProtocol := map[albv1.FtProtocol]bool{ albv1.FtProtocolHTTPS: true, albv1.FtProtocolgRPC: true, } - getPortDefaultCert := func(alb *types.LoadBalancer) map[string]client.ObjectKey { + getPortDefaultCert := func(alb *LoadBalancer) map[string]client.ObjectKey { cm := make(map[string]client.ObjectKey) for _, ft := range alb.Frontends { if ft.Conflict || !certProtocol[ft.Protocol] || ft.CertificateName == "" { @@ -57,8 +66,8 @@ func getCertMap(alb *types.LoadBalancer, d *driver.KubernetesDriver) map[string] } d.Log.Info("get secrets", "secretMap", secretMap) - certMap := make(map[string]types.Certificate) - certCache := make(map[string]types.Certificate) + certMap := make(map[string]Certificate) + certCache := make(map[string]Certificate) for domain, secret := range secretMap { secretKey := secret.String() @@ -78,7 +87,7 @@ func getCertMap(alb *types.LoadBalancer, d *driver.KubernetesDriver) map[string] return certMap } -func getCertificateFromSecret(driver *driver.KubernetesDriver, namespace, name string) (*types.Certificate, error) { +func getCertificateFromSecret(driver *driver.KubernetesDriver, namespace, name string) (*Certificate, error) { secret, err := driver.Client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err @@ -92,7 +101,7 @@ func getCertificateFromSecret(driver *driver.KubernetesDriver, namespace, name s } key := string(secret.Data[apiv1.TLSPrivateKeyKey]) cert := string(secret.Data[apiv1.TLSCertKey]) - caCert := string(secret.Data[types.CaCert]) + caCert := string(secret.Data[CaCert]) if len(caCert) != 0 { trimNewLine := func(s string) string { return strings.Trim(s, "\n") @@ -100,7 +109,7 @@ func getCertificateFromSecret(driver *driver.KubernetesDriver, namespace, name s cert = trimNewLine(cert) + "\n" + trimNewLine(caCert) } - return &types.Certificate{ + return &Certificate{ Key: key, Cert: cert, }, nil @@ -138,7 +147,7 @@ func SameCertificateName(left, right string) (bool, error) { } // domain / ft / cert -func getCertsFromRule(alb *types.LoadBalancer, certProtocol map[albv1.FtProtocol]bool, log logr.Logger) map[string]map[string][]client.ObjectKey { +func getCertsFromRule(alb *LoadBalancer, certProtocol map[albv1.FtProtocol]bool, log logr.Logger) map[string]map[string][]client.ObjectKey { cm := make(map[string]map[string][]client.ObjectKey) for _, ft := range alb.Frontends { if ft.Conflict || !certProtocol[ft.Protocol] { @@ -200,3 +209,27 @@ func formatCertsMap(domainCertRaw map[string]map[string][]client.ObjectKey) map[ } return ret } + +func (p *PolicyCli) setMetricsPortCert(cert map[string]Certificate) { + port := p.opt.MetricsPort + cert[fmt.Sprintf("%d", port)] = genMetricsCert() +} + +var ( + metricsCert Certificate + once sync.Once +) + +func init() { + 
once.Do(func() { + cert, key, _ := certutil.GenerateSelfSignedCertKey("localhost", []net.IP{}, []string{}) + metricsCert = Certificate{ + Cert: string(cert), + Key: string(key), + } + }) +} + +func genMetricsCert() Certificate { + return metricsCert +} diff --git a/controller/cli/policy.go b/controller/cli/policy.go index 7ff76b86..2320d0c2 100644 --- a/controller/cli/policy.go +++ b/controller/cli/policy.go @@ -1,30 +1,20 @@ package cli import ( - "fmt" - "math" - "net" - "sort" - "strings" - "sync" + "time" - m "alauda.io/alb2/controller/modules" . "alauda.io/alb2/controller/types" "alauda.io/alb2/driver" albv1 "alauda.io/alb2/pkg/apis/alauda/v1" - cus "alauda.io/alb2/pkg/controller/custom_config" - "alauda.io/alb2/utils" + cus "alauda.io/alb2/pkg/controller/extctl" + pm "alauda.io/alb2/pkg/utils/metrics" "github.com/go-logr/logr" - "github.com/thoas/go-funk" - corev1 "k8s.io/api/core/v1" - certutil "k8s.io/client-go/util/cert" - "k8s.io/klog/v2" ) type PolicyCli struct { drv *driver.KubernetesDriver log logr.Logger - cus cus.CustomCfgCtl + cus cus.ExtCtl opt PolicyCliOpt } type PolicyCliOpt struct { @@ -36,382 +26,45 @@ func NewPolicyCli(drv *driver.KubernetesDriver, log logr.Logger, opt PolicyCliOp drv: drv, log: log, opt: opt, - cus: cus.NewCustomCfgCtl(cus.CustomCfgOpt{Log: log}), + cus: cus.NewExtensionCtl(cus.ExtCtlCfgOpt{Log: log}), } } // fetch cert and backend info that lb config need, constructs a "dynamic config" used by openresty. func (p *PolicyCli) GenerateAlbPolicy(alb *LoadBalancer) NgxPolicy { + s := time.Now() + defer func() { + pm.Write("gen-policy", float64(time.Since(s).Milliseconds())) + }() + + s_other := time.Now() certificateMap := getCertMap(alb, p.drv) + p.setMetricsPortCert(certificateMap) backendGroup := pickAllBackendGroup(alb) + pm.Write("gen-policy/pick", float64(time.Since(s_other).Milliseconds())) ngxPolicy := NgxPolicy{ CertificateMap: certificateMap, Http: HttpPolicy{Tcp: make(map[albv1.PortNumber]Policies)}, - CommonConfig: CommonPolicyConfig{}, + SharedConfig: SharedExtPolicyConfig{}, Stream: StreamPolicy{Tcp: make(map[albv1.PortNumber]Policies), Udp: make(map[albv1.PortNumber]Policies)}, BackendGroup: backendGroup, } + sf := time.Now() for _, ft := range alb.Frontends { if ft.Conflict { continue } if ft.IsStreamMode() { p.initStreamModeFt(ft, &ngxPolicy) - } - - if ft.IsHttpMode() { - p.initHttpModeFt(ft, &ngxPolicy) - } - - if ft.IsGRPCMode() { - p.initGRPCModeFt(ft, &ngxPolicy) - } - } - p.cus.ResolvePolicies(alb, &ngxPolicy) - return ngxPolicy -} - -func pickAllBackendGroup(alb *LoadBalancer) BackendGroups { - backendGroup := BackendGroups{} - for _, ft := range alb.Frontends { - if ft.Conflict { - continue - } - for _, rule := range ft.Rules { - backendGroup = append(backendGroup, rule.BackendGroup) - } - - if ft.BackendGroup != nil && len(ft.BackendGroup.Backends) > 0 { - // FIX: http://jira.alaudatech.com/browse/DEV-16954 - // remove duplicate upstream - if !funk.Contains(backendGroup, ft.BackendGroup) { - backendGroup = append(backendGroup, ft.BackendGroup) - } - } - } - sort.Sort(backendGroup) - return backendGroup -} - -func (p *PolicyCli) initStreamModeFt(ft *Frontend, ngxPolicy *NgxPolicy) { - // create a default rule for stream mode ft. 
- if len(ft.Rules) == 0 { - if ft.BackendGroup == nil || ft.BackendGroup.Backends == nil { - klog.Warningf("ft %s,stream mode ft must have backend group", ft.FtName) - } - if ft.Protocol == albv1.FtProtocolTCP { - policy := Policy{} - policy.Subsystem = SubsystemStream - policy.Upstream = ft.BackendGroup.Name - policy.Rule = ft.BackendGroup.Name - ngxPolicy.Stream.Tcp[ft.Port] = append(ngxPolicy.Stream.Tcp[ft.Port], &policy) - } - if ft.Protocol == albv1.FtProtocolUDP { - policy := Policy{} - policy.Subsystem = SubsystemStream - policy.Upstream = ft.BackendGroup.Name - policy.Rule = ft.BackendGroup.Name - ngxPolicy.Stream.Udp[ft.Port] = append(ngxPolicy.Stream.Udp[ft.Port], &policy) - } - return - } - - if len(ft.Rules) != 1 { - klog.Warningf("stream mode ft could only have one rule", ft.FtName) - } - rule := ft.Rules[0] - policy := Policy{} - policy.Subsystem = SubsystemStream - policy.Upstream = rule.BackendGroup.Name - policy.Rule = rule.RuleID - policy.Config = rule.Config - if ft.Protocol == albv1.FtProtocolTCP { - ngxPolicy.Stream.Tcp[ft.Port] = append(ngxPolicy.Stream.Tcp[ft.Port], &policy) - } - if ft.Protocol == albv1.FtProtocolUDP { - ngxPolicy.Stream.Udp[ft.Port] = append(ngxPolicy.Stream.Udp[ft.Port], &policy) - } -} - -func (p *PolicyCli) initHttpModeFt(ft *Frontend, ngxPolicy *NgxPolicy) { - if _, ok := ngxPolicy.Http.Tcp[ft.Port]; !ok { - ngxPolicy.Http.Tcp[ft.Port] = Policies{} - } - - for _, rule := range ft.Rules { - if rule.DSLX == nil { - klog.Warningf("rule %s has no matcher, skip", rule.RuleID) - continue - } - - klog.V(3).Infof("Rule is %v", rule) - // translate our rule struct to policy (the json file) - internalDSL, err := utils.DSLX2Internal(rule.DSLX) - if err != nil { - klog.Error("convert dslx to internal failed", err) - continue - } - policy := Policy{} - policy.InternalDSL = internalDSL - policy.InternalDSLLen = utils.InternalDSLLen(internalDSL) - // policy-gen 设置 rule 的 upstream - policy.Upstream = rule.BackendGroup.Name // IMPORTANT - policy.ComplexPriority = rule.GetPriority() - policy.Subsystem = SubsystemHTTP - policy.Rule = rule.RuleID - policy.Config = rule.Config - - policy.SameInRuleCr = rule.SameInRuleCr - policy.SameInPolicy = rule.SameInPolicy - InitPolicySource(&policy, rule) - - ngxPolicy.Http.Tcp[ft.Port] = append(ngxPolicy.Http.Tcp[ft.Port], &policy) - } - - // set default rule if ft have default backend. - if ft.BackendGroup != nil && ft.BackendGroup.Backends != nil { - defaultPolicy := Policy{} - defaultPolicy.Rule = ft.FtName - defaultPolicy.ComplexPriority = -1 // default rule should have the minimum priority - defaultPolicy.Priority = 999 // minimum number means higher priority - defaultPolicy.Subsystem = SubsystemHTTP - defaultPolicy.InternalDSL = []interface{}{[]string{"STARTS_WITH", "URL", "/"}} // [[START_WITH URL /]] - defaultPolicy.BackendProtocol = ft.BackendProtocol - defaultPolicy.Upstream = ft.BackendGroup.Name - ngxPolicy.Http.Tcp[ft.Port] = append(ngxPolicy.Http.Tcp[ft.Port], &defaultPolicy) - } - sort.Sort(ngxPolicy.Http.Tcp[ft.Port]) // IMPORTANT sort to make sure priority work. 
-} - -func InitPolicySource(p *Policy, rule *Rule) { - if rule.Source == nil || rule.Source.Type != m.TypeIngress { - return - } - p.SourceType = m.TypeIngress - p.SourceName = rule.Source.Name - p.SourceNs = rule.Source.Namespace -} - -func (p *PolicyCli) initGRPCModeFt(ft *Frontend, ngxPolicy *NgxPolicy) { - log := p.log - if _, ok := ngxPolicy.Http.Tcp[ft.Port]; !ok { - ngxPolicy.Http.Tcp[ft.Port] = Policies{} - } - - for _, rule := range ft.Rules { - if rule.DSLX == nil { - log.V(3).Info("no matcher for rule, skip", "ruleID", rule.RuleID) - continue - } - - klog.V(3).Infof("Rule is %v", rule) - policy := Policy{} - policy.Subsystem = SubsystemHTTP - policy.Rule = rule.RuleID - internalDSL, err := utils.DSLX2Internal(rule.DSLX) - if err != nil { - log.Error(err, "convert dslx to internal failed", "ruleID", rule.RuleID) - continue - } - policy.InternalDSL = internalDSL - policy.Priority = rule.GetRawPriority() - policy.InternalDSLLen = utils.InternalDSLLen(internalDSL) - // policy-gen 设置 rule 的 upstream - policy.Upstream = rule.BackendGroup.Name // IMPORTANT - // for rewrite - policy.URL = rule.URL - policy.BackendProtocol = rule.BackendProtocol - policy.Config = rule.Config - ngxPolicy.Http.Tcp[ft.Port] = append(ngxPolicy.Http.Tcp[ft.Port], &policy) - } - - // set default rule if ft have default backend. - if ft.BackendGroup != nil && ft.BackendGroup.Backends != nil { - defaultPolicy := Policy{} - defaultPolicy.Rule = ft.FtName - defaultPolicy.Priority = 999 // default backend has the lowest priority - defaultPolicy.Subsystem = SubsystemHTTP - defaultPolicy.InternalDSL = []interface{}{[]string{"STARTS_WITH", "URL", "/"}} // [[START_WITH URL /]] - defaultPolicy.BackendProtocol = ft.BackendProtocol - defaultPolicy.Upstream = ft.BackendGroup.Name - ngxPolicy.Http.Tcp[ft.Port] = append(ngxPolicy.Http.Tcp[ft.Port], &defaultPolicy) - } - sort.Sort(ngxPolicy.Http.Tcp[ft.Port]) -} - -func (p *PolicyCli) FillUpBackends(cAlb *LoadBalancer) error { - services := p.loadServices(cAlb) - backendMap := make(map[string][]*driver.Backend) - log := p.log - for key, svc := range services { - if svc == nil { - continue - } - backendMap[key] = svc.Backends - } - - for _, ft := range cAlb.Frontends { - var rules RuleList - protocol := m.FtProtocolToServiceProtocol(ft.Protocol) - for _, rule := range ft.Rules { - if len(rule.Services) == 0 { - log.Info("no active service", "ruleID", rule.RuleID) - } - rule.BackendGroup = &BackendGroup{ - Name: rule.RuleID, - Mode: FtProtocolToBackendMode(ft.Protocol), - SessionAffinityPolicy: rule.SessionAffinityPolicy, - SessionAffinityAttribute: rule.SessionAffinityAttr, - } - rule.BackendGroup.Backends = generateBackend(backendMap, rule.Services, protocol) - // if backend app protocol is https. use https. - if rule.BackendProtocol == "$http_backend_protocol" { - rule.BackendProtocol = "http" - for _, b := range rule.BackendGroup.Backends { - if b.AppProtocol != nil && strings.ToLower(*b.AppProtocol) == "https" { - rule.BackendProtocol = "https" - } - } - } - rules = append(rules, rule) - } - if len(rules) > 0 { - ft.Rules = rules } else { - ft.Rules = RuleList{} - } - - if len(ft.Services) == 0 { - log.V(1).Info("frontend has no default service", "ft", ft.String()) - } - if len(ft.Services) != 0 { - // set ft default services group. 
- ft.BackendGroup.Backends = generateBackend(backendMap, ft.Services, protocol) - ft.BackendGroup.Mode = FtProtocolToBackendMode(ft.Protocol) - } - } - return nil -} - -func (p *PolicyCli) loadServices(alb *LoadBalancer) map[string]*driver.Service { - kd := p.drv - svcMap := make(map[string]*driver.Service) - - getServiceWithCache := func(svc *BackendService, protocol corev1.Protocol, svcMap map[string]*driver.Service) error { - svcKey := generateServiceKey(svc.ServiceNs, svc.ServiceName, protocol, svc.ServicePort) - if _, ok := svcMap[svcKey]; !ok { - service, err := kd.GetServiceByName(svc.ServiceNs, svc.ServiceName, svc.ServicePort, protocol) - if service != nil { - svcMap[svcKey] = service - } - return err - } - return nil - } - - for _, ft := range alb.Frontends { - protocol := m.FtProtocolToServiceProtocol(ft.Protocol) - for _, svc := range ft.Services { - err := getServiceWithCache(svc, protocol, svcMap) - if err != nil { - klog.Errorf("get backends for ft fail svc %s/%s port %d protocol %s ft %s err %v", svc.ServiceName, svc.ServiceNs, svc.ServicePort, protocol, ft.FtName, err) - } - } - - for _, rule := range ft.Rules { - if rule.AllowNoAddr() { - continue - } - for _, svc := range rule.Services { - err := getServiceWithCache(svc, protocol, svcMap) - if err != nil { - klog.Errorf("get backends for rule fail svc %s/%s %d protocol %s rule %s err %v", svc.ServiceName, svc.ServiceNs, svc.ServicePort, protocol, rule.RuleID, err) - } - } + p.initHttpModeFt(ft, &ngxPolicy, alb.Refs) } } - return svcMap -} - -func generateServiceKey(ns string, name string, protocol corev1.Protocol, svcPort int) string { - key := fmt.Sprintf("%s-%s-%s-%d", ns, name, protocol, svcPort) - return strings.ToLower(key) -} -// 找到 service 对应的后端 -func generateBackend(backendMap map[string][]*driver.Backend, services []*BackendService, protocol corev1.Protocol) Backends { - totalWeight := 0 - for _, svc := range services { - if svc.Weight > 100 { - svc.Weight = 100 - } - if svc.Weight < 0 { - svc.Weight = 0 - } - totalWeight += svc.Weight - } - if totalWeight == 0 { - // all service has zero weight - totalWeight = 100 - } - bes := []*Backend{} - for _, svc := range services { - name := generateServiceKey(svc.ServiceNs, svc.ServiceName, protocol, svc.ServicePort) - backends, ok := backendMap[name] - // some rule such as redirect ingress will use a fake service. 
- if !ok || len(backends) == 0 { - continue - } - // 100 is the max weigh in SLB - weight := int(math.Floor(float64(svc.Weight*100)/float64(totalWeight*len(backends)) + 0.5)) - if weight == 0 && svc.Weight != 0 { - weight = 1 - } - for _, be := range backends { - port := be.Port - if port == 0 { - klog.Warningf("invalid backend port 0 svc: %+v", svc) - continue - } - bes = append(bes, - &Backend{ - Address: be.IP, - Pod: be.Pod, - Svc: svc.ServiceName, - Ns: svc.ServiceNs, - Port: port, - Weight: weight, - Protocol: be.Protocol, - AppProtocol: be.AppProtocol, - FromOtherClusters: be.FromOtherClusters, - }) - } - } - sortedBackends := Backends(bes) - sort.Sort(sortedBackends) - return sortedBackends -} - -func (p *PolicyCli) setMetricsPortCert(cert map[string]Certificate) { - port := p.opt.MetricsPort - cert[fmt.Sprintf("%d", port)] = genMetricsCert() -} - -var ( - metricsCert Certificate - once sync.Once -) - -func genMetricsCert() Certificate { - once.Do(func() { - cert, key, _ := certutil.GenerateSelfSignedCertKey("localhost", []net.IP{}, []string{}) - metricsCert = Certificate{ - Cert: string(cert), - Key: string(key), - } - }) - return metricsCert + pm.Write("gen-policy/init-ft", float64(time.Since(sf).Milliseconds())) + p.cus.ResolvePolicies(alb, &ngxPolicy) + return ngxPolicy } diff --git a/controller/cli/policy_backend.go b/controller/cli/policy_backend.go new file mode 100644 index 00000000..ae2ae3b3 --- /dev/null +++ b/controller/cli/policy_backend.go @@ -0,0 +1,201 @@ +package cli + +import ( + "fmt" + "math" + "sort" + "strings" + "time" + + m "alauda.io/alb2/controller/modules" + . "alauda.io/alb2/controller/types" + "alauda.io/alb2/driver" + pm "alauda.io/alb2/pkg/utils/metrics" + "github.com/thoas/go-funk" + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +func (p *PolicyCli) FillUpBackends(cAlb *LoadBalancer) error { + s := time.Now() + defer func() { + pm.Write("fill-backends", float64(time.Since(s).Milliseconds())) + }() + + services := p.loadServices(cAlb) + backendMap := make(map[string][]*driver.Backend) + log := p.log + for key, svc := range services { + if svc == nil { + continue + } + backendMap[key] = svc.Backends + } + + for _, ft := range cAlb.Frontends { + var rules RuleList + protocol := m.FtProtocolToServiceProtocol(ft.Protocol) + for _, rule := range ft.Rules { + if len(rule.Services) == 0 { + log.Info("no active service", "ruleID", rule.RuleID) + } + rule.BackendGroup = &BackendGroup{ + Name: rule.RuleID, + Mode: FtProtocolToBackendMode(ft.Protocol), + SessionAffinityPolicy: rule.SessionAffinityPolicy, + SessionAffinityAttribute: rule.SessionAffinityAttr, + } + rule.BackendGroup.Backends = generateBackend(backendMap, rule.Services, protocol) + // if backend app protocol is https. use https. + if rule.BackendProtocol == "$http_backend_protocol" { + rule.BackendProtocol = "http" + for _, b := range rule.BackendGroup.Backends { + if b.AppProtocol != nil && strings.ToLower(*b.AppProtocol) == "https" { + rule.BackendProtocol = "https" + } + } + } + rules = append(rules, rule) + } + if len(rules) > 0 { + ft.Rules = rules + } else { + ft.Rules = RuleList{} + } + + if len(ft.Services) == 0 { + log.V(1).Info("frontend has no default service", "ft", ft.String()) + } + if len(ft.Services) != 0 { + // set ft default services group. 
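// Illustrative sketch, not part of the patch: the "$http_backend_protocol" sentinel handled
// earlier in FillUpBackends means "derive the protocol from the Service appProtocol": the rule
// starts as plain http and is upgraded to https as soon as any backend declares an https
// appProtocol. The same decision written as a pure function, for clarity only (the helper name
// is made up; it reuses this file's strings import and the Backends type):
func resolveBackendProtocol(backends Backends) string {
	proto := "http"
	for _, b := range backends {
		if b.AppProtocol != nil && strings.ToLower(*b.AppProtocol) == "https" {
			proto = "https"
		}
	}
	return proto
}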
+ ft.BackendGroup.Backends = generateBackend(backendMap, ft.Services, protocol) + ft.BackendGroup.Mode = FtProtocolToBackendMode(ft.Protocol) + } + } + return nil +} + +func (p *PolicyCli) loadServices(alb *LoadBalancer) map[string]*driver.Service { + kd := p.drv + svcMap := make(map[string]*driver.Service) + + getServiceWithCache := func(svc *BackendService, protocol corev1.Protocol, svcMap map[string]*driver.Service) error { + svcKey := generateServiceKey(svc.ServiceNs, svc.ServiceName, protocol, svc.ServicePort) + if _, ok := svcMap[svcKey]; !ok { + service, err := kd.GetServiceByName(svc.ServiceNs, svc.ServiceName, svc.ServicePort, protocol) + if service != nil { + svcMap[svcKey] = service + } + return err + } + return nil + } + + for _, ft := range alb.Frontends { + protocol := m.FtProtocolToServiceProtocol(ft.Protocol) + for _, svc := range ft.Services { + err := getServiceWithCache(svc, protocol, svcMap) + if err != nil { + klog.Errorf("get backends for ft fail svc %s/%s port %d protocol %s ft %s err %v", svc.ServiceName, svc.ServiceNs, svc.ServicePort, protocol, ft.FtName, err) + } + } + + for _, rule := range ft.Rules { + if rule.AllowNoAddr() { + continue + } + for _, svc := range rule.Services { + err := getServiceWithCache(svc, protocol, svcMap) + if err != nil { + klog.Errorf("get backends for rule fail svc %s/%s %d protocol %s rule %s err %v", svc.ServiceName, svc.ServiceNs, svc.ServicePort, protocol, rule.RuleID, err) + } + } + } + } + return svcMap +} + +func generateServiceKey(ns string, name string, protocol corev1.Protocol, svcPort int) string { + key := fmt.Sprintf("%s-%s-%s-%d", ns, name, protocol, svcPort) + return strings.ToLower(key) +} + +// 找到 service 对应的后端 +func generateBackend(backendMap map[string][]*driver.Backend, services []*BackendService, protocol corev1.Protocol) Backends { + totalWeight := 0 + for _, svc := range services { + if svc.Weight > 100 { + svc.Weight = 100 + } + if svc.Weight < 0 { + svc.Weight = 0 + } + totalWeight += svc.Weight + } + if totalWeight == 0 { + // all service has zero weight + totalWeight = 100 + } + bes := []*Backend{} + for _, svc := range services { + name := generateServiceKey(svc.ServiceNs, svc.ServiceName, protocol, svc.ServicePort) + backends, ok := backendMap[name] + // some rule such as redirect ingress will use a fake service. 
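// Worked example, not part of the patch: the per-pod weight computed below is
// round(svc.Weight*100 / (totalWeight*len(backends))), so the service-level split is preserved
// and then spread evenly over that service's pods. With two services of Weight 50 each
// (totalWeight 100), one backed by 2 pods and one by 3:
//
//	service A: round(50*100 / (100*2)) = 25 per pod, 2 pods -> 50 in total
//	service B: round(50*100 / (100*3)) = 17 per pod, 3 pods -> 51 in total
//
// which keeps roughly the intended 50/50 split; a non-zero service weight that would round
// down to 0 is bumped to 1 so the service is never silently dropped.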
+ if !ok || len(backends) == 0 { + continue + } + // 100 is the max weigh in SLB + weight := int(math.Floor(float64(svc.Weight*100)/float64(totalWeight*len(backends)) + 0.5)) + if weight == 0 && svc.Weight != 0 { + weight = 1 + } + for _, be := range backends { + port := be.Port + if port == 0 { + klog.Warningf("invalid backend port 0 svc: %+v", svc) + continue + } + bes = append(bes, + &Backend{ + Address: be.IP, + Pod: be.Pod, + Svc: svc.ServiceName, + Ns: svc.ServiceNs, + Port: port, + Weight: weight, + Protocol: be.Protocol, + AppProtocol: be.AppProtocol, + FromOtherClusters: be.FromOtherClusters, + }) + } + } + sortedBackends := Backends(bes) + sort.Sort(sortedBackends) + return sortedBackends +} + +func pickAllBackendGroup(alb *LoadBalancer) BackendGroups { + s := time.Now() + defer func() { + pm.Write("pick-backends", float64(time.Since(s).Milliseconds())) + }() + backendGroup := BackendGroups{} + for _, ft := range alb.Frontends { + if ft.Conflict { + continue + } + for _, rule := range ft.Rules { + backendGroup = append(backendGroup, rule.BackendGroup) + } + + if ft.BackendGroup != nil && len(ft.BackendGroup.Backends) > 0 { + // FIX: http://jira.alaudatech.com/browse/DEV-16954 + // remove duplicate upstream + if !funk.Contains(backendGroup, ft.BackendGroup) { + backendGroup = append(backendGroup, ft.BackendGroup) + } + } + } + sort.Sort(backendGroup) + return backendGroup +} diff --git a/controller/cli/policy_l4.go b/controller/cli/policy_l4.go new file mode 100644 index 00000000..815a7f68 --- /dev/null +++ b/controller/cli/policy_l4.go @@ -0,0 +1,46 @@ +package cli + +import ( + . "alauda.io/alb2/controller/types" + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + "k8s.io/klog/v2" +) + +func (p *PolicyCli) initStreamModeFt(ft *Frontend, ngxPolicy *NgxPolicy) { + // create a default rule for stream mode ft. + if len(ft.Rules) == 0 { + if ft.BackendGroup == nil || ft.BackendGroup.Backends == nil { + klog.Warningf("ft %s,stream mode ft must have backend group", ft.FtName) + } + if ft.Protocol == albv1.FtProtocolTCP { + policy := Policy{} + policy.Subsystem = SubsystemStream + policy.Upstream = ft.BackendGroup.Name + policy.Rule = ft.BackendGroup.Name + ngxPolicy.Stream.Tcp[ft.Port] = append(ngxPolicy.Stream.Tcp[ft.Port], &policy) + } + if ft.Protocol == albv1.FtProtocolUDP { + policy := Policy{} + policy.Subsystem = SubsystemStream + policy.Upstream = ft.BackendGroup.Name + policy.Rule = ft.BackendGroup.Name + ngxPolicy.Stream.Udp[ft.Port] = append(ngxPolicy.Stream.Udp[ft.Port], &policy) + } + return + } + + if len(ft.Rules) != 1 { + klog.Warningf("stream mode ft could only have one rule", ft.FtName) + } + rule := ft.Rules[0] + policy := Policy{} + policy.Subsystem = SubsystemStream + policy.Upstream = rule.BackendGroup.Name + policy.Rule = rule.RuleID + if ft.Protocol == albv1.FtProtocolTCP { + ngxPolicy.Stream.Tcp[ft.Port] = append(ngxPolicy.Stream.Tcp[ft.Port], &policy) + } + if ft.Protocol == albv1.FtProtocolUDP { + ngxPolicy.Stream.Udp[ft.Port] = append(ngxPolicy.Stream.Udp[ft.Port], &policy) + } +} diff --git a/controller/cli/policy_l7.go b/controller/cli/policy_l7.go new file mode 100644 index 00000000..ae0c6ee1 --- /dev/null +++ b/controller/cli/policy_l7.go @@ -0,0 +1,96 @@ +package cli + +import ( + "fmt" + "sort" + + m "alauda.io/alb2/controller/modules" + . 
"alauda.io/alb2/controller/types" + "alauda.io/alb2/utils" +) + +func (p *PolicyCli) InternalRuleToL7Policy(rule *InternalRule, refs RefMap) (*Policy, error) { + if rule.DSLX == nil { + return nil, fmt.Errorf("rule without matcher") + } + // translate our rule struct to policy (the json file) + internalDSL, err := utils.DSLX2Internal(rule.DSLX) + if err != nil { + return nil, fmt.Errorf("dslx to interval fail %v", err) + } + + policy := Policy{} + policy.InternalDSL = internalDSL + + // sort-bean + policy.InternalDSLLen = utils.InternalDSLLen(internalDSL) + policy.Priority = rule.Priority + policy.ComplexPriority = rule.DSLX.Priority() + + policy.Upstream = rule.RuleUpstream.BackendGroup.Name // IMPORTANT + policy.BackendProtocol = rule.RuleUpstream.BackendProtocol + policy.Config.Refs = map[PolicyExtKind]string{} + policy.Rule = rule.RuleID + initPolicySource(&policy, rule) + p.initLegacyCfg(&policy, rule) + p.initPolicyExt(&policy, rule, refs) + return &policy, nil +} + +func (c *PolicyCli) initLegacyCfg(p *Policy, ir *InternalRule) { + if ir.Config.Rewrite != nil { + p.RewriteConf = *ir.Config.Rewrite + } + if ir.Config.Redirect != nil { + p.RedirectConf = *ir.Config.Redirect + } + if ir.Config.Cors != nil { + p.Cors = *ir.Config.Cors + } + if ir.Config.Vhost != nil { + p.Vhost = *ir.Config.Vhost + } +} + +func (c *PolicyCli) initPolicyExt(p *Policy, ir *InternalRule, refs RefMap) { + c.cus.ToPolicy(ir, p, refs) +} + +func (p *PolicyCli) initHttpModeFt(ft *Frontend, ngxPolicy *NgxPolicy, refs RefMap) { + if _, ok := ngxPolicy.Http.Tcp[ft.Port]; !ok { + ngxPolicy.Http.Tcp[ft.Port] = Policies{} + } + + for _, rule := range ft.Rules { + policy, err := p.InternalRuleToL7Policy(rule, refs) + if err != nil { + p.log.Error(err, "to policy fail, skip this rule", "rule", rule.RuleID) + continue + } + ngxPolicy.Http.Tcp[ft.Port] = append(ngxPolicy.Http.Tcp[ft.Port], policy) + } + + // set default rule if ft have default backend. + if ft.BackendGroup != nil && ft.BackendGroup.Backends != nil { + defaultPolicy := Policy{} + defaultPolicy.Rule = ft.FtName + defaultPolicy.ComplexPriority = -1 // default rule should have the minimum priority + defaultPolicy.Priority = 999 // minimum number means higher priority + defaultPolicy.Subsystem = SubsystemHTTP + defaultPolicy.InternalDSL = []interface{}{[]string{"STARTS_WITH", "URL", "/"}} // [[START_WITH URL /]] + defaultPolicy.BackendProtocol = ft.BackendProtocol + defaultPolicy.Upstream = ft.BackendGroup.Name + + ngxPolicy.Http.Tcp[ft.Port] = append(ngxPolicy.Http.Tcp[ft.Port], &defaultPolicy) + } + sort.Sort(ngxPolicy.Http.Tcp[ft.Port]) // IMPORTANT sort to make sure priority work. 
+} + +func initPolicySource(p *Policy, rule *InternalRule) { + if rule.Source == nil || rule.Source.Type != m.TypeIngress { + return + } + p.SourceType = m.TypeIngress + p.SourceName = rule.Source.Name + p.SourceNs = rule.Source.Namespace +} diff --git a/controller/controller_test.go b/controller/controller_test.go index ad5d5ba5..a0a177e7 100644 --- a/controller/controller_test.go +++ b/controller/controller_test.go @@ -249,19 +249,19 @@ func TestSortPolicy(t *testing.T) { policies: []*Policy{ { Rule: "a", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 50000 + 1000, }, - ComplexPriority: 50000 + 1000, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, }, { Rule: "b", - SameInRuleCr: SameInRuleCr{ - Priority: 1, + PolicySortBean: PolicySortBean{ + Priority: 1, + ComplexPriority: 10000 + 500, }, - ComplexPriority: 10000 + 500, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, }, }, order: []string{"b", "a"}, @@ -271,27 +271,27 @@ func TestSortPolicy(t *testing.T) { policies: []*Policy{ { Rule: "a", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 50000 + 1000, }, - ComplexPriority: 50000 + 1000, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, }, { Rule: "b", - SameInRuleCr: SameInRuleCr{ - Priority: 1, + PolicySortBean: PolicySortBean{ + Priority: 1, + ComplexPriority: 10000 + 500, }, - ComplexPriority: 10000 + 500, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, }, { Rule: "c", - SameInRuleCr: SameInRuleCr{ - Priority: -1, + PolicySortBean: PolicySortBean{ + Priority: -1, + ComplexPriority: 10000 + 500, }, - ComplexPriority: 10000 + 500, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, }, }, order: []string{"c", "b", "a"}, @@ -301,19 +301,19 @@ func TestSortPolicy(t *testing.T) { policies: []*Policy{ { Rule: "a", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 10000 + 1000 + 500 + 100, }, - ComplexPriority: 10000 + 1000 + 500 + 100, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, }, { Rule: "b", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 50000 + 1000, }, - ComplexPriority: 50000 + 1000, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, }, }, order: []string{"b", "a"}, @@ -323,19 +323,19 @@ func TestSortPolicy(t *testing.T) { policies: []*Policy{ { Rule: "a", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 10000 + 1000, }, - ComplexPriority: 10000 + 1000, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, + InternalDSL: 
[]interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, }, { Rule: "b", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 10000 + 1000, }, - ComplexPriority: 10000 + 1000, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/b"}}, }, }, order: []string{"b", "a"}, @@ -346,19 +346,19 @@ func TestSortPolicy(t *testing.T) { policies: []*Policy{ { Rule: "b", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 10000 + 1000, }, - ComplexPriority: 10000 + 1000, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, }, { Rule: "a", - SameInRuleCr: SameInRuleCr{ - Priority: 5, + PolicySortBean: PolicySortBean{ + Priority: 5, + ComplexPriority: 10000 + 1000, }, - ComplexPriority: 10000 + 1000, - InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, + InternalDSL: []interface{}{[]string{utils.OP_STARTS_WITH, utils.KEY_URL, "/"}}, }, }, order: []string{"a", "b"}, @@ -367,7 +367,7 @@ func TestSortPolicy(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for i := range tt.policies { - tt.policies[i].InternalDSLLen = utils.InternalDSLLen((tt.policies[i].InternalDSL)) + tt.policies[i].PolicySortBean.InternalDSLLen = utils.InternalDSLLen((tt.policies[i].InternalDSL)) } sort.Sort(tt.policies) realOrder := []string{} diff --git a/controller/nginx.go b/controller/nginx.go index 13bfd0d3..f3396347 100644 --- a/controller/nginx.go +++ b/controller/nginx.go @@ -6,6 +6,7 @@ import ( "os" "os/exec" "strings" + "time" "alauda.io/alb2/controller/cli" m "alauda.io/alb2/controller/modules" @@ -17,9 +18,9 @@ import ( . "alauda.io/alb2/controller/types" "alauda.io/alb2/driver" gateway "alauda.io/alb2/gateway/nginx" - "alauda.io/alb2/pkg/controller/ngxconf" . "alauda.io/alb2/pkg/controller/ngxconf" . 
"alauda.io/alb2/pkg/controller/ngxconf/types" + pm "alauda.io/alb2/pkg/utils/metrics" "k8s.io/klog/v2" ) @@ -34,9 +35,9 @@ type NginxController struct { log logr.Logger lc *LeaderElection PortProber *PortProbe - albcli cli.AlbCli // load alb tree from k8s - policycli cli.PolicyCli // fetch policy needed cr from k8s into alb tree - ngxcli ngxconf.NgxCli // fetch ngxconf need cr from k8s into alb tree + albcli cli.AlbCli // load alb tree from k8s + policycli cli.PolicyCli // fetch policy needed cr from k8s into alb tree + ngxcli NgxCli // fetch ngxconf need cr from k8s into alb tree } func NewNginxController(kd *driver.KubernetesDriver, ctx context.Context, cfg *config.Config, log logr.Logger, leader *LeaderElection) *NginxController { @@ -101,6 +102,7 @@ func (nc *NginxController) GenerateNginxConfigAndPolicy() (nginxTemplateConfig N if len(alb.Frontends) == 0 { l.Info("No service bind to this nginx now ", "key", nc.albcfg.GeKey()) } + nc.albcli.CollectAndFetchRefs(alb) nginxPolicy = nc.policycli.GenerateAlbPolicy(alb) phase := state.GetState().GetPhase() @@ -116,9 +118,6 @@ func (nc *NginxController) GenerateNginxConfigAndPolicy() (nginxTemplateConfig N } } - if err = nc.ngxcli.FillUpRefCms(alb); err != nil { - return NginxTemplateConfig{}, NgxPolicy{}, err - } cfg, err := nc.ngxcli.GenerateNginxTemplateConfig(alb, string(phase), nc.albcfg) if err != nil { return NginxTemplateConfig{}, NgxPolicy{}, fmt.Errorf("generate nginx.conf fail %v", err) @@ -127,6 +126,11 @@ func (nc *NginxController) GenerateNginxConfigAndPolicy() (nginxTemplateConfig N } func (nc *NginxController) GetLBConfig() (*LoadBalancer, error) { + s := time.Now() + defer func() { + pm.Write("gen-lb-config", float64(time.Since(s).Milliseconds())) + }() + log := nc.log cfg := nc.albcfg ns := cfg.GetNs() @@ -205,12 +209,15 @@ func MergeLBConfig(alb *LoadBalancer, gateway *LoadBalancer) (*LoadBalancer, err } alb.Frontends = append(alb.Frontends, ft) } - return alb, nil } func (nc *NginxController) WriteConfig(nginxTemplateConfig NginxTemplateConfig, ngxPolicies NgxPolicy) error { - ngxconf, err := ngxconf.RenderNgxFromFile(nginxTemplateConfig, nc.TemplatePath) + s := time.Now() + defer func() { + pm.Write("update-file", float64(time.Since(s).Milliseconds())) + }() + ngxconf, err := RenderNgxFromFile(nginxTemplateConfig, nc.TemplatePath) if err != nil { return err } diff --git a/controller/nginx_test.go b/controller/nginx_test.go index e5e433e2..6b834ccf 100644 --- a/controller/nginx_test.go +++ b/controller/nginx_test.go @@ -11,6 +11,7 @@ import ( albv1 "alauda.io/alb2/pkg/apis/alauda/v1" albv2 "alauda.io/alb2/pkg/apis/alauda/v2beta1" ngxconf "alauda.io/alb2/pkg/controller/ngxconf" + ptu "alauda.io/alb2/pkg/utils/test_utils" "alauda.io/alb2/utils" "alauda.io/alb2/utils/log" "alauda.io/alb2/utils/test_utils" @@ -36,8 +37,8 @@ func TestPolicies_Less(t *testing.T) { { "1", []*Policy{ - {ComplexPriority: 100, SameInRuleCr: SameInRuleCr{Priority: 5}}, - {ComplexPriority: 100, SameInRuleCr: SameInRuleCr{Priority: 5}}, + {PolicySortBean: PolicySortBean{ComplexPriority: 100, Priority: 5}}, + {PolicySortBean: PolicySortBean{ComplexPriority: 100, Priority: 5}}, }, args{0, 1}, false, @@ -45,8 +46,8 @@ func TestPolicies_Less(t *testing.T) { { "2", []*Policy{ - {ComplexPriority: 100, SameInRuleCr: SameInRuleCr{Priority: 4}}, - {ComplexPriority: 100, SameInRuleCr: SameInRuleCr{Priority: 5}}, + {PolicySortBean: PolicySortBean{ComplexPriority: 100, Priority: 4}}, + {PolicySortBean: PolicySortBean{ComplexPriority: 100, Priority: 5}}, }, args{0, 
1}, true, @@ -54,8 +55,8 @@ func TestPolicies_Less(t *testing.T) { { "3", []*Policy{ - {ComplexPriority: 99, SameInRuleCr: SameInRuleCr{Priority: 5}}, - {ComplexPriority: 100, SameInRuleCr: SameInRuleCr{Priority: 5}}, + {PolicySortBean: PolicySortBean{ComplexPriority: 99, Priority: 5}}, + {PolicySortBean: PolicySortBean{ComplexPriority: 100, Priority: 5}}, }, args{0, 1}, false, @@ -63,8 +64,8 @@ func TestPolicies_Less(t *testing.T) { { "4", []*Policy{ - {ComplexPriority: 100, SameInRuleCr: SameInRuleCr{Priority: 100}, Rule: "a"}, - {ComplexPriority: 100, SameInRuleCr: SameInRuleCr{Priority: 100}, Rule: "b"}, + {PolicySortBean: PolicySortBean{ComplexPriority: 100, Priority: 100}, Rule: "a"}, + {PolicySortBean: PolicySortBean{ComplexPriority: 100, Priority: 100}, Rule: "b"}, }, args{0, 1}, true, @@ -79,24 +80,35 @@ func TestPolicies_Less(t *testing.T) { } } -func GenPolicyAndConfig(t *testing.T, env test_utils.FakeAlbEnv, res test_utils.FakeResource) (*NgxPolicy, string, error) { +func GenPolicyAndConfig(t *testing.T, res test_utils.FakeResource) (*NgxPolicy, string, error) { ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + env := test_utils.NewFakeEnv() + env.AssertStart() + l := log.L() + defer func() { + cancel() + env.Stop() + }() + kt := test_utils.NewKubectl("", env.GetCfg(), l) + _, err := kt.Kubectl("get crd -A") + assert.NoError(t, err) + cfg := config.DefaultMock() cfg.Name = "alb-1" cfg.Ns = "ns-1" cfg.SetDomain("alauda.io") cfg.Controller.Flags.EnableAlb = true config.UseMock(cfg) - err := env.ApplyFakes(res) + err = env.ApplyFakes(res) assert.NoError(t, err) - drv, err := driver.GetAndInitKubernetesDriverFromCfg(ctx, env.GetCfg()) + _, err = kt.Kubectl("get frontends -A") + assert.NoError(t, err) + drv, err := driver.NewDriver(driver.NewDrvOpt(ctx, env.GetCfg(), cfg)) assert.NoError(t, err) - ctl := NewNginxController(drv, ctx, cfg, log.L(), nil) + ctl := NewNginxController(drv, ctx, cfg, l, nil) nginxConfig, nginxPolicy, err := ctl.GenerateNginxConfigAndPolicy() assert.NoError(t, err) - env.ClearFakes(res) // marshal and unmarshal to make sure we generate a valid policy json file policy := NgxPolicy{} nginxPolicyJson, err := json.MarshalIndent(nginxPolicy, " ", " ") @@ -173,15 +185,12 @@ func TestGenerateAlbPolicyAndConfig(t *testing.T) { } } } - env := test_utils.NewFakeEnv() - env.AssertStart() for _, c := range casesRun { t.Logf("run test %s", c.Name) - albPolicy, ngxCfg, err := GenPolicyAndConfig(t, env, c.Res()) + albPolicy, ngxCfg, err := GenPolicyAndConfig(t, c.Res()) assert.NoError(t, err, c.Name) c.Assert(*albPolicy, ngxCfg) } - env.Stop() } defaultAlb := []albv2.ALB2{ { @@ -546,7 +555,7 @@ func TestGenerateAlbPolicyAndConfig(t *testing.T) { } }, Assert: func(albPolicy NgxPolicy, ngxCfg string) { - listen, err := test_utils.PickStreamServerListen(ngxCfg) + listen, err := ptu.PickStreamServerListen(ngxCfg) assert.NoError(t, err) assert.Equal(t, listen, []string{"0.0.0.0:8000", "[::]:8000"}) policies := albPolicy.Stream.Tcp[8000] @@ -662,7 +671,7 @@ func TestGenerateAlbPolicyAndConfig(t *testing.T) { } }, Assert: func(albPolicy NgxPolicy, ngxCfg string) { - listen, err := test_utils.PickStreamServerListen(ngxCfg) + listen, err := ptu.PickStreamServerListen(ngxCfg) assert.NoError(t, err) assert.Equal(t, listen, []string{"0.0.0.0:8000 udp", "[::]:8000 udp"}) @@ -811,10 +820,10 @@ func TestGenerateAlbPolicyAndConfig(t *testing.T) { } }, Assert: func(p NgxPolicy, cfg string) { - listen, err := test_utils.PickHttpServerListen(cfg) + listen, err := 
ptu.PickHttpServerListen(cfg) assert.NoError(t, err) assert.Equal(t, listen, []string{"0.0.0.0:1936 ssl", "[::]:1936 ssl", "0.0.0.0:80 backlog=100 default_server", "[::]:80 backlog=100 default_server"}) - listen, err = test_utils.PickStreamServerListen(cfg) + listen, err = ptu.PickStreamServerListen(cfg) assert.NoError(t, err) assert.Equal(t, listen, []string{"0.0.0.0:53", "[::]:53", "0.0.0.0:53 udp", "[::]:53 udp"}) }, diff --git a/controller/sync_lb_service.go b/controller/sync_lb_service.go index c2c9ee25..6beb4a2c 100644 --- a/controller/sync_lb_service.go +++ b/controller/sync_lb_service.go @@ -3,6 +3,7 @@ package controller import ( "context" "fmt" + "time" "alauda.io/alb2/controller/types" albv1 "alauda.io/alb2/pkg/apis/alauda/v1" @@ -10,6 +11,8 @@ import ( mapset "github.com/deckarep/golang-set/v2" "github.com/google/go-cmp/cmp" "github.com/samber/lo" + + pm "alauda.io/alb2/pkg/utils/metrics" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,6 +25,10 @@ import ( // in container mode, we want to create/update loadbalancer tcp/udp service,use it as high available solution. func (nc *NginxController) SyncLbSvcPort(frontends []*types.Frontend) error { + s := time.Now() + defer func() { + pm.Write("sync-lb-svc", float64(time.Since(s).Milliseconds())) + }() return MixProtocolLbSvc{nc: nc}.sync(nc.Ctx, frontends) } diff --git a/controller/types/consts.go b/controller/types/consts.go new file mode 100644 index 00000000..5f626a22 --- /dev/null +++ b/controller/types/consts.go @@ -0,0 +1,28 @@ +package types + +const ( + SubsystemHTTP = "http" + SubsystemStream = "stream" + + PolicySIPHash = "sip-hash" + PolicyCookie = "cookie" + + CaCert = "ca.crt" +) + +var ( + LastConfig = "" + LastFailure = false +) + +const ( + ModeTCP = "tcp" + ModeHTTP = "http" + ModeUDP = "udp" + ModegRPC = "grpc" +) + +const ( + RuleTypeIngress = "ingress" + RuleTypeGateway = "gateway" +) diff --git a/controller/types/helper.go b/controller/types/helper.go new file mode 100644 index 00000000..6b69d95b --- /dev/null +++ b/controller/types/helper.go @@ -0,0 +1,164 @@ +package types + +import ( + "fmt" + + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + v1 "alauda.io/alb2/pkg/apis/alauda/v1" + otelt "alauda.io/alb2/pkg/controller/ext/otel/types" +) + +func (p Policy) GetOtel() *otelt.OtelConf { + return p.Config.Otel +} + +func (p *NgxPolicy) GetBackendGroup(name string) *BackendGroup { + for _, be := range p.BackendGroup { + if be.Name == name { + return be + } + } + return nil +} + +func (p *HttpPolicy) GetPoliciesByPort(port int) Policies { + return p.Tcp[albv1.PortNumber(port)] +} + +func (p Policies) Len() int { return len(p) } + +func (p Policies) Less(i, j int) bool { + // raw priority is set by user it should be [1,10]. the smaller the number, the higher the ranking + if p[i].Priority != p[j].Priority { + return p[i].Priority < p[j].Priority + } + // priority is calculated by the "complex" of this policy. 
the bigger the number, the higher the ranking + if p[i].ComplexPriority != p[j].ComplexPriority { + return p[i].ComplexPriority > p[j].ComplexPriority + } + if p[i].InternalDSLLen != p[j].InternalDSLLen { + return p[i].InternalDSLLen > p[j].InternalDSLLen + } + return p[i].Rule < p[j].Rule +} + +func (p Policies) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (rl InternalRule) AllowNoAddr() bool { + return rl.Config.Redirect != nil && rl.Config.Redirect.RedirectURL != "" +} + +func (rl InternalRule) GetRawPriority() int { + return rl.Priority +} + +func (rl InternalRule) GetPriority() int { + return rl.DSLX.Priority() +} + +type RuleList []*InternalRule + +type BackendGroups []*BackendGroup + +func (bgs BackendGroups) Len() int { + return len(bgs) +} + +func (bgs BackendGroups) Swap(i, j int) { + bgs[i], bgs[j] = bgs[j], bgs[i] +} + +func (bgs BackendGroups) Less(i, j int) bool { + return bgs[i].Name > bgs[j].Name +} + +func (bg BackendGroup) Eq(other BackendGroup) bool { + // bg equal other + return bg.Name == other.Name && + bg.Mode == other.Mode && + bg.SessionAffinityAttribute == other.SessionAffinityAttribute && + bg.SessionAffinityPolicy == other.SessionAffinityPolicy && + bg.Backends.Eq(other.Backends) +} + +func FtProtocolToBackendMode(protocol v1.FtProtocol) string { + switch protocol { + case v1.FtProtocolTCP: + return ModeTCP + case v1.FtProtocolUDP: + return ModeUDP + case v1.FtProtocolHTTP: + return ModeHTTP + case v1.FtProtocolHTTPS: + return ModeHTTP + case v1.FtProtocolgRPC: + return ModegRPC + } + return "" +} + +func (ft *Frontend) String() string { + return fmt.Sprintf("%s-%d-%s", ft.AlbName, ft.Port, ft.Protocol) +} + +func (ft *Frontend) IsTcpBaseProtocol() bool { + return ft.Protocol == v1.FtProtocolHTTP || + ft.Protocol == v1.FtProtocolHTTPS || + ft.Protocol == v1.FtProtocolTCP +} + +func (ft *Frontend) IsStreamMode() bool { + return ft.Protocol == v1.FtProtocolTCP || ft.Protocol == v1.FtProtocolUDP +} + +func (ft *Frontend) IsHttpMode() bool { + return ft.Protocol == v1.FtProtocolHTTP || ft.Protocol == v1.FtProtocolHTTPS +} + +func (ft *Frontend) IsGRPCMode() bool { + return ft.Protocol == v1.FtProtocolgRPC +} + +func (ft *Frontend) IsValidProtocol() bool { + return ft.Protocol == v1.FtProtocolHTTP || + ft.Protocol == v1.FtProtocolHTTPS || + ft.Protocol == v1.FtProtocolTCP || + ft.Protocol == v1.FtProtocolUDP || + ft.Protocol == v1.FtProtocolgRPC +} + +func (b *Backend) Eq(other *Backend) bool { + return b.Address == other.Address && b.Port == other.Port && b.Weight == other.Weight +} + +func (b Backend) String() string { + return fmt.Sprintf("%v-%v-%v", b.Address, b.Port, b.Weight) +} + +func (bs Backends) Len() int { + return len(bs) +} + +func (bs Backends) Less(i, j int) bool { + return bs[i].String() < bs[j].String() +} + +func (bs Backends) Swap(i, j int) { + bs[i], bs[j] = bs[j], bs[i] +} + +func (bs Backends) Eq(other Backends) bool { + if len(bs) != len(other) { + return false + } + for i := range bs { + if !bs[i].Eq(other[i]) { + return false + } + } + return true +} + +func (r RewriteResponseConfig) IsEmpty() bool { + return len(r.Headers) == 0 +} diff --git a/controller/types/policy.go b/controller/types/policy.go deleted file mode 100644 index d1fbff7e..00000000 --- a/controller/types/policy.go +++ /dev/null @@ -1,116 +0,0 @@ -package types - -import ( - albv1 "alauda.io/alb2/pkg/apis/alauda/v1" - otelt "alauda.io/alb2/pkg/controller/ext/otel/types" -) - -// keep it as same as rule -type Policy struct { - InternalDSL []interface{} 
`json:"internal_dsl"` // dsl determine whether a request match this rule, same as rule.spec.dlsx - InternalDSLLen int `json:"-"` // the len of jsonstringify internal dsl, used to sort policy - Upstream string `json:"upstream"` // name in backend group - ComplexPriority int `json:"-"` // priority calculated by the complex of dslx, used to sort policy after user_priority - Subsystem string `json:"-"` - - Rule string `json:"rule"` // the name of rule, corresponding with k8s rule cr - Config *RuleConfigInPolicy `json:"config,omitempty"` - - SameInRuleCr - SameInPolicy - SourceType string `json:"source_type,omitempty"` - SourceName string `json:"source_name,omitempty"` - SourceNs string `json:"source_ns,omitempty"` -} - -func (p Policy) GetOtel() *otelt.OtelConf { - if p.Config == nil || p.Config.Otel == nil || p.Config.Otel.Otel == nil { - return nil - } - return p.Config.Otel.Otel -} - -type SameInRuleCr struct { - Priority int `json:"-"` // priority set by user, used to sort policy which is rule's priority - DSLX albv1.DSLX `json:"-"` - URL string `json:"url"` - RewriteBase string `json:"rewrite_base"` - RewriteTarget string `json:"rewrite_target"` - EnableCORS bool `json:"enable_cors"` - CORSAllowHeaders string `json:"cors_allow_headers"` - CORSAllowOrigin string `json:"cors_allow_origin"` - BackendProtocol string `json:"backend_protocol"` - RedirectURL string `json:"redirect_url"` - VHost string `json:"vhost"` - RedirectCode int `json:"redirect_code"` - Source *albv1.Source `json:"source,omitempty"` -} - -type SameInPolicy struct { - RewritePrefixMatch *string `json:"rewrite_prefix_match,omitempty"` - RewriteReplacePrefix *string `json:"rewrite_replace_prefix,omitempty"` - RedirectScheme *string `json:"redirect_scheme,omitempty"` - RedirectHost *string `json:"redirect_host,omitempty"` - RedirectPort *int `json:"redirect_port,omitempty"` - RedirectPrefixMatch *string `json:"redirect_prefix_match,omitempty"` - RedirectReplacePrefix *string `json:"redirect_replace_prefix,omitempty"` - ToLocation *string `json:"to_location,omitempty"` -} - -type NgxPolicy struct { - CertificateMap map[string]Certificate `json:"certificate_map"` - Http HttpPolicy `json:"http"` - Stream StreamPolicy `json:"stream"` - CommonConfig CommonPolicyConfig `json:"config"` - BackendGroup []*BackendGroup `json:"backend_group"` -} - -func (p *NgxPolicy) GetBackendGroup(name string) *BackendGroup { - for _, be := range p.BackendGroup { - if be.Name == name { - return be - } - } - return nil -} - -type HttpPolicy struct { - Tcp map[albv1.PortNumber]Policies `json:"tcp"` -} - -func (p *HttpPolicy) GetPoliciesByPort(port int) Policies { - return p.Tcp[albv1.PortNumber(port)] -} - -type CommonPolicyConfig map[string]CommonPolicyConfigVal - -type CommonPolicyConfigVal struct { - Type string `json:"type"` - Otel *otelt.OtelInCommon `json:"otel,omitempty"` -} - -type StreamPolicy struct { - Tcp map[albv1.PortNumber]Policies `json:"tcp"` - Udp map[albv1.PortNumber]Policies `json:"udp"` -} - -type Policies []*Policy - -func (p Policies) Len() int { return len(p) } - -func (p Policies) Less(i, j int) bool { - // raw priority is set by user it should be [1,10]. the smaller the number, the higher the ranking - if p[i].Priority != p[j].Priority { - return p[i].Priority < p[j].Priority - } - // priority is calculated by the "complex" of this policy. 
the bigger the number, the higher the ranking - if p[i].ComplexPriority != p[j].ComplexPriority { - return p[i].ComplexPriority > p[j].ComplexPriority - } - if p[i].InternalDSLLen != p[j].InternalDSLLen { - return p[i].InternalDSLLen > p[j].InternalDSLLen - } - return p[i].Rule < p[j].Rule -} - -func (p Policies) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/controller/types/policyext_test.go b/controller/types/policyext_test.go new file mode 100644 index 00000000..c5ac0c78 --- /dev/null +++ b/controller/types/policyext_test.go @@ -0,0 +1,33 @@ +package types + +import ( + "encoding/json" + "testing" + + otelt "alauda.io/alb2/pkg/controller/ext/otel/types" + "github.com/stretchr/testify/assert" +) + +func TestToMaps(t *testing.T) { + p := PolicyExt{ + Otel: &otelt.OtelConf{ + Exporter: &otelt.Exporter{}, + }, + RewriteResponse: &RewriteResponseConfig{}, + } + m := p.ToMaps() + js, err := json.MarshalIndent(m, " ", " ") + assert.NoError(t, err) + t.Logf("m %s", string(js)) + assert.Equal(t, 2, len(m)) +} + +func TestClean(t *testing.T) { + p := PolicyExt{ + Otel: &otelt.OtelConf{ + Exporter: &otelt.Exporter{}, + }, + RewriteResponse: &RewriteResponseConfig{}, + } + p.Clean("otel") +} diff --git a/controller/types/types.go b/controller/types/types.go index 484aba42..615f7942 100644 --- a/controller/types/types.go +++ b/controller/types/types.go @@ -1,54 +1,31 @@ package types import ( - "encoding/json" - "fmt" - gatewayPolicy "alauda.io/alb2/pkg/apis/alauda/gateway/v1alpha1" + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" v1 "alauda.io/alb2/pkg/apis/alauda/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + auth_t "alauda.io/alb2/pkg/controller/ext/auth/types" otelt "alauda.io/alb2/pkg/controller/ext/otel/types" waft "alauda.io/alb2/pkg/controller/ext/waf/types" - corev1 "k8s.io/api/core/v1" -) -const ( - SubsystemHTTP = "http" - SubsystemStream = "stream" - - PolicySIPHash = "sip-hash" - PolicyCookie = "cookie" - - CaCert = "ca.crt" -) - -var ( - LastConfig = "" - LastFailure = false + corev1 "k8s.io/api/core/v1" ) -type Domain struct { - Domain string `json:"domain"` - Type string `json:"type,omitempty"` - Disabled bool `json:"disabled"` +type RefMap struct { + ConfigMap map[client.ObjectKey]*corev1.ConfigMap + Secret map[client.ObjectKey]*corev1.Secret } type LoadBalancer struct { - Labels map[string]string `json:"-"` - Name string `json:"name"` - Address string `json:"address"` - Type string `json:"type"` - Version int `json:"version"` - Frontends []*Frontend `json:"frontends"` - CmRefs map[string]*corev1.ConfigMap -} - -type Certificate struct { - Cert string `json:"cert"` - Key string `json:"key"` -} - -type CaCertificate struct { - Cert string `json:"cert"` + Labels map[string]string + Name string + Address string + Type string + Version int + Frontends []*Frontend + Refs RefMap } type Frontend struct { @@ -65,96 +42,21 @@ type Frontend struct { Conflict bool `json:"-"` } -func (ft *Frontend) String() string { - return fmt.Sprintf("%s-%d-%s", ft.AlbName, ft.Port, ft.Protocol) -} - -func (ft *Frontend) IsTcpBaseProtocol() bool { - return ft.Protocol == v1.FtProtocolHTTP || - ft.Protocol == v1.FtProtocolHTTPS || - ft.Protocol == v1.FtProtocolTCP -} - -func (ft *Frontend) IsStreamMode() bool { - return ft.Protocol == v1.FtProtocolTCP || ft.Protocol == v1.FtProtocolUDP -} - -func (ft *Frontend) IsHttpMode() bool { - return ft.Protocol == v1.FtProtocolHTTP || ft.Protocol == v1.FtProtocolHTTPS -} - -func (ft *Frontend) IsGRPCMode() bool { - return ft.Protocol == v1.FtProtocolgRPC -} 
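// Illustrative note, not part of the patch: TestToMaps added above exercises PolicyExt.ToMaps,
// which splits one PolicyExt into a map keyed by extension kind, with one single-field
// PolicyExt per non-nil extension; Clean removes exactly one kind. Presumably this is the
// building block for de-duplicating per-kind extension configs. A minimal usage sketch:
//
//	p := PolicyExt{Otel: &otelt.OtelConf{}, RewriteResponse: &RewriteResponseConfig{}}
//	m := p.ToMaps() // two entries, keyed "otel" and "rewrite_response"
//	p.Clean(Otel)   // zeroes only the otel field; RewriteResponse is left untouched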
- -func (ft *Frontend) IsValidProtocol() bool { - return ft.Protocol == v1.FtProtocolHTTP || - ft.Protocol == v1.FtProtocolHTTPS || - ft.Protocol == v1.FtProtocolTCP || - ft.Protocol == v1.FtProtocolUDP || - ft.Protocol == v1.FtProtocolgRPC -} - -func (b *Backend) Eq(other *Backend) bool { - return b.Address == other.Address && b.Port == other.Port && b.Weight == other.Weight -} - -func (b Backend) String() string { - return fmt.Sprintf("%v-%v-%v", b.Address, b.Port, b.Weight) -} - -func (bs Backends) Len() int { - return len(bs) -} - -func (bs Backends) Less(i, j int) bool { - return bs[i].String() < bs[j].String() -} - -func (bs Backends) Swap(i, j int) { - bs[i], bs[j] = bs[j], bs[i] +type Domain struct { + Domain string `json:"domain"` + Type string `json:"type,omitempty"` + Disabled bool `json:"disabled"` } -func (bs Backends) Eq(other Backends) bool { - if len(bs) != len(other) { - return false - } - for i := range bs { - if !bs[i].Eq(other[i]) { - return false - } - } - return true +type Certificate struct { + Cert string `json:"cert"` + Key string `json:"key"` } -const ( - ModeTCP = "tcp" - ModeHTTP = "http" - ModeUDP = "udp" - ModegRPC = "grpc" -) - -func FtProtocolToBackendMode(protocol v1.FtProtocol) string { - switch protocol { - case v1.FtProtocolTCP: - return ModeTCP - case v1.FtProtocolUDP: - return ModeUDP - case v1.FtProtocolHTTP: - return ModeHTTP - case v1.FtProtocolHTTPS: - return ModeHTTP - case v1.FtProtocolgRPC: - return ModegRPC - } - return "" +type CaCertificate struct { + Cert string `json:"cert"` } -const ( - RuleTypeIngress = "ingress" - RuleTypeGateway = "gateway" -) - type BackendGroup struct { Name string `json:"name"` SessionAffinityPolicy string `json:"session_affinity_policy"` @@ -186,73 +88,225 @@ type BackendService struct { Weight int `json:"weight"` } -// rule cr/gateway cr => rule => policy -type Rule struct { - Type string `json:"type"` - Description string `json:"description"` - Domain string `json:"domain"` // used to fetch cert. 
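// Illustrative note, not part of the patch: in the reworked types just below, NgxPolicy gains
// SharedConfig (a map of RefBox values) and every Policy carries Config.Refs, a map from
// extension kind to a key in that shared map. The apparent intent is that cus.ResolvePolicies
// hoists repeated extension configs (otel, auth, ...) out of the individual policies and leaves
// only a reference behind; the exact key format is not shown in this diff, so the one below is
// made up. Shape-wise the rendered policy.json would then look roughly like:
//
//	ngx := NgxPolicy{SharedConfig: SharedExtPolicyConfig{
//		"otel-<hash>": RefBox{Type: Otel, PolicyExt: PolicyExt{Otel: &otelt.OtelConf{}}},
//	}}
//	p := Policy{Config: PolicyExtCfg{Refs: map[PolicyExtKind]string{Otel: "otel-<hash>"}}}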
+type NgxPolicy struct { + CertificateMap map[string]Certificate `json:"certificate_map"` + Http HttpPolicy `json:"http"` + Stream StreamPolicy `json:"stream"` + SharedConfig SharedExtPolicyConfig `json:"config"` + BackendGroup []*BackendGroup `json:"backend_group"` +} - // CertificateName = namespace_secretName - CertificateName string `json:"certificate_name"` +type ( + Policies []*Policy + HttpPolicy struct { + Tcp map[albv1.PortNumber]Policies `json:"tcp"` + } +) - SessionAffinityPolicy string `json:"session_affinity_policy"` - SessionAffinityAttr string `json:"session_affinity_attribute"` - Services []*BackendService `json:"services"` // 这条规则对应的后端服务 - BackendGroup *BackendGroup `json:"-"` // 这条规则对应的后端 pod 的 ip +type SharedExtPolicyConfig map[string]RefBox - RuleID string `json:"rule_id"` - Config *RuleConfigInPolicy `json:"config,omitempty"` +type StreamPolicy struct { + Tcp map[albv1.PortNumber]Policies `json:"tcp"` + Udp map[albv1.PortNumber]Policies `json:"udp"` +} - Waf *waft.WafInRule `json:"waf"` // waf not need to be in policy.json - SameInRuleCr - SameInPolicy +// keep it as same as rule +type Source struct { + SourceType string `json:"source_type,omitempty"` + SourceName string `json:"source_name,omitempty"` + SourceNs string `json:"source_ns,omitempty"` } -func (rl Rule) AllowNoAddr() bool { - return rl.RedirectURL != "" +type Policy struct { + // match + InternalDSL []interface{} `json:"internal_dsl"` // dsl determine whether a request match this rule, same as rule.spec.dlsx + + PolicySortBean `json:"-"` + + Upstream string `json:"upstream"` // upstream_refs + BackendProtocol string `json:"backend_protocol"` // set to variable $backend_protocol, used in proxy_pass $backend_protocol://http_backend; in nginx.conf + + // meta + Rule string `json:"rule"` // rule_refs the name of rule, corresponding with k8s rule cr + Subsystem string `json:"subsystem"` + Source + + LegacyExtInPolicy // some legacy extension should migrate to the config field + Config PolicyExtCfg `json:"config"` // config or reference + + ToLocation *string `json:"to_location,omitempty"` + + Plugins []string `json:"plugins"` // a list of lua module which enabled for this rule } -func (rl Rule) GetRawPriority() int { - return rl.Priority +type PolicySortBean struct { + Priority int `json:"-"` // priority set by user, used to sort policy which is rule's priority + ComplexPriority int + InternalDSLLen int } -func (rl Rule) GetPriority() int { - return rl.DSLX.Priority() +// rule cr/gateway cr => internal-rule => policy +// 一个internal rule 代表了一个转发规则的最小的*完整*的信息单元 +// 最核心的有 +// 1. match 描述请求和规则是否匹配 +// 2. sortbean 描述这个规则在所有规则中的排序位置 +// 3. upstream 转发到那个后端,这个后端的转发相关的配置 + +type InternalRule struct { + RuleMeta + RuleMatch + RuleCert + Config RuleExt + RuleUpstream } -type RuleList []*Rule +type RuleMeta struct { + Type string `json:"type"` // 这个internal rule是从那个结构转换来的,目前有rule和gateway + RuleID string `json:"rule_id"` // rule的标示,对alb-rule 是alb-rule的name,对gateway api route是这个route的唯一路径 + Source *v1.Source `json:"source,omitempty"` + Priority int `json:"-"` // priority set by user, used to sort policy which is rule's priority +} -type BackendGroups []*BackendGroup +type RuleMatch struct { // 和匹配规则 + DSLX v1.DSLX `json:"-"` +} -func (bgs BackendGroups) Len() int { - return len(bgs) +type RuleCert struct { // 和证书有关的配置 + // CertificateName = namespace_secretName + CertificateName string `json:"certificate_name"` // cert_ref + Domain string `json:"domain"` // used to fetch cert. 
} -func (bgs BackendGroups) Swap(i, j int) { - bgs[i], bgs[j] = bgs[j], bgs[i] +// 直接放在rule.spec 而不是rule.spec.config +type Vhost struct { + VHost string `json:"vhost"` // ext vhost } -func (bgs BackendGroups) Less(i, j int) bool { - return bgs[i].Name > bgs[j].Name +type RewriteConf struct { + URL string `json:"url"` // for rewrite // alb-rule + RewriteBase string `json:"rewrite_base"` // alb-rule + RewriteTarget string `json:"rewrite_target"` // alb-rule + RewritePrefixMatch *string `json:"rewrite_prefix_match,omitempty"` // gatewayapi-httproute + RewriteReplacePrefix *string `json:"rewrite_replace_prefix,omitempty"` // gatewayapi-httproute } -func (bg BackendGroup) Eq(other BackendGroup) bool { - // bg equal other - return bg.Name == other.Name && - bg.Mode == other.Mode && - bg.SessionAffinityAttribute == other.SessionAffinityAttribute && - bg.SessionAffinityPolicy == other.SessionAffinityPolicy && - bg.Backends.Eq(other.Backends) +type RedirectConf struct { + RedirectURL string `json:"redirect_url"` // alb-rule + RedirectCode int `json:"redirect_code"` // alb-rule + + RedirectScheme *string `json:"redirect_scheme,omitempty"` // gatewayapi-httproute + RedirectHost *string `json:"redirect_host,omitempty"` // gatewayapi-httproute + RedirectPort *int `json:"redirect_port,omitempty"` // gatewayapi-httproute + RedirectPrefixMatch *string `json:"redirect_prefix_match,omitempty"` // gatewayapi-httproute + RedirectReplacePrefix *string `json:"redirect_replace_prefix,omitempty"` // gatewayapi-httproute } -// policy.json http match rule config -type RuleConfigInPolicy struct { +type Cors struct { // ext cors + EnableCORS bool `json:"enable_cors"` + CORSAllowHeaders string `json:"cors_allow_headers"` + CORSAllowOrigin string `json:"cors_allow_origin"` +} + +type RuleExt struct { // 不同的扩展的配置 + Rewrite *RewriteConf + Redirect *RedirectConf + Cors *Cors + Vhost *Vhost + Timeout *gatewayPolicy.TimeoutPolicyConfig `json:"timeout,omitempty"` + RewriteResponse *RewriteResponseConfig `json:"rewrite_response,omitempty"` + RewriteRequest *RewriteRequestConfig `json:"rewrite_request,omitempty"` + Otel *otelt.OtelConf `json:"otel,omitempty"` + Waf *waft.WafInternal + Auth *auth_t.AuthCr +} + +type PolicyExtKind string + +// keep this as same as policy_ext json annotation +const ( + Rewrite PolicyExtKind = "rewrite" + CORS PolicyExtKind = "cors" + RewriteRequest PolicyExtKind = "rewrite_request" + RewriteResponse PolicyExtKind = "rewrite_response" + Timeout PolicyExtKind = "timeout" + Otel PolicyExtKind = "otel" + Waf PolicyExtKind = "waf" + Auth PolicyExtKind = "auth" +) + +type PolicyExt struct { RewriteResponse *RewriteResponseConfig `json:"rewrite_response,omitempty"` RewriteRequest *RewriteRequestConfig `json:"rewrite_request,omitempty"` Timeout *gatewayPolicy.TimeoutPolicyConfig `json:"timeout,omitempty"` - Otel *otelt.OtelInPolicy `json:"otel,omitempty"` + Otel *otelt.OtelConf `json:"otel,omitempty"` + Auth *auth_t.AuthPolicy `json:"auth,omitempty"` +} + +type PolicyExtCfg struct { + PolicyExt + Refs map[PolicyExtKind]string `json:"refs"` +} + +// TODO use code-gen +func (p *PolicyExt) Clean(key PolicyExtKind) { + if key == RewriteRequest { + p.RewriteRequest = nil + } + if key == RewriteResponse { + p.RewriteResponse = nil + } + if key == Timeout { + p.Timeout = nil + } + if key == Otel { + p.Otel = nil + } + if key == Auth { + p.Auth = nil + } +} + +// 将其转换为map方便后续去重 +// TODO use code-gen +func (p PolicyExt) ToMaps() PolicyExtMap { + m := PolicyExtMap{} + if p.RewriteResponse != nil { + 
m[RewriteResponse] = &PolicyExt{RewriteResponse: p.RewriteResponse} + } + if p.RewriteRequest != nil { + m[RewriteRequest] = &PolicyExt{RewriteRequest: p.RewriteRequest} + } + if p.Timeout != nil { + m[Timeout] = &PolicyExt{Timeout: p.Timeout} + } + if p.Otel != nil { + m[Otel] = &PolicyExt{Otel: p.Otel} + } + if p.Auth != nil { + m[Auth] = &PolicyExt{Auth: p.Auth} + } + return m +} + +type PolicyExtMap = map[PolicyExtKind]*PolicyExt + +type LegacyExtInPolicy struct { + RewriteConf + RedirectConf + Cors + Vhost +} + +type RuleUpstream struct { // 不同的扩展的配置 + BackendProtocol string `json:"backend_protocol"` // set to variable $backend_protocol, used in proxy_pass $backend_protocol://http_backend; in nginx.conf + SessionAffinityPolicy string `json:"session_affinity_policy"` // will be set in upstream config + SessionAffinityAttr string `json:"session_affinity_attribute"` // will be set in upstream config + Services []*BackendService `json:"services"` // 这条规则对应的后端服务 + BackendGroup *BackendGroup `json:"-"` // 这条规则对应的后端 pod 的 ip } +// policy.json http match rule config + type RewriteResponseConfig struct { Headers map[string]string `json:"headers,omitempty"` HeadersRemove []string `json:"headers_remove,omitempty"` @@ -267,11 +321,9 @@ type RewriteRequestConfig struct { HeadersAddVar map[string][]string `json:"headers_add_var,omitempty"` } -func (r RewriteResponseConfig) IsEmpty() bool { - return len(r.Headers) == 0 -} - -func (r RuleConfigInPolicy) ToJsonString() (string, error) { - ret, err := json.Marshal(&r) - return string(ret), err +type RefBox struct { + Hash string `json:"-"` + Type PolicyExtKind `json:"type"` + Note *string `json:"note,omitempty"` + PolicyExt } diff --git a/deploy/chart/alb/Chart.yaml b/deploy/chart/alb/Chart.yaml index e3765133..aadc17e1 100644 --- a/deploy/chart/alb/Chart.yaml +++ b/deploy/chart/alb/Chart.yaml @@ -1,8 +1,8 @@ name: alauda-alb2 -version: v3.19.0-beta.10.g6bb115e1 +version: v3.19.0-beta.11.g44488b9b description: alauda loadbalancer version 2 apiVersion: v2 annotations: release: alpha branch: master - commit: 6bb115e16053f07ab913fd9d9849e8895481c5ed + commit: 44488b9befdd33f7e5ce7a8025e80d04939c6e57 diff --git a/deploy/chart/alb/crds/crd.alauda.io_alaudaloadbalancer2.yaml b/deploy/chart/alb/crds/crd.alauda.io_alaudaloadbalancer2.yaml index d30fdaea..c78e6d91 100644 --- a/deploy/chart/alb/crds/crd.alauda.io_alaudaloadbalancer2.yaml +++ b/deploy/chart/alb/crds/crd.alauda.io_alaudaloadbalancer2.yaml @@ -134,6 +134,44 @@ spec: properties: antiAffinityKey: type: string + auth: + properties: + basic: + properties: + auth_type: + description: only support basic now + type: string + realm: + type: string + secret: + type: string + secret_type: + description: auth-file|auth-map + type: string + type: object + forward: + description: auth via forward request + properties: + always_set_cookie: + type: boolean + auth_headers_cm_ref: + type: string + auth_request_redirect: + type: string + method: + type: string + signin: + type: string + signin_redirect_param: + type: string + upstream_headers: + items: + type: string + type: array + url: + type: string + type: object + type: object backlog: type: integer bindNIC: @@ -515,6 +553,44 @@ spec: properties: antiAffinityKey: type: string + auth: + properties: + basic: + properties: + auth_type: + description: only support basic now + type: string + realm: + type: string + secret: + type: string + secret_type: + description: auth-file|auth-map + type: string + type: object + forward: + description: auth via forward request 
+ properties: + always_set_cookie: + type: boolean + auth_headers_cm_ref: + type: string + auth_request_redirect: + type: string + method: + type: string + signin: + type: string + signin_redirect_param: + type: string + upstream_headers: + items: + type: string + type: array + url: + type: string + type: object + type: object backlog: type: integer bindNIC: diff --git a/deploy/chart/alb/crds/crd.alauda.io_frontends.yaml b/deploy/chart/alb/crds/crd.alauda.io_frontends.yaml index 438db1a2..c27e431f 100644 --- a/deploy/chart/alb/crds/crd.alauda.io_frontends.yaml +++ b/deploy/chart/alb/crds/crd.alauda.io_frontends.yaml @@ -53,6 +53,44 @@ spec: type: string config: properties: + auth: + properties: + basic: + properties: + auth_type: + description: only support basic now + type: string + realm: + type: string + secret: + type: string + secret_type: + description: auth-file|auth-map + type: string + type: object + forward: + description: auth via forward request + properties: + always_set_cookie: + type: boolean + auth_headers_cm_ref: + type: string + auth_request_redirect: + type: string + method: + type: string + signin: + type: string + signin_redirect_param: + type: string + upstream_headers: + items: + type: string + type: array + url: + type: string + type: object + type: object modsecurity: properties: cmRef: diff --git a/deploy/chart/alb/crds/crd.alauda.io_rules.yaml b/deploy/chart/alb/crds/crd.alauda.io_rules.yaml index e442cba4..68656a40 100644 --- a/deploy/chart/alb/crds/crd.alauda.io_rules.yaml +++ b/deploy/chart/alb/crds/crd.alauda.io_rules.yaml @@ -70,6 +70,44 @@ spec: type: string config: properties: + auth: + properties: + basic: + properties: + auth_type: + description: only support basic now + type: string + realm: + type: string + secret: + type: string + secret_type: + description: auth-file|auth-map + type: string + type: object + forward: + description: auth via forward request + properties: + always_set_cookie: + type: boolean + auth_headers_cm_ref: + type: string + auth_request_redirect: + type: string + method: + type: string + signin: + type: string + signin_redirect_param: + type: string + upstream_headers: + items: + type: string + type: array + url: + type: string + type: object + type: object modsecurity: properties: cmRef: diff --git a/deploy/chart/alb/module-plugin.yaml b/deploy/chart/alb/module-plugin.yaml new file mode 100644 index 00000000..964df001 --- /dev/null +++ b/deploy/chart/alb/module-plugin.yaml @@ -0,0 +1,31 @@ +apiVersion: cluster.alauda.io/v1alpha1 +kind: ModulePlugin +metadata: + annotations: + cpaas.io/module-name: "alb" + cpaas.io/display-name: '{"en": "ALB Operator", "zh": "ALB Operator"}' + cpaas.io/built-in-plugin: "true" + labels: + cpaas.io/module-type: plugin + cpaas.io/module-name: alb + cpaas.io/auto-install: "true" + name: alb +spec: + name: alb + logo: "" + description: + en: "Load Balancer Operator" + zh: "负载均衡器 Operator" + deleteable: false + labelCluster: "false" + appReleases: + - name: alauda-alb2 + chartVersions: + - name: acp/chart-alauda-alb2 + releaseName: alauda-alb2 + version: v3.19.0-beta.11.g44488b9b + mainChart: acp/chart-alauda-alb2 + upgradeRiskLevel: High + upgradeRiskDescription: "升级期间可能会短暂影响用户已部署的应用的访问." + upgradeRiskDescriptionEn: "During the upgrade, there may be a temporary impact on the access to applications already deployed by users." 
+ diff --git a/deploy/chart/alb/scripts/plugin-config.yaml b/deploy/chart/alb/scripts/plugin-config.yaml new file mode 100644 index 00000000..e4fcb9c5 --- /dev/null +++ b/deploy/chart/alb/scripts/plugin-config.yaml @@ -0,0 +1,83 @@ +supportedUpgradeVersions: ">= v1.0.0" +mustUpgrade: false +valuesTemplates: + acp/chart-alauda-alb2: | + <<- $replicas := .Alb2Nodes | len >> + <<- $address := .VIP >> + <<- $ingressHTTPPort := 0 >> + <<- $ingressHTTPSPort := .Alb2Port >> + <<- $metricsPort := 11782 >> + <<- $loadbalancerName := .SystemNamespace >> + <<- $defaultSSLCert := (printf "%s/%s" .SystemNamespace .SystemNamespace) >> + <<- $defaultSSLStrategy := "Always" >> + <<- $nodeSelector := (printf "cpaas-system-alb: %s" (quote "")) >> + <<- $projects := .SystemNamespace >> + + <<- if eq .ClusterName "global" >> + <<- if (and (eq .ClusterType "Baremetal") (eq .Base.DeployType "ImportedGlobal")) >> + <<- $nodeSelector = (printf "node-role.kubernetes.io/control-plane: %s" (quote "")) >> + <<- else >> + <<- labelClusterNode "global" "ingress" "true" .Base.Ingress.Controller.Nodes >> + <<- $nodeSelector = (printf "ingress: %s" (quote "true")) >> + <<- end >> + <<- $replicas = (len .Base.Ingress.Controller.Nodes) >> + <<- $ingressHTTPPort = .Base.HTTP.HTTPPort >> + <<- $ingressHTTPSPort = .Base.HTTP.HTTPSPort >> + <<- $loadbalancerName = "global-alb2" >> + <<- $defaultSSLCert = (printf "%s/dex.tls" .SystemNamespace) >> + <<- if .Base.HTTP.ForceRedirectHTTPS >> + <<- $defaultSSLStrategy = "Always" >> + <<- else >> + <<- $defaultSSLStrategy = "Both" >> + <<- end >> + <<- $projects = "cpaas-system" >> + <<- else >> + <<- $ns := (lookup .ClusterName "v1" "Namespace" "" .SystemNamespace) >> + <<- if $ns >> + <<- $key := (printf "%s/project" .LabelBaseDomain) >> + <<- $label := (index $ns "metadata" "labels" $key) >> + <<- if ne (string $label) .SystemNamespace >> + <<- label .ClusterName $ns $key .SystemNamespace >> + <<- end >> + <<- end >> + <<- end >> + + replicas: << $replicas >> + address: << $address >> + defaultSSLCert: << $defaultSSLCert >> + defaultSSLStrategy: << $defaultSSLStrategy >> + ingressHTTPPort: << $ingressHTTPPort >> + ingressHTTPSPort: << $ingressHTTPSPort >> + metricsPort: << $metricsPort >> + antiAffinityKey: system + gateway: + enable: false + loadbalancerName: << $loadbalancerName >> + projects: + - << $projects >> + <<- if (and (ne .ProvisionType "Registered") (or (eq .ClusterType "Baremetal") (eq .ClusterType "OCP") (eq .ClusterType "Imported") ) ) >> + nodeSelector: + << $nodeSelector >> + <<- end >> + <<- if (or (eq .ProvisionType "Registered") (and (ne .ClusterType "Baremetal") (ne .ClusterType "OCP") (ne .ClusterType "Imported"))) >> + vip: + allocateLoadBalancerNodePorts: true + enableLbSvc: << ne .ProvisionType "Registered" >> + <<- if (and (ne .ProvisionType "Registered") (eq .ClusterType "HuaweiCloudCCE")) >> + lbSvcAnnotations: + kubernetes.io/elb.class: union + kubernetes.io/elb.autocreate: '{ + "type": "public", + "bandwidth_name": "cce-bandwidth-<< .ClusterName >>-<< .SystemNamespace >>", + "bandwidth_chargemode": "traffic", + "bandwidth_size": 5, + "bandwidth_sharetype": "PER", + "eip_type": "5_bgp" + }' + kubernetes.io/elb.lb-algorithm: ROUND_ROBIN + kubernetes.io/elb.health-check-flag: 'on' + kubernetes.io/elb.health-check-option: '{"protocol":"TCP","delay":"5","timeout":"10","max_retries":"3"}' + lbSvcIpFamilyPolicy: SingleStack + <<- end >> + networkMode: container + <<- end >> diff --git a/deploy/chart/alb/templates/deploy-deployment.yaml 
b/deploy/chart/alb/templates/deploy-deployment.yaml index 6e8dc9fe..e654d582 100644 --- a/deploy/chart/alb/templates/deploy-deployment.yaml +++ b/deploy/chart/alb/templates/deploy-deployment.yaml @@ -5,9 +5,7 @@ metadata: namespace: {{ .Values.global.namespace }} annotations: {{- if .Values.defaultAlb }} - helm.sh/waitResources: '[{"apiVersion": "apps/v1", "kind": "Deployment", "namespace": "{{ .Values.global.namespace }}", "name": "alb-operator-ctl"},{"apiVersion": "apps/v1", "kind": "Deployment", "namespace": "{{ .Values.global.namespace }}", "name": "{{.Values.loadbalancerName}}"}]' - {{- else }} - helm.sh/waitResources: '[{"apiVersion": "apps/v1", "kind": "Deployment", "namespace": "{{ .Values.global.namespace }}", "name": "alb-operator-ctl"}]' + helm.sh/waitResources: '[{"apiVersion": "apps/v1", "kind": "Deployment", "namespace": "{{ .Values.global.namespace }}", "name": "{{.Values.loadbalancerName}}"}]' {{- end }} deploy-mode: "deployment" spec: diff --git a/deploy/chart/alb/values.yaml b/deploy/chart/alb/values.yaml index 0209c9f7..a60fb5e7 100644 --- a/deploy/chart/alb/values.yaml +++ b/deploy/chart/alb/values.yaml @@ -18,12 +18,12 @@ global: code: gitlab-ce.alauda.cn/container-platform/alb2 support_arm: true repository: acp/alb2 - tag: v3.19.0-beta.10.g6bb115e1 + tag: v3.19.0-beta.11.g44488b9b nginx: code: gitlab-ce.alauda.cn/container-platform/alb2 support_arm: true repository: acp/alb-nginx - tag: "v3.19.0-beta.10.g6bb115e1" + tag: "v3.19.0-beta.11.g44488b9b" resources: alb: limits: diff --git a/docs/README.md b/docs/README.md index 11c01901..00d73412 100644 --- a/docs/README.md +++ b/docs/README.md @@ -6,4 +6,5 @@ [[rules]] [[errorpage]] [[feature/otel/otel]] -[[modsecurity.en]] \ No newline at end of file +[[modsecurity.en]] +[[auth]] \ No newline at end of file diff --git a/docs/feature/auth/auth.md b/docs/feature/auth/auth.md new file mode 100644 index 00000000..f20db361 --- /dev/null +++ b/docs/feature/auth/auth.md @@ -0,0 +1,51 @@ +## what it is +- 当请求通过lb转发到后端时,可以让lb做些 校验/鉴权的逻辑。 +- 基本逻辑为: 如果请求中有某些header,或者向另外一个地址发起请求并返回了200.则校验通过,继续将请求转发到后端,否则返回401 +## alb auth ingress annotation 支持情况 + +| | support | type | note | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| local-auth | | | 只通过请求中的header来判断,不需要额外发请求.
The typical flow is: the ingress specifies a secret that holds the user's username and password;
when the request carries those credentials in its header, the lb checks them against what is configured in the secret. | | [nginx.ingress.kubernetes.io/auth-realm](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#authentication) | | string | | | [nginx.ingress.kubernetes.io/auth-secret](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#authentication) | | string | | | [nginx.ingress.kubernetes.io/auth-secret-type](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#authentication) | | string | | | [nginx.ingress.kubernetes.io/auth-type](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#authentication) | | "basic" or "digest" | basic: the credential stored in the secret is a base64-encoded username and password
digest: not supported yet | | remote-auth | | | validates by sending a request to another service and deciding, from the result, whether the check passes | | [nginx.ingress.kubernetes.io/auth-url](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | | string | the url to which the auth request is sent | | [nginx.ingress.kubernetes.io/auth-method](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | | string | | | [nginx.ingress.kubernetes.io/auth-signin]() | | string | on auth failure, return a 302 redirect to the given sign-in address instead of a 401;
needed when configuring oauth | | [nginx.ingress.kubernetes.io/auth-signin-redirect-param]() | | string | | | [nginx.ingress.kubernetes.io/auth-response-headers]() | | string | | | [nginx.ingress.kubernetes.io/auth-proxy-set-headers]() | | string | | | [nginx.ingress.kubernetes.io/auth-request-redirect]() | | string | | | [nginx.ingress.kubernetes.io/auth-always-set-cookie]() | | boolean | | | [nginx.ingress.kubernetes.io/auth-snippet](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | x | string | extra nginx config used when making the auth request | | auth-cache | | | whether to cache the result of the auth request | | [nginx.ingress.kubernetes.io/auth-cache-key](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | x | string | | | [nginx.ingress.kubernetes.io/auth-cache-duration](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | x | string | | | auth-keepalive | | | use keepalive connections for the auth request; a set of annotations controls the keepalive behavior | | [nginx.ingress.kubernetes.io/auth-keepalive](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | x | number | | | [nginx.ingress.kubernetes.io/auth-keepalive-share-vars](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | x | "true" or "false" | | | [nginx.ingress.kubernetes.io/auth-keepalive-requests](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | x | number | | | [nginx.ingress.kubernetes.io/auth-keepalive-timeout](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#external-authentication) | x | number | | | auth-tls | | | for https requests, additionally verify the client certificate presented with the request.
| +| [nginx.ingress.kubernetes.io/auth-tls-secret](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#client-certificate-authentication) | x | string | | +| [nginx.ingress.kubernetes.io/auth-tls-verify-depth](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#client-certificate-authentication) | x | number | | +| [nginx.ingress.kubernetes.io/auth-tls-verify-client](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#client-certificate-authentication) | x | string | | +| [nginx.ingress.kubernetes.io/auth-tls-error-page](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#client-certificate-authentication) | x | string | | +| [nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#client-certificate-authentication) | x | "true" or "false" | | +| [nginx.ingress.kubernetes.io/auth-tls-match-cn](https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#client-certificate-authentication) | x | string | | +## ingress-nginx 其他特性 +### global-auth +在ingress-nginx中可以通过configmap设置一个全局的auth。相当给所有的ingress配置了auth +## no-auth-locations [¶](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#no-auth-locations "Permanent link") +支持通过ingress annotation指定某个path 不用做auth +## alb +### alb special ingress annotation + +## 与ingress-nginx 不兼容的部分 +1. 不支持 auth-snippet +2. 不支持 cache +3. 不支持 auth-tls +4. basic-auth 只支持basic 不支持digest +5. basic-auth basic 只支持apr1算法,不支持bcrypt sha256等 diff --git "a/docs/feature/auth/\351\205\215\347\275\256oauth2-proxy.md" "b/docs/feature/auth/\351\205\215\347\275\256oauth2-proxy.md" new file mode 100644 index 00000000..47b7b92d --- /dev/null +++ "b/docs/feature/auth/\351\205\215\347\275\256oauth2-proxy.md" @@ -0,0 +1,155 @@ +使用kind + alb + oauth2-proxy + github OAuth app 实现外部认证 +# note + +# 步骤 +## 部署kind +```bash +kind create cluster --name alb-auth --image=kindest/node:v1.28.0 +kind get kubeconfig --name=alb-auth > ~/.kube/config +``` +## 部署alb +```bash +helm repo add alb https://alauda.github.io/alb/;helm repo update;helm search repo|grep alb +helm install alb-operator alb/alauda-alb2 +alb_ip=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' alb-auth-control-plane) +echo $alb_ip +cat < oauth2-proxy需要访问github,可能需要配置HTTPS_PROXY的环境变量 +```bash +COOKIE_SECRET=$(python -c 'import os,base64; print(base64.urlsafe_b64encode(os.urandom(32)).decode())') +OAUTH2_PROXY_IMAGE="quay.io/oauth2-proxy/oauth2-proxy:v7.7.1" +kind load docker-image $OAUTH2_PROXY_IMAGE --name alb-auth +cat < | otel/other| -// +-----------------+ +--------------+ +-----------+ -// |cr:rule -> m:rule|-------->| custom config| -// +-------+---------+ +--------------+ +-----------------+ -// | ^ +--->| (legacy)rewrite req/res | -// | | -------------------+ -// | | -// +-----v------+ | -// | policy gen +------------------+ -// +------------+ - -// TODO a better name? 
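The basic-auth secrets referenced by the auth feature above come in the two shapes handled by `parseSecret` further down: `auth-file` with a single `auth` entry, and `auth-map` with one entry per user. Only apr1 htpasswd hashes are accepted. A sketch, reusing the placeholder user `foo` and hash from the tests in this change:

```yaml
# secret_type: auth-file — one "auth" key holding an htpasswd line "user:$apr1$salt$hash"
apiVersion: v1
kind: Secret
metadata:
  name: auth-secret
  namespace: cpaas-system
stringData:
  auth: "foo:$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1" # cspell:disable-line
---
# secret_type: auth-map — one key per user, value is the hash without the "user:" prefix
apiVersion: v1
kind: Secret
metadata:
  name: auth-map
  namespace: cpaas-system
stringData:
  foo: "$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1" # cspell:disable-line
```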
-type CustomCfgCtl struct { - log logr.Logger - domain string - otel *otel.Otel - waf *waf.Waf -} - -type CustomCfgOpt struct { - Log logr.Logger - Domain string -} - -func NewCustomCfgCtl(opt CustomCfgOpt) CustomCfgCtl { - return CustomCfgCtl{ - log: opt.Log, - domain: opt.Domain, - otel: otel.NewOtel(opt.Log), - waf: waf.NewWaf(opt.Log), - } -} - -// ingress sync -func (c CustomCfgCtl) IngressToRule(ing *nv1.Ingress, rindex int, pindex int, rule *albv1.Rule) { - annotations := legacyGenerateRuleAnnotationFromIngressAnnotation(ing.Name, ing.Annotations, c.domain) - rule.Annotations = u.MergeMap(rule.Annotations, annotations) - c.otel.UpdateRuleViaIngress(ing, rindex, pindex, rule, c.domain) - c.waf.UpdateRuleViaIngress(ing, rindex, pindex, rule, c.domain) -} - -// cr:rule -> m:rule -func (c CustomCfgCtl) FromRuleCr(rule *m.Rule, r *ct.Rule) { - ruleInPolicyFromRuleAnnotation(rule, c.domain, r.Config) - c.otel.FromRuleCr(rule, r) - c.waf.FromRuleCr(rule, r) -} - -// policy gen -func (c CustomCfgCtl) ResolvePolicies(alb *ct.LoadBalancer, ngx *ct.NgxPolicy) { - _ = c.otel.ResolvePolicy(alb, ngx) -} diff --git a/pkg/controller/ext/auth/auth.go b/pkg/controller/ext/auth/auth.go new file mode 100644 index 00000000..5f696f7d --- /dev/null +++ b/pkg/controller/ext/auth/auth.go @@ -0,0 +1,154 @@ +package auth + +import ( + "fmt" + + av1 "alauda.io/alb2/pkg/apis/alauda/v1" + . "alauda.io/alb2/pkg/controller/ext/auth/types" + . "alauda.io/alb2/pkg/utils" + nv1 "k8s.io/api/networking/v1" + + m "alauda.io/alb2/controller/modules" + ct "alauda.io/alb2/controller/types" + "github.com/go-logr/logr" + + "alauda.io/alb2/config" + ngt "alauda.io/alb2/pkg/controller/ngxconf/types" +) + +// [nginx.ingress.kubernetes.io/auth-realm] +// [nginx.ingress.kubernetes.io/auth-secret] +// [nginx.ingress.kubernetes.io/auth-secret-type] +// [nginx.ingress.kubernetes.io/auth-type] + +// [nginx.ingress.kubernetes.io/auth-url] +// [nginx.ingress.kubernetes.io/auth-method] +// [nginx.ingress.kubernetes.io/auth-proxy-set-headers] # 从configmap中获取,go部分将其转换成具体的map +// [nginx.ingress.kubernetes.io/auth-request-redirect] +// [nginx.ingress.kubernetes.io/auth-response-headers] +// [nginx.ingress.kubernetes.io/auth-signin] +// [nginx.ingress.kubernetes.io/auth-always-set-cookie] +// [nginx.ingress.kubernetes.io/auth-signin-redirect-param] # go部分根据这个annotation修改sign的var_string + +// not supported +// [nginx.ingress.kubernetes.io/auth-snippet] +// [nginx.ingress.kubernetes.io/auth-cache-duration] +// [nginx.ingress.kubernetes.io/auth-cache-key] + +type AuthCtl struct { + L logr.Logger + domain string + forward ForwardAuthCtl + basic BasicAuthCtl +} + +func NewAuthCtl(l logr.Logger, domain string) *AuthCtl { + return &AuthCtl{ + L: l, + domain: domain, + forward: ForwardAuthCtl{ + l: l.WithName("forward-auth"), + }, + basic: BasicAuthCtl{ + l: l.WithName("basic-auth"), + }, + } +} + +func (a *AuthCtl) IngressAnnotationToRule(ingress *nv1.Ingress, ruleIndex int, pathIndex int, rule *av1.Rule) { + auth_ingress := AuthIngress{} + _ = ResolverStructFromAnnotation(&auth_ingress, ingress.Annotations, ResolveAnnotationOpt{Prefix: []string{fmt.Sprintf("alb.ingress.%s/index/%d-%d", a.domain, ruleIndex, pathIndex), fmt.Sprintf("alb.ingress.%s", a.domain), "nginx.ingress.kubernetes.io"}}) + if auth_ingress.Enable == "false" { + return + } + auth_cr := AuthCr{} + if auth_ingress.Url != "" { + a.forward.AuthIngressToAuthCr(&auth_ingress, &auth_cr) + } + if auth_ingress.AuthType != "" { + a.basic.AuthIngressToAuthCr(&auth_ingress, &auth_cr) + } + + 
if auth_cr.Basic == nil && auth_cr.Forward == nil { + return + } + // we have to take a choice + if auth_cr.Basic != nil && auth_cr.Forward != nil { + a.L.Info("both basic auth and forward auth ? use basic", "ing", ingress.Name, "ing-ns", ingress.Namespace) + auth_cr.Forward = nil + } + rule.Spec.Config.Auth = &auth_cr +} + +func (c *AuthCtl) ToInternalRule(mr *m.Rule, ir *ct.InternalRule) { + if mr.Spec.Config.Auth != nil { + ir.Config.Auth = mr.Spec.Config.Auth + return + } + ft := mr.GetFtConfig() + if ft != nil && ft.Auth != nil { + ir.Config.Auth = ft.Auth + return + } + + lb := mr.GetAlbConfig() + if lb != nil && lb.Auth != nil { + ir.Config.Auth = lb.Auth + return + } +} + +func (c *AuthCtl) CollectRefs(ir *ct.InternalRule, refs ct.RefMap) { + if ir.Config.Auth == nil { + return + } + cfg := ir.Config.Auth + c.ForwardAuthCollectRefs(ir, cfg, refs) + c.BasicAuthCollectRefs(ir, cfg, refs) +} + +func (c *AuthCtl) ForwardAuthCollectRefs(ir *ct.InternalRule, cfg *AuthCr, refs ct.RefMap) { + if cfg.Forward == nil || cfg.Forward.AuthHeadersCmRef == "" { + return + } + raw_key := cfg.Forward.AuthHeadersCmRef + key, err := ParseStringToObjectKey(raw_key) + if err != nil { + c.L.Error(err, "invalid cmref", "cmref", key, "rule", ir.RuleID) + return + } + refs.ConfigMap[key] = nil +} + +func (c *AuthCtl) BasicAuthCollectRefs(ir *ct.InternalRule, cfg *AuthCr, refs ct.RefMap) { + if cfg.Basic == nil || cfg.Basic.Secret == "" { + return + } + raw_key := cfg.Basic.Secret + key, err := ParseStringToObjectKey(raw_key) + if err != nil { + c.L.Error(err, "invalid secret-ref", "ref", key, "rule", ir.RuleID) + return + } + refs.Secret[key] = nil +} + +func (c *AuthCtl) ToPolicy(ir *ct.InternalRule, p *ct.Policy, refs ct.RefMap) { + if ir.Config.Auth == nil { + return + } + ir_auth := ir.Config.Auth + p.Config.Auth = &AuthPolicy{} + if ir_auth.Forward != nil { + c.forward.ToPolicy(ir_auth.Forward, p.Config.Auth, refs, ir.RuleID) + } + if ir_auth.Basic != nil { + c.basic.ToPolicy(ir_auth.Basic, p.Config.Auth, refs, ir.RuleID) + } +} + +func (c *AuthCtl) UpdateNgxTmpl(_ *ngt.NginxTemplateConfig, _ *ct.LoadBalancer, _ *config.Config) { +} + +func (c *AuthCtl) UpdatePolicyAfterUniq(_ *ct.PolicyExt) { +} diff --git a/pkg/controller/ext/auth/auth_test.go b/pkg/controller/ext/auth/auth_test.go new file mode 100644 index 00000000..10b33f6f --- /dev/null +++ b/pkg/controller/ext/auth/auth_test.go @@ -0,0 +1,424 @@ +package auth + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + u "alauda.io/alb2/utils" + "github.com/kr/pretty" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "golang.org/x/crypto/bcrypt" + corev1 "k8s.io/api/core/v1" + nv1 "k8s.io/api/networking/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + . "alauda.io/alb2/controller/types" + av1 "alauda.io/alb2/pkg/apis/alauda/v1" + . "alauda.io/alb2/pkg/controller/ext/auth/types" + . "alauda.io/alb2/pkg/utils" + . 
"alauda.io/alb2/utils/test_utils" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + _ = fmt.Println + l = ConsoleLog() +) + +func TestAuth(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "auth config") +} + +var _ = Describe("auth", func() { + t := GinkgoT() + + It("parse var-string", func() { + xx := []string{"a", "b", "c", "&xx"} + fmt.Println(xx) + out, _ := json.Marshal(xx) + fmt.Println(string(out)) + }) + + It("parse var-string", func() { + cases := []struct { + name string + input string + expected VarString + }{ + { + name: "simple string", + input: "xx xx xx", + expected: VarString{"xx xx xx"}, + }, + { + name: "string with variable", + input: "xx $xx xx", + expected: VarString{"xx ", "$xx", " xx"}, + }, + { + name: "string with variable and -", + input: "$x-$xx$s dd xx-dd x", + expected: VarString{"$x", "-", "$xx", "$s", " dd xx-dd x"}, + }, + { + name: "complex var string", + input: `'[$time_local] $remote_addr "$host" "$request" ' '$status $upstream_status $upstream_addr ' '"$http_user_agent" "$http_x_forwarded_for" ' '$request_time $upstream_response_time $upstream_bytes_received'`, + expected: VarString{ + "'[", "$time_local", "] ", "$remote_addr", " \"", "$host", "\" \"", "$request", "\" ' '", "$status", " ", "$upstream_status", " ", "$upstream_addr", " ' '\"", "$http_user_agent", "\" \"", "$http_x_forwarded_for", "\" ' '", "$request_time", " ", "$upstream_response_time", " ", "$upstream_bytes_received", "'", + }, + }, + { + name: "url", + input: `http://$host/auth/start?rd=$escaped_request_uri&xx=bb`, + expected: VarString{ + "http://", "$host", "/auth/start?rd=", "$escaped_request_uri", "&xx=bb", + }, + }, + { + name: "url", + input: `http://${host}{x}.-~!@#%^&*()_+|}{':?$es $$$a $~`, + expected: VarString{ + "http://", "$host", "{x}.-~!@#%^&*()_+|}{':?", "$es", " ", "$", "$", "$a", " ", "$", "~", + }, + }, + } + + for _, tc := range cases { + vs, err := ParseVarString(tc.input) + assert.NoError(t, err) + l.Info("check", "real", vs, "exp", tc.expected) + assert.Equal(t, tc.expected, vs) + } + }) + + It("simple reflect perf", func() { + annotation := map[string]string{ + "xxx/url": "abc", + "xxx/xx": "xyz", + } + type A struct { + F1 string `annotation:"url"` + F2 string `annotation:"xx"` + } + simple := func(a *A, annotation map[string]string) { + if v, ok := annotation["xxx/url"]; ok { + a.F1 = v + } + if v, ok := annotation["xxx/xx"]; ok { + a.F2 = v + } + } + a := A{} + show_delay := func(f func()) time.Duration { + start := time.Now() + f() + end := time.Now() + return end.Sub(start) + } + fmt.Println(show_delay(func() { + for i := 0; i < 100000; i++ { + simple(&a, annotation) + } + })) + fmt.Println(show_delay(func() { + for i := 0; i < 100000; i++ { + ResolverStructFromAnnotation(&a, annotation, ResolveAnnotationOpt{ + Prefix: []string{"xxx"}, + }) + } + })) + // 10w 1ms vs 100ms + }) + + It("resolve from annotation should ok", func() { + type A struct { + F1 string `annotation:"url"` + F2 string `annotation:"xx"` + } + type AA struct { + A + B string `annotation:"b"` + } + + annotation := map[string]string{ + "xxx/url": "abc", + "xxx/xx": "xyz", + "xxx/b": "hi", + "aaa/url": "123", + } + a := A{} + ResolverStructFromAnnotation(&a, annotation, ResolveAnnotationOpt{ + Prefix: []string{"xxx"}, + }) + assert.Equal(t, a, A{F1: "abc", F2: "xyz"}) + ResolverStructFromAnnotation(&a, annotation, ResolveAnnotationOpt{ + Prefix: []string{"aaa", "xxx"}, + }) + assert.Equal(t, a, A{F1: "123", F2: "xyz"}) + // support embed struct + aa := AA{} + 
ResolverStructFromAnnotation(&aa, annotation, ResolveAnnotationOpt{ + Prefix: []string{"aaa", "xxx"}, + }) + fmt.Printf("%+v", aa) + assert.Equal(t, aa, AA{A: A{F1: "123", F2: "xyz"}, B: "hi"}) + }) + + It("resolve ingress annotation should ok ", func() { + type Case struct { + title string + annotations map[string]string + rule_assert func(mr *av1.Rule) + rule_index int + path_index int + } + cases := []Case{ + { + title: "basic set should ok", + annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-url": "https://$host/oauth2/auth", + "nginx.ingress.kubernetes.io/auth-signin": "https://$host/oauth2/start?rd=$escaped_request_uri", + }, + rule_assert: func(mr *av1.Rule) { + assert.Equal(t, mr.Spec.Config.Auth.Forward.Url, "https://$host/oauth2/auth") + assert.Equal(t, mr.Spec.Config.Auth.Forward.Signin, "https://$host/oauth2/start?rd=$escaped_request_uri") + }, + }, + { + title: "ignore path should ok", + annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-enable": "false", + "nginx.ingress.kubernetes.io/auth-url": "https://$host/oauth2/auth", + "nginx.ingress.kubernetes.io/auth-signin": "https://$host/oauth2/start?rd=$escaped_request_uri", + }, + rule_assert: func(mr *av1.Rule) { + assert.Nil(t, mr.Spec.Config.Auth) + }, + }, + { + title: "specific path index should ok", + annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-url": "https://$host/oauth2/auth", + "nginx.ingress.kubernetes.io/auth-signin": "https://$host/oauth2/start?rd=$escaped_request_uri", + "alb.ingress.cpaas.io/index/0-0/auth-url": "a.com", + "alb.ingress.cpaas.io/index/0-0/auth-signin": "b.com", + }, + rule_assert: func(mr *av1.Rule) { + assert.Equal(t, mr.Spec.Config.Auth.Forward.Url, "a.com") + assert.Equal(t, mr.Spec.Config.Auth.Forward.Signin, "b.com") + }, + }, + { + title: "disable should ok", + annotations: map[string]string{ + "alb.ingress.cpaas.io/auth-enable": "false", + }, + rule_assert: func(mr *av1.Rule) { + assert.Nil(t, mr.Spec.Config.Auth) + }, + }, + { + title: "default should ok", + annotations: map[string]string{}, + rule_assert: func(mr *av1.Rule) { + assert.Nil(t, mr.Spec.Config.Auth) + }, + }, + { + title: "basic auth should ok", + annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-realm": "default", + "nginx.ingress.kubernetes.io/auth-secret": "cpaas-system/auth-secret", + "nginx.ingress.kubernetes.io/auth-secret-type": "auth-file", + "nginx.ingress.kubernetes.io/auth-type": "basic", + }, + rule_assert: func(mr *av1.Rule) { + GinkgoAssertJsonEq(mr.Spec.Config.Auth.Basic, ` + { + "auth_type": "basic", + "realm": "default", + "secret": "cpaas-system/auth-secret", + "secret_type": "auth-file", + } + `, "") + }, + }, + } + for _, c := range cases { + annotation := c.annotations + a_ctl := NewAuthCtl(l, "cpaas.io") + ing := &nv1.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Annotations: annotation, + }, + Spec: nv1.IngressSpec{}, + } + mr := &av1.Rule{ + Spec: av1.RuleSpec{ + Config: &av1.RuleConfigInCr{}, + }, + } + + a_ctl.IngressAnnotationToRule(ing, c.rule_index, c.path_index, mr) + l.Info(c.title, "mr", PrettyCr(mr.Spec.Config.Auth)) + c.rule_assert(mr) + } + }) + + It("to policy should ok", func() { + type Case struct { + title string + rule *InternalRule + refs RefMap + p_assert func(p *Policy) + } + cases := []Case{ + { + title: "resolve configmap refs should ok", + rule: &InternalRule{ + Config: RuleExt{ + Auth: &AuthCr{ + Forward: &ForwardAuthInCr{ + Url: "https://$host/oauth2/auth", + AuthHeadersCmRef: "cpaas-system/auth-cm", + }, 
+ }, + }, + }, + refs: RefMap{ + ConfigMap: map[client.ObjectKey]*corev1.ConfigMap{ + { + Namespace: "cpaas-system", + Name: "auth-cm", + }: { + Data: map[string]string{ + "x-xx": "$host-$uri", + }, + }, + }, + }, + p_assert: func(p *Policy) { + assert.Equal(t, p.Config.Auth.Forward.Url, VarString{"https://", "$host", "/oauth2/auth"}) + assert.Equal(t, p.Config.Auth.Forward.AuthHeaders["x-xx"], VarString{"$host", "-", "$uri"}) + assert.Equal(t, p.Config.Auth.Forward.UpstreamHeaders, []string{}) + }, + }, + { + title: "resolve signin url and redirect param should ok", + rule: &InternalRule{ + Config: RuleExt{ + Auth: &AuthCr{ + Forward: &ForwardAuthInCr{ + Url: "https://$host/oauth2/auth", + Signin: "https://$host/oauth2/start", + SigninRedirectParam: "xx", + }, + }, + }, + }, + refs: RefMap{}, + p_assert: func(p *Policy) { + assert.Equal(t, p.Config.Auth.Forward.SigninUrl, VarString{"https://", "$host", "/oauth2/start?xx=", "$pass_access_scheme", "://", "$http_host", "$escaped_request_uri"}) + }, + }, + { + title: "basic auth, auth-file secret should work", + rule: &InternalRule{ + Config: RuleExt{ + Auth: &AuthCr{ + Basic: &BasicAuthInCr{ + Realm: "xx", + Secret: "cpaas-system/auth-file", + SecretType: "auth-file", + AuthType: "basic", + }, + }, + }, + }, + refs: RefMap{ + Secret: map[client.ObjectKey]*corev1.Secret{ + {Name: "auth-file", Namespace: "cpaas-system"}: { + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + // foo:$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1 // cspell:disable-line + "auth": []byte("Zm9vOiRhcHIxJHFJQ05aNjFRJDJpb29pSlZVQU1tcHJxMjU4L0NoUDE"), // cspell:disable-line + }, + }, + }, + }, + p_assert: func(p *Policy) { + l.Info("policy", "auth", u.PrettyJson(p.Config.Auth)) + /* cspell:disable-next-line */ + GinkgoAssertJsonEq(p.Config.Auth.Basic.Secret["foo"], `{"algorithm":"apr1","hash":"2iooiJVUAMmprq258/ChP1","name":"foo","salt":"qICNZ61Q"}`, "") + }, + }, + { + title: "basic auth, auth-map secret should work", + rule: &InternalRule{ + Config: RuleExt{ + Auth: &AuthCr{ + Basic: &BasicAuthInCr{ + Realm: "xx", + Secret: "cpaas-system/auth-map", + SecretType: "auth-map", + AuthType: "basic", + }, + }, + }, + }, + refs: RefMap{ + Secret: map[client.ObjectKey]*corev1.Secret{ + {Name: "auth-map", Namespace: "cpaas-system"}: { + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "foo": []byte("JGFwcjEkcUlDTlo2MVEkMmlvb2lKVlVBTW1wcnEyNTgvQ2hQMQ"), // cspell:disable-line + }, + }, + }, + }, + p_assert: func(p *Policy) { + l.Info("policy", "auth", u.PrettyJson(p.Config.Auth)) + GinkgoAssertStringEq(p.Config.Auth.Basic.Err, "", "") + /* cspell:disable-next-line */ + GinkgoAssertJsonEq(p.Config.Auth.Basic.Secret["foo"], `{"algorithm":"apr1","hash":"2iooiJVUAMmprq258/ChP1","name":"foo","salt":"qICNZ61Q"}`, "") + }, + }, + } + for _, c := range cases { + p := &Policy{ + Config: PolicyExtCfg{}, + } + a_ctl := NewAuthCtl(l, "cpaas.io") + a_ctl.ToPolicy(c.rule, p, c.refs) + l.Info("policy", "p", pretty.Sprint(p)) + c.p_assert(p) + } + }) + It("get secret", func() { + out, err := bcrypt.GenerateFromPassword([]byte("bar"), 14) + GinkgoNoErr(err) + l.Info("secret", "out", string(out)) + }) + + It("parse hash should ok", func() { + { + cfg, err := parseHash("foo:$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1", "") // cspell:disable-line + GinkgoNoErr(err) + l.Info("xx", "x", u.PrettyJson(cfg)) + /* cspell:disable-next-line */ + GinkgoAssertJsonEq(cfg, `{"algorithm":"apr1","hash":"2iooiJVUAMmprq258/ChP1","name":"foo","salt":"qICNZ61Q"}`, "") + } + { + cfg, err := 
parseHash("$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1", "foo") // cspell:disable-line + GinkgoNoErr(err) + l.Info("xx", "x", u.PrettyJson(cfg)) + /* cspell:disable-next-line */ + GinkgoAssertJsonEq(cfg, `{"algorithm":"apr1","hash":"2iooiJVUAMmprq258/ChP1","name":"foo","salt":"qICNZ61Q"}`, "") + } + }) +}) diff --git a/pkg/controller/ext/auth/basic_auth.go b/pkg/controller/ext/auth/basic_auth.go new file mode 100644 index 00000000..ab7fb185 --- /dev/null +++ b/pkg/controller/ext/auth/basic_auth.go @@ -0,0 +1,109 @@ +package auth + +import ( + "fmt" + "strings" + + ct "alauda.io/alb2/controller/types" + . "alauda.io/alb2/pkg/controller/ext/auth/types" + "github.com/go-logr/logr" + + . "alauda.io/alb2/pkg/utils" + corev1 "k8s.io/api/core/v1" +) + +type BasicAuthCtl struct { + l logr.Logger +} + +func (f BasicAuthCtl) AuthIngressToAuthCr(auth_ingress *AuthIngress, auth_cr *AuthCr) { + auth_cr.Basic = &BasicAuthInCr{ + Realm: "", + Secret: "", + SecretType: "", + AuthType: "", + } + _ = ReAssignAuthIngressBasicToBasicAuthInCr(&auth_ingress.AuthIngressBasic, auth_cr.Basic, nil) +} + +func (b BasicAuthCtl) ToPolicy(basic *BasicAuthInCr, p *AuthPolicy, refs ct.RefMap, rule string) { + log := b.l.WithValues("rule", rule) + bp := &BasicAuthPolicy{ + Realm: "", + Secret: map[string]BasicAuthHash{}, + AuthType: "", + Err: "", + } + p.Basic = bp + _ = ReAssignBasicAuthInCrToBasicAuthPolicy(basic, bp, nil) + if bp.AuthType != "basic" { + bp.Err = "only support basic auth" + return + } + + key, err := ParseStringToObjectKey(basic.Secret) + if err != nil { + log.Error(err, "invalid secret refs", "key", key) + bp.Err = "invalid secret refs format" + return + } + if secret := refs.Secret[key]; secret == nil { + log.Error(err, "secret refs ", key) + bp.Err = "secret refs not found" + return + } + bp.Secret, err = parseSecret(refs.Secret[key], basic.SecretType) + if err != nil { + bp.Err = "invalid secret context " + err.Error() + } +} + +func parseHash(hash string, name string) (BasicAuthHash, error) { + cfg := BasicAuthHash{} + if !strings.Contains(hash, "$apr1$") { + return cfg, fmt.Errorf("unsupported algorithm") + } + parts := strings.Split(hash, "$apr1$") + if len(parts) != 2 { + return cfg, fmt.Errorf("invalid pass format") + } + if name == "" { + name_in_hash, has_suffix := strings.CutSuffix(parts[0], ":") + if !has_suffix { + return cfg, fmt.Errorf("invalid pass format") + } + name = name_in_hash + } + pass := strings.Split(parts[1], "$") + if len(parts) != 2 { + return cfg, fmt.Errorf("invalid pass format") + } + cfg.Algorithm = "apr1" + cfg.Name = name + cfg.Salt = pass[0] + cfg.Hash = pass[1] + return cfg, nil +} + +func parseSecret(secret *corev1.Secret, secret_type string) (map[string]BasicAuthHash, error) { + ret := map[string]BasicAuthHash{} + if secret_type == "auth-file" { + hash_cfg, err := parseHash(string(secret.Data["auth"]), "") + if err != nil { + return nil, err + } + ret[hash_cfg.Name] = hash_cfg + return ret, nil + } + + if secret_type == "auth-map" { + for name, hash := range secret.Data { + hash_cfg, err := parseHash(string(hash), name) + if err != nil { + return nil, err + } + ret[hash_cfg.Name] = hash_cfg + } + } + return ret, nil +} diff --git a/pkg/controller/ext/auth/forward_auth.go b/pkg/controller/ext/auth/forward_auth.go new file mode 100644 index 00000000..f1c562f2 --- /dev/null +++ b/pkg/controller/ext/auth/forward_auth.go @@ -0,0 +1,114 @@ +package auth + +import ( + "fmt" + "net/url" + "strings" + + ct "alauda.io/alb2/controller/types" + . 
"alauda.io/alb2/pkg/controller/ext/auth/types" + "github.com/go-logr/logr" + + . "alauda.io/alb2/pkg/utils" +) + +type ForwardAuthCtl struct { + l logr.Logger +} + +func (f ForwardAuthCtl) AuthIngressToAuthCr(auth_ingress *AuthIngress, auth_cr *AuthCr) { + auth_cr.Forward = &ForwardAuthInCr{ + Url: "", + Method: "", + AuthHeadersCmRef: "", + AuthRequestRedirect: "", + Signin: "", + AlwaysSetCookie: false, + SigninRedirectParam: "", + } + _ = ReAssignAuthIngressForwardToForwardAuthInCr(&auth_ingress.AuthIngressForward, auth_cr.Forward, &ReAssignAuthIngressForwardToForwardAuthInCrOpt{ + Resolve_response_headers: func(ls string) ([]string, error) { + return strings.Split(ls, ","), nil + }, + }) +} + +func (f ForwardAuthCtl) ToPolicy(forward *ForwardAuthInCr, p *AuthPolicy, refs ct.RefMap, rule string) { + log := f.l.WithValues("rule", rule) + fp := &ForwardAuthPolicy{ + Url: []string{}, + Method: "GET", + AuthHeaders: map[string]VarString{}, + AuthRequestRedirect: []string{}, + UpstreamHeaders: []string{}, + AlwaysSetCookie: false, + SigninUrl: []string{}, + } + err := ReAssignForwardAuthInCrToForwardAuthPolicy(forward, fp, &ReAssignForwardAuthInCrToForwardAuthPolicyOpt{ + Resolve_proxy_set_headers: func(cm_ref string) (map[string]VarString, error) { + rv := map[string]VarString{} + if cm_ref == "" { + return rv, nil + } + cm_key, err := ParseStringToObjectKey(cm_ref) + if err != nil { + return nil, err + } + cm, ok := refs.ConfigMap[cm_key] + if !ok { + fp.InvalidAuthReqCmRef = true + log.Info("cm not found", "key", cm_key) + return nil, nil + } + for k, v := range cm.Data { + var_str, err := ParseVarString(v) + if err != nil { + return nil, err + } + rv[k] = var_str + } + return rv, nil + }, + Resolve_varstring: ParseVarString, + }) + if err != nil { + log.Error(err, "gen policy fail") + return + } + + if forward.Signin != "" { + redirect_param := forward.SigninRedirectParam + if redirect_param == "" { + redirect_param = "rd" + } + full, err := resolve_signin_url(forward.Signin, redirect_param) + if err != nil { + log.Error(err, "resolve signin url fail") + return + } + fp.SigninUrl = full + } + p.Forward = fp +} + +// https://github.com/kubernetes/ingress-nginx/blob/d1dc3e827f818ee23a08af09e9a7be0b12af1736/internal/ingress/controller/template/template.go#L1156 +func buildAuthSignURL(authSignURL, authRedirectParam string) string { + u, _ := url.Parse(authSignURL) + q := u.Query() + if authRedirectParam == "" { + authRedirectParam = "rd" + } + if len(q) == 0 { + return fmt.Sprintf("%s?%s=$pass_access_scheme://$http_host$escaped_request_uri", authSignURL, authRedirectParam) + } + + if q.Get(authRedirectParam) != "" { + return authSignURL + } + return fmt.Sprintf("%s&%s=$pass_access_scheme://$http_host$escaped_request_uri", authSignURL, authRedirectParam) +} + +func resolve_signin_url(signin_url string, redirect_param string) (VarString, error) { + full := buildAuthSignURL(signin_url, redirect_param) + return ParseVarString(full) +} diff --git a/pkg/controller/ext/auth/types/codegen_mapping_authingressbasic_basicauthincr.go b/pkg/controller/ext/auth/types/codegen_mapping_authingressbasic_basicauthincr.go new file mode 100644 index 00000000..69a93170 --- /dev/null +++ b/pkg/controller/ext/auth/types/codegen_mapping_authingressbasic_basicauthincr.go @@ -0,0 +1,28 @@ +package types + +import ( + "strings" +) + +func init() { + // make go happy + _ = strings.Clone("") +} + +type ReAssignAuthIngressBasicToBasicAuthInCrOpt struct{} + +func ReAssignAuthIngressBasicToBasicAuthInCr(lt 
*AuthIngressBasic, rt *BasicAuthInCr, opt *ReAssignAuthIngressBasicToBasicAuthInCrOpt) error { + trans := map[string]func(lt *AuthIngressBasic, rt *BasicAuthInCr) error{} + + rt.AuthType = lt.AuthType + rt.Realm = lt.Realm + rt.Secret = lt.Secret + rt.SecretType = lt.SecretType + for _, m := range trans { + err := m(lt, rt) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/ext/auth/types/codegen_mapping_authingressforward_forwardauthincr.go b/pkg/controller/ext/auth/types/codegen_mapping_authingressforward_forwardauthincr.go new file mode 100644 index 00000000..2e4488f8 --- /dev/null +++ b/pkg/controller/ext/auth/types/codegen_mapping_authingressforward_forwardauthincr.go @@ -0,0 +1,50 @@ +package types + +import ( + "strings" +) + +func init() { + // make go happy + _ = strings.Clone("") +} + +type ReAssignAuthIngressForwardToForwardAuthInCrOpt struct { + From_bool func(string) (bool, error) + + Resolve_response_headers func(string) ([]string, error) +} + +func ReAssignAuthIngressForwardToForwardAuthInCr(lt *AuthIngressForward, rt *ForwardAuthInCr, opt *ReAssignAuthIngressForwardToForwardAuthInCrOpt) error { + trans := map[string]func(lt *AuthIngressForward, rt *ForwardAuthInCr) error{ + "always_set_cookie": func(lt *AuthIngressForward, rt *ForwardAuthInCr) error { + ret := strings.ToLower(lt.AlwaysSetCookie) == "true" + rt.AlwaysSetCookie = ret + return nil + }, + + "response_headers": func(lt *AuthIngressForward, rt *ForwardAuthInCr) error { + ret, err := opt.Resolve_response_headers(lt.ResponseHeaders) + if err != nil { + return err + } + rt.UpstreamHeaders = ret + return nil + }, + } + + rt.Method = lt.Method + rt.AuthHeadersCmRef = lt.ProxySetHeaders + rt.AuthRequestRedirect = lt.RequestRedirect + + rt.Signin = lt.Signin + rt.SigninRedirectParam = lt.SigninRedirectParam + rt.Url = lt.Url + for _, m := range trans { + err := m(lt, rt) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/ext/auth/types/codegen_mapping_basicauthincr_basicauthpolicy.go b/pkg/controller/ext/auth/types/codegen_mapping_basicauthincr_basicauthpolicy.go new file mode 100644 index 00000000..51c20adb --- /dev/null +++ b/pkg/controller/ext/auth/types/codegen_mapping_basicauthincr_basicauthpolicy.go @@ -0,0 +1,26 @@ +package types + +import ( + "strings" +) + +func init() { + // make go happy + _ = strings.Clone("") +} + +type ReAssignBasicAuthInCrToBasicAuthPolicyOpt struct{} + +func ReAssignBasicAuthInCrToBasicAuthPolicy(lt *BasicAuthInCr, rt *BasicAuthPolicy, opt *ReAssignBasicAuthInCrToBasicAuthPolicyOpt) error { + trans := map[string]func(lt *BasicAuthInCr, rt *BasicAuthPolicy) error{} + + rt.AuthType = lt.AuthType + rt.Realm = lt.Realm + for _, m := range trans { + err := m(lt, rt) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/ext/auth/types/codegen_mapping_forwardauthincr_forwardauthpolicy.go b/pkg/controller/ext/auth/types/codegen_mapping_forwardauthincr_forwardauthpolicy.go new file mode 100644 index 00000000..0feda7e8 --- /dev/null +++ b/pkg/controller/ext/auth/types/codegen_mapping_forwardauthincr_forwardauthpolicy.go @@ -0,0 +1,60 @@ +package types + +import ( + "strings" +) + +func init() { + // make go happy + _ = strings.Clone("") +} + +type ReAssignForwardAuthInCrToForwardAuthPolicyOpt struct { + Resolve_proxy_set_headers func(string) (map[string]VarString, error) + + Resolve_varstring func(string) (VarString, error) +} + +func ReAssignForwardAuthInCrToForwardAuthPolicy(lt *ForwardAuthInCr, rt 
*ForwardAuthPolicy, opt *ReAssignForwardAuthInCrToForwardAuthPolicyOpt) error { + trans := map[string]func(lt *ForwardAuthInCr, rt *ForwardAuthPolicy) error{ + "proxy_set_headers": func(lt *ForwardAuthInCr, rt *ForwardAuthPolicy) error { + ret, err := opt.Resolve_proxy_set_headers(lt.AuthHeadersCmRef) + if err != nil { + return err + } + rt.AuthHeaders = ret + return nil + }, + + "request_redirect": func(lt *ForwardAuthInCr, rt *ForwardAuthPolicy) error { + ret, err := opt.Resolve_varstring(lt.AuthRequestRedirect) + if err != nil { + return err + } + rt.AuthRequestRedirect = ret + return nil + }, + + "url": func(lt *ForwardAuthInCr, rt *ForwardAuthPolicy) error { + ret, err := opt.Resolve_varstring(lt.Url) + if err != nil { + return err + } + rt.Url = ret + return nil + }, + } + + rt.AlwaysSetCookie = lt.AlwaysSetCookie + rt.Method = lt.Method + + rt.UpstreamHeaders = lt.UpstreamHeaders + + for _, m := range trans { + err := m(lt, rt) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/ext/auth/types/types.go b/pkg/controller/ext/auth/types/types.go new file mode 100644 index 00000000..9d9ff3dc --- /dev/null +++ b/pkg/controller/ext/auth/types/types.go @@ -0,0 +1,103 @@ +package types + +type VarString []string // a string "$host hello $uri$http_id $arg_id" => []string{"$host"," hello ", "$uri", "$http_id"," ","$arg_id"} + +type AuthIngress struct { + Enable string `annotation:"auth-enable" key:"enable" default:"true"` // for feature: no-auth-locations + AuthIngressForward + AuthIngressBasic +} + +// ingress annotation 对应的结构 +type AuthIngressForward struct { + Url string `annotation:"auth-url" key:"url"` + Method string `annotation:"auth-method" default:"GET" key:"method"` + ProxySetHeaders string `annotation:"auth-proxy-set-headers" key:"proxy_set_headers"` // the name of a ConfigMap that specifies headers to pass to the authentication service + RequestRedirect string `annotation:"auth-request-redirect" key:"request_redirect"` // to specify the X-Auth-Request-Redirect header value. + ResponseHeaders string `annotation:"auth-response-headers" key:"response_headers"` // to specify headers to pass to backend once authentication request completes. 
+ Signin string `annotation:"auth-signin" key:"signin"` + AlwaysSetCookie string `annotation:"auth-always-set-cookie" default:"false" key:"always_set_cookie"` + SigninRedirectParam string `annotation:"auth-signin-redirect-param" key:"signin_redirect_param"` + // cacheDuration string `annotation:"auth-cache-duration"` + // cacheKey string `annotation:"auth-cache-key"` +} + +type AuthIngressBasic struct { + Realm string `annotation:"auth-realm" key:"realm"` + Secret string `annotation:"auth-secret" key:"secret"` + SecretType string `annotation:"auth-secret-type" key:"secret_type"` + AuthType string `annotation:"auth-type" key:"auth_type"` +} + +// auth via forward request +// +k8s:deepcopy-gen=true +type ForwardAuthInCr struct { + // +optional + Url string `json:"url,omitempty" key:"url"` + + // +optional + Method string `json:"method,omitempty" key:"method"` + // +optional + AuthHeadersCmRef string `json:"auth_headers_cm_ref,omitempty" key:"proxy_set_headers"` + // +optional + AuthRequestRedirect string `json:"auth_request_redirect,omitempty" key:"request_redirect"` + // +optional + UpstreamHeaders []string `json:"upstream_headers,omitempty" key:"response_headers" trans:"resolve_response_headers"` + // +optional + Signin string `json:"signin,omitempty" key:"signin"` + // +optional + AlwaysSetCookie bool `json:"always_set_cookie,omitempty" key:"always_set_cookie" trans:"from_bool"` + // +optional + SigninRedirectParam string `json:"signin_redirect_param,omitempty" key:"signin_redirect_param"` +} + +// +k8s:deepcopy-gen=true +type AuthCr struct { + Forward *ForwardAuthInCr `json:"forward,omitempty"` + Basic *BasicAuthInCr `json:"basic,omitempty"` +} + +// +k8s:deepcopy-gen=true +type BasicAuthInCr struct { + // +optional + Realm string `json:"realm" key:"realm"` + // +optional + Secret string `json:"secret" key:"secret"` + // auth-file|auth-map + // +optional + SecretType string `json:"secret_type" key:"secret_type"` + + // only support basic now + // +optional + AuthType string `json:"auth_type" key:"auth_type"` +} + +type AuthPolicy struct { + Forward *ForwardAuthPolicy `json:"forward_auth,omitempty"` + Basic *BasicAuthPolicy `json:"basic_auth,omitempty"` +} + +// 我们的lua能接受的auth配置 +type ForwardAuthPolicy struct { + Url VarString `json:"url" key:"url" trans:"resolve_varstring"` + Method string `json:"method" key:"method"` + AuthHeaders map[string]VarString `json:"auth_headers" key:"proxy_set_headers" trans:"resolve_proxy_set_headers"` + InvalidAuthReqCmRef bool `json:"invalid_auth_req_cm_ref" key:"invalid_auth_req_cm_ref"` + AuthRequestRedirect VarString `json:"auth_request_redirect" key:"request_redirect" trans:"resolve_varstring"` + UpstreamHeaders []string `json:"upstream_headers" key:"response_headers"` + AlwaysSetCookie bool `json:"always_set_cookie" key:"always_set_cookie"` + SigninUrl VarString `json:"signin_url"` // resolved via ourself +} +type BasicAuthHash struct { + Name string `json:"name"` + Algorithm string `json:"algorithm"` + Salt string `json:"salt"` + Hash string `json:"hash"` +} + +type BasicAuthPolicy struct { + Realm string `json:"realm" key:"realm"` + Secret map[string]BasicAuthHash `json:"secret"` + AuthType string `json:"auth_type" key:"auth_type"` + Err string `json:"err"` // if rule or ingress are invalid this route should report error to user +} diff --git a/pkg/controller/ext/auth/types/zz_generated.deepcopy.go b/pkg/controller/ext/auth/types/zz_generated.deepcopy.go new file mode 100644 index 00000000..7cfb7c43 --- /dev/null +++ 
b/pkg/controller/ext/auth/types/zz_generated.deepcopy.go @@ -0,0 +1,85 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package types + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthCr) DeepCopyInto(out *AuthCr) { + *out = *in + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(ForwardAuthInCr) + (*in).DeepCopyInto(*out) + } + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicAuthInCr) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthCr. +func (in *AuthCr) DeepCopy() *AuthCr { + if in == nil { + return nil + } + out := new(AuthCr) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthInCr) DeepCopyInto(out *BasicAuthInCr) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthInCr. +func (in *BasicAuthInCr) DeepCopy() *BasicAuthInCr { + if in == nil { + return nil + } + out := new(BasicAuthInCr) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardAuthInCr) DeepCopyInto(out *ForwardAuthInCr) { + *out = *in + if in.UpstreamHeaders != nil { + in, out := &in.UpstreamHeaders, &out.UpstreamHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardAuthInCr. +func (in *ForwardAuthInCr) DeepCopy() *ForwardAuthInCr { + if in == nil { + return nil + } + out := new(ForwardAuthInCr) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/controller/ext/auth/util.go b/pkg/controller/ext/auth/util.go new file mode 100644 index 00000000..333d857d --- /dev/null +++ b/pkg/controller/ext/auth/util.go @@ -0,0 +1,76 @@ +package auth + +import ( + "strings" + + . 
"alauda.io/alb2/pkg/controller/ext/auth/types" +) + +// NOTE: 因为实现原理的不同,我们的parse实际上要比ingress-nginx的更宽松。。$$ 在ingress-nginx会报错 +// 这里我们假设不会有这种异常的annotation +func ParseVarString(s string) (VarString, error) { + ret := VarString{} + if !strings.Contains(s, "$") { + ret = append(ret, s) + return ret, nil + } + buff := "" + cur := 0 + take := func() string { + cur++ + return string(s[cur-1]) + } + peek := func() (string, bool) { + if cur == len(s) { + return "", true + } + return string(s[cur]), false + } + for { + c, eof := peek() + if eof { + break + } + if c == "$" { + if buff != "" { + ret = append(ret, buff) + } + buff = take() + continue + } + // 变量只能是字母数字下划线 + is_var := (c >= "0" && c <= "9") || (c >= "a" && c <= "z") || (c >= "A" && c <= "Z") || c == "_" + if !is_var { + // 不是变量模式,继续 + if len(buff) > 0 && buff[0] != '$' { + buff += take() + continue + } + // 是变量模式 + if len(buff) > 0 && buff[0] == '$' { + if c == "{" { + buff += take() + continue + } + // ${} 模式 + if c == "}" && len(buff) >= 2 && buff[1] == '{' { + _ = take() + buff = "$" + buff[2:] + ret = append(ret, buff) + buff = "" + continue + } + // 其他字符都会导致立刻退出变量模式 + ret = append(ret, buff) + buff = take() + continue + } + } + buff += take() + } + // 处理最后一个 + if buff != "" { + ret = append(ret, buff) + } + return ret, nil +} diff --git a/pkg/controller/ext/otel/otel.go b/pkg/controller/ext/otel/otel.go index 09c0f228..1a5d7327 100644 --- a/pkg/controller/ext/otel/otel.go +++ b/pkg/controller/ext/otel/otel.go @@ -12,9 +12,9 @@ import ( ct "alauda.io/alb2/controller/types" av1 "alauda.io/alb2/pkg/apis/alauda/v1" . "alauda.io/alb2/pkg/controller/ext/otel/types" + ngt "alauda.io/alb2/pkg/controller/ngxconf/types" . "alauda.io/alb2/pkg/utils" "alauda.io/alb2/utils" - u "alauda.io/alb2/utils" jp "github.com/evanphx/json-patch" "github.com/go-logr/logr" "github.com/xorcare/pointer" @@ -26,18 +26,20 @@ const ( OpenTelemetryTrustIncomingSpan = "nginx.ingress.kubernetes.io/opentelemetry-trust-incoming-spans" ) -type Otel struct { - Log logr.Logger +type OtelCtl struct { + Log logr.Logger + domain string } -func NewOtel(log logr.Logger) *Otel { - return &Otel{Log: log} +func NewOtel(log logr.Logger, domain string) *OtelCtl { + return &OtelCtl{Log: log, domain: domain} } // TODO ingress default rule and ft default backend not support otel.. 
// 根据ingress 生成rule -func (o *Otel) UpdateRuleViaIngress(ingress *nv1.Ingress, ruleIndex int, pathIndex int, rule *av1.Rule, domain string) { +func (o *OtelCtl) IngressAnnotationToRule(ingress *nv1.Ingress, ruleIndex int, pathIndex int, rule *av1.Rule) { + domain := o.domain if ingress == nil || rule == nil || ingress.Annotations == nil { return } @@ -74,8 +76,7 @@ func (o *Otel) UpdateRuleViaIngress(ingress *nv1.Ingress, ruleIndex int, pathInd } } -// rule cr 转成 policy -func (o *Otel) FromRuleCr(rule *m.Rule, r *ct.Rule) { +func (o *OtelCtl) ToInternalRule(rule *m.Rule, r *ct.InternalRule) { alb_otel, ft_otel, rule_otel := access_otel(rule) if !alb_otel.Need() && !rule_otel.Need() && !ft_otel.Need() { return @@ -90,13 +91,26 @@ func (o *Otel) FromRuleCr(rule *m.Rule, r *ct.Rule) { r.Config.Otel = nil return } + r.Config.Otel = &cf.OtelConf +} + +func (w *OtelCtl) ToPolicy(r *ct.InternalRule, p *ct.Policy, refs ct.RefMap) { if r.Config.Otel == nil { - r.Config.Otel = &OtelInPolicy{} + return + } + p.Config.Otel = r.Config.Otel +} + +func (o *OtelCtl) ResolveDnsIfNeed(cf *OtelConf) (*OtelConf, error) { + if cf.Exporter == nil || cf.Exporter.Collector == nil { + return nil, fmt.Errorf("invalid otel config, exporter is nil") + } + addr, err := ResolveDnsIfNeed(cf.Exporter.Collector.Address) + if err != nil { + return nil, err } - hash := Hash(u.PrettyJson(cf)) - // IMPR 这样的一个问题是 从policy上我们不是很容易看出这个hash属于谁 - r.Config.Otel.Hash = hash - r.Config.Otel.Otel = &cf.OtelConf + cf.Exporter.Collector.Address = addr + return cf, nil } var DEFAULT_OTEL = OtelCrConf{ @@ -156,64 +170,6 @@ func access_otel(r *m.Rule) (alb_otel *OtelCrConf, ft_otel *OtelCrConf, rule_ote return alb_otel, ft_otel, r.GetOtel() } -// 有所有policy的情况下重新整理 -func (o *Otel) ResolvePolicy(alb *ct.LoadBalancer, policy *ct.NgxPolicy) error { - // 遍历所有的rule 如果配置相同 提取成同一个 config 并设置好对应ref - - type ResolvedOtel struct { - otel OtelConf - err error - } - common_otel := map[string]ResolvedOtel{} - - resolve := func(hash string, otel *OtelConf) error { - if c, has := common_otel[hash]; has { - return c.err - } - addr, err := ResolveDnsIfNeed(otel.Exporter.Collector.Address) - if err != nil { - common_otel[hash] = ResolvedOtel{err: err} - return err - } - otel.Exporter.Collector.Address = addr - common_otel[hash] = ResolvedOtel{ - otel: *otel, - } - return nil - } - for _, ps := range policy.Http.Tcp { - for _, p := range ps { - otel := p.GetOtel() - if otel == nil { - continue - } - // set otel in matched policy to nil, we should get it via ref - p.Config.Otel.Otel = nil - if !otel.HasCollector() { - continue - } - hash := p.Config.Otel.Hash - if resolve(hash, otel) != nil { - continue - } - p.Config.Otel.OtelRef = pointer.String(hash) - } - } - - for hash, otel := range common_otel { - if otel.err != nil { - continue - } - policy.CommonConfig[hash] = ct.CommonPolicyConfigVal{ - Type: "otel", - Otel: &OtelInCommon{ - Otel: otel.otel, - }, - } - } - return nil -} - func ResolveDnsIfNeedWithNet(rawurl string, lookup func(string) ([]string, error)) (string, error) { url, err := url.Parse(rawurl) if err != nil { @@ -264,3 +220,22 @@ func getIngressOpt(in *nv1.Ingress) (enable, trustincoming *bool) { } return enable, trustincoming } + +func (c *OtelCtl) CollectRefs(ir *ct.InternalRule, refs ct.RefMap) { +} + +func (c *OtelCtl) UpdateNgxTmpl(_ *ngt.NginxTemplateConfig, _ *ct.LoadBalancer, _ *config.Config) { +} + +func (c *OtelCtl) UpdatePolicyAfterUniq(ext *ct.PolicyExt) { + if ext.Otel == nil { + return + } + otel, err := 
c.ResolveDnsIfNeed(ext.Otel) + if err != nil { + c.Log.Error(err, "resolve otel dns fail", "policy", ext.Otel) + ext.Otel = nil + return + } + ext.Otel = otel +} diff --git a/pkg/controller/ext/otel/test/otel_test.go b/pkg/controller/ext/otel/test/otel_test.go index 2513681f..f431afe0 100644 --- a/pkg/controller/ext/otel/test/otel_test.go +++ b/pkg/controller/ext/otel/test/otel_test.go @@ -6,6 +6,7 @@ import ( "testing" "alauda.io/alb2/config" + ct "alauda.io/alb2/controller/types" "alauda.io/alb2/driver" "alauda.io/alb2/ingress" albv1 "alauda.io/alb2/pkg/apis/alauda/v1" @@ -192,6 +193,65 @@ var _ = Describe("otel related test", func() { }, }, }, + { + name: "override parent_name", + cfg: []*OtelCrConf{ + { + Enable: true, + OtelConf: OtelConf{ + Exporter: &Exporter{ + Collector: &Collector{ + Address: "http://127.0.0.1:4318", + RequestTimeout: 1000, + }, + }, + Sampler: &Sampler{ + Name: "parent_base", + Options: &SamplerOptions{ + ParentName: pointer.String("trace_id_ratio"), + Fraction: pointer.String("0.1"), + }, + }, + }, + }, + { + Enable: true, + OtelConf: OtelConf{ + Sampler: &Sampler{ + Name: "parent_base", + Options: &SamplerOptions{ + ParentName: pointer.String("always_on"), + }, + }, + }, + }, + }, + expect: OtelCrConf{ + Enable: true, + OtelConf: OtelConf{ + Exporter: &Exporter{ + Collector: &Collector{ + Address: "http://127.0.0.1:4318", + RequestTimeout: 1000, + }, + BatchSpanProcessor: &BatchSpanProcessor{ + MaxQueueSize: 2048, + InactiveTimeout: 2, + }, + }, + Sampler: &Sampler{ + Name: "parent_base", + Options: &SamplerOptions{ + ParentName: pointer.String("always_on"), + }, + }, + Flags: &Flags{ + NoTrustIncomingSpan: false, + HideUpstreamAttrs: false, + }, + }, + }, + }, } for i, c := range cases { real, err := otel.MergeWithDefaultJsonPatch(c.cfg, *otel.DEFAULT_OTEL.DeepCopy()) @@ -428,10 +488,11 @@ spec: GinkgoNoErr(err) policy, err := GetPolicy(PolicyGetCtx{Ctx: ctx, Name: "a1", Ns: "cpaas-system", Drv: drv, L: l}) GinkgoNoErr(err) - l.Info("policy", "policy", PrettyJson(policy)) - + policy_80 := policy.Http.GetPoliciesByPort(80)[0] + key := policy_80.Config.Refs[ct.Otel] + l.Info("policy", "policy", PrettyJson(policy), "key", key, "p", policy_80.Config.Refs) GinkgoAssertJsonEq( - policy.CommonConfig[*policy.Http.GetPoliciesByPort(80)[0].Config.Otel.OtelRef].Otel.Otel, + policy.SharedConfig[key].Otel, ` { "exporter": { diff --git a/pkg/controller/ext/otel/types/type.go b/pkg/controller/ext/otel/types/type.go index 869d448c..8a94eae7 100644 --- a/pkg/controller/ext/otel/types/type.go +++ b/pkg/controller/ext/otel/types/type.go @@ -1,30 +1,11 @@ package types -type OtelInCommon struct { - Otel OtelConf `json:"otel"` -} - -type OtelInPolicy struct { - OtelRef *string `json:"otel_ref"` - Otel *OtelConf `json:"otel,omitempty"` - Hash string `json:"-"` -} - -// the otel config in cr - // +k8s:deepcopy-gen=true type OtelCrConf struct { Enable bool `json:"enable"` OtelConf `json:",inline"` } -func (o *OtelCrConf) Need() bool { - if o == nil { - return false - } - return o.Enable -} - // +k8s:deepcopy-gen=true type OtelConf struct { Exporter *Exporter `json:"exporter,omitempty"` @@ -33,6 +14,13 @@ type OtelConf struct { Resource map[string]string `json:"resource,omitempty"` } +func (o *OtelCrConf) Need() bool { + if o == nil { + return false + } + return o.Enable +} + func (o *OtelConf) HasCollector() bool { return !(o.Exporter == nil || o.Exporter.Collector == nil) } @@ -59,11 +47,11 @@ type Collector struct { RequestTimeout int `json:"request_timeout"` } -// -- 
opts.drop_on_queue_full: if true, drop span when queue is full, otherwise force process batches, default true -// -- opts.max_queue_size: maximum queue size to buffer spans for delayed processing, default 2048 -// -- opts.batch_timeout: maximum duration for constructing a batch, default 5s -// -- opts.inactive_timeout: timer interval for processing batches, default 2s -// -- opts.max_export_batch_size: maximum number of spans to process in a single batch, default 256 +// -- opts.drop_on_queue_full: if true, drop span when queue is full, otherwise force process batches, default true +// -- opts.max_queue_size: maximum queue size to buffer spans for delayed processing, default 2048 +// -- opts.batch_timeout: maximum duration for constructing a batch, default 5s +// -- opts.inactive_timeout: timer interval for processing batches, default 2s +// -- opts.max_export_batch_size: maximum number of spans to process in a single batch, default 256 // +k8s:deepcopy-gen=true type BatchSpanProcessor struct { @@ -81,7 +69,7 @@ type Sampler struct { // +k8s:deepcopy-gen=true type SamplerOptions struct { // +optional - ParentName *string `json:"parent_name"` // name of parent if parent_base sampler + ParentName *string `json:"parent_name"` // name of parent if parent_base sampler -- do not omitempty, it should not be overwrite // +optional - Fraction *string `json:"fraction,omitempty"` // k8s does not like float, so use string + Fraction *string `json:"fraction"` // k8s does not like float, so use string -- do not omitempty, it should not be overwrite } diff --git a/pkg/controller/ext/waf/types/waf_type.go b/pkg/controller/ext/waf/types/waf_type.go index 19b51718..d027c230 100644 --- a/pkg/controller/ext/waf/types/waf_type.go +++ b/pkg/controller/ext/waf/types/waf_type.go @@ -1,4 +1,4 @@ -package waf_type +package types // +k8s:deepcopy-gen=true type WafCrConf struct { @@ -19,8 +19,8 @@ type WafConf struct { CmRef string `json:"cmRef"` // ns/name#section } -type WafInRule struct { - Key string - Raw WafConf - Snippet string +type WafInternal struct { + Key string // 要跳转到的 nginx location + Raw WafConf // 其他的waf的配置 + Snippet string // nginx location 的配置 } diff --git a/pkg/controller/ext/waf/types/zz_generated.deepcopy.go b/pkg/controller/ext/waf/types/zz_generated.deepcopy.go new file mode 100644 index 00000000..b0b26135 --- /dev/null +++ b/pkg/controller/ext/waf/types/zz_generated.deepcopy.go @@ -0,0 +1,55 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package types + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConf) DeepCopyInto(out *WafConf) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConf. 
+func (in *WafConf) DeepCopy() *WafConf { + if in == nil { + return nil + } + out := new(WafConf) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafCrConf) DeepCopyInto(out *WafCrConf) { + *out = *in + out.WafConf = in.WafConf + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafCrConf. +func (in *WafCrConf) DeepCopy() *WafCrConf { + if in == nil { + return nil + } + out := new(WafCrConf) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/controller/ext/waf/waf.go b/pkg/controller/ext/waf/waf.go index 0848b3b6..f3ffd91b 100644 --- a/pkg/controller/ext/waf/waf.go +++ b/pkg/controller/ext/waf/waf.go @@ -9,10 +9,10 @@ import ( . "alauda.io/alb2/controller/types" . "alauda.io/alb2/pkg/controller/ext/waf/types" . "alauda.io/alb2/pkg/controller/ngxconf/types" + "sigs.k8s.io/controller-runtime/pkg/client" m "alauda.io/alb2/controller/modules" ct "alauda.io/alb2/controller/types" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" av1 "alauda.io/alb2/pkg/apis/alauda/v1" @@ -20,12 +20,13 @@ import ( nv1 "k8s.io/api/networking/v1" ) -type Waf struct { - log logr.Logger +type WafCtl struct { + log logr.Logger + domain string } -func NewWaf(l logr.Logger) *Waf { - return &Waf{log: l} +func NewWaf(l logr.Logger, domain string) *WafCtl { + return &WafCtl{log: l, domain: domain} } const ( @@ -36,7 +37,8 @@ const ( ) // 根据ingress 生成rule -func (w *Waf) UpdateRuleViaIngress(ingress *nv1.Ingress, ruleIndex int, pathIndex int, rule *av1.Rule, domain string) { +func (w *WafCtl) IngressAnnotationToRule(ingress *nv1.Ingress, ruleIndex int, pathIndex int, rule *av1.Rule) { + domain := w.domain if ingress == nil || rule == nil || ingress.Annotations == nil { return } @@ -80,20 +82,25 @@ func (w *Waf) UpdateRuleViaIngress(ingress *nv1.Ingress, ruleIndex int, pathInde } // rule cr 转成 policy -func (w *Waf) FromRuleCr(rule *m.Rule, r *ct.Rule) { +func (w *WafCtl) ToInternalRule(rule *m.Rule, r *ct.InternalRule) { waf, snip, key := mergeWaf(rule) if waf == nil || !waf.Enable { return } - r.ToLocation = &key - - r.Waf = &WafInRule{ + r.Config.Waf = &WafInternal{ Raw: waf.WafConf, Snippet: snip, Key: key, } } +func (w *WafCtl) ToPolicy(rule *ct.InternalRule, r *ct.Policy, refs ct.RefMap) { + if rule.Config.Waf == nil { + return + } + r.ToLocation = &rule.Config.Waf.Key +} + func getWafAnnotation(obj metav1.ObjectMeta) string { return obj.Annotations[ModSecuritySnippet] } @@ -118,24 +125,39 @@ func mergeWaf(r *m.Rule) (*WafCrConf, string, string) { return nil, "", "" } +func (w *WafCtl) CollectRefs(r *ct.InternalRule, refs RefMap) { + waf := r.Config.Waf + if waf == nil || waf.Raw.CmRef == "" { + return + } + ns, name, _, err := ParseCmRef(waf.Raw.CmRef) + if err != nil { + w.log.Error(err, "invalid cmref", "cmref", waf.Raw.CmRef) + return + } + key := client.ObjectKey{Namespace: ns, Name: name} + refs.ConfigMap[key] = nil +} + // 更新和删除配置不会有问题,因为现在的custom location的名字是固定的 // 唯一可能的就是刚刚增加的时候,有可能旧的worker看到nginx 配置还是旧的。但是读到的policy已经是新的了。导致想去跳转到一个还不存在的location上去。 // 只对长连接有影响。 -func (w *Waf) UpdateNgxTmpl(tmpl_cfg *NginxTemplateConfig, alb *LoadBalancer, cfg *config.Config) error { +func (w *WafCtl) UpdateNgxTmpl(tmpl_cfg *NginxTemplateConfig, alb *LoadBalancer, cfg *config.Config) { custom_location := map[string]map[string]FtCustomLocation{} for _, f := range alb.Frontends { for _, r := range f.Rules { - if r.Waf == nil { + waf 
:= r.Config.Waf + if waf == nil { continue } if _, ok := custom_location[f.String()]; !ok { custom_location[f.String()] = map[string]FtCustomLocation{} } - key := r.Waf.Key + key := waf.Key if _, has := custom_location[f.String()][key]; !has { custom_location[f.String()][key] = FtCustomLocation{ Name: key, - LocationRaw: GenLocation(alb.CmRefs, r), + LocationRaw: GenLocation(alb.Refs, r), } } } @@ -157,11 +179,10 @@ func (w *Waf) UpdateNgxTmpl(tmpl_cfg *NginxTemplateConfig, alb *LoadBalancer, cf }) tmpl_cfg.Frontends[f] = ft } - return nil } -func GenLocation(cms map[string]*corev1.ConfigMap, r *ct.Rule) string { - waf := r.Waf +func GenLocation(cms RefMap, r *ct.InternalRule) string { + waf := r.Config.Waf if waf.Snippet != "" { waf.Raw.CmRef = "" waf.Raw.UseCoreRules = false @@ -169,7 +190,7 @@ func GenLocation(cms map[string]*corev1.ConfigMap, r *ct.Rule) string { if waf.Raw.CmRef != "" { waf.Raw.UseCoreRules = false } - pickCm := func(cms map[string]*corev1.ConfigMap, ref string) string { + pickCm := func(cms RefMap, ref string) string { if ref == "" { return "" } @@ -177,8 +198,11 @@ func GenLocation(cms map[string]*corev1.ConfigMap, r *ct.Rule) string { if err != nil { return "" } - key := fmt.Sprintf("%s/%s", ns, name) - if cm, has := cms[key]; has { + key := client.ObjectKey{ + Namespace: ns, + Name: name, + } + if cm, has := cms.ConfigMap[key]; has { return cm.Data[section] } return "" @@ -228,3 +252,6 @@ func ParseCmRef(ref string) (ns, name, section string, err error) { section = name_and_section[1] return ns, name, section, nil } + +func (c *WafCtl) UpdatePolicyAfterUniq(ext *ct.PolicyExt) { +} diff --git a/pkg/controller/extctl/extctl.go b/pkg/controller/extctl/extctl.go new file mode 100644 index 00000000..a6c4c088 --- /dev/null +++ b/pkg/controller/extctl/extctl.go @@ -0,0 +1,148 @@ +package extctl + +import ( + "encoding/json" + "sort" + "time" + + "alauda.io/alb2/config" + m "alauda.io/alb2/controller/modules" + ct "alauda.io/alb2/controller/types" + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + "alauda.io/alb2/pkg/controller/ext/auth" + "alauda.io/alb2/pkg/controller/ext/otel" + "alauda.io/alb2/pkg/controller/ext/waf" + ngt "alauda.io/alb2/pkg/controller/ngxconf/types" + pu "alauda.io/alb2/pkg/utils" + pm "alauda.io/alb2/pkg/utils/metrics" + "github.com/go-logr/logr" + "golang.org/x/exp/maps" + nv1 "k8s.io/api/networking/v1" +) + +// extension controller +type ExtCtl struct { + log logr.Logger + domain string + extensions []Extension +} + +type ExtCtlCfgOpt struct { + Log logr.Logger + Domain string +} + +type Extension interface { + IngressAnnotationToRule(ing *nv1.Ingress, rindex int, pindex int, rule *albv1.Rule) + ToInternalRule(rule *m.Rule, r *ct.InternalRule) + CollectRefs(*ct.InternalRule, ct.RefMap) + ToPolicy(*ct.InternalRule, *ct.Policy, ct.RefMap) + UpdateNgxTmpl(tmpl_cfg *ngt.NginxTemplateConfig, alb *ct.LoadBalancer, cfg *config.Config) + UpdatePolicyAfterUniq(*ct.PolicyExt) +} + +func NewExtensionCtl(opt ExtCtlCfgOpt) ExtCtl { + return ExtCtl{ + log: opt.Log, + domain: opt.Domain, + extensions: []Extension{ + NewLegacyExtCtl(), + NewHeaderModifyCtl(opt.Log, opt.Domain), + otel.NewOtel(opt.Log, opt.Domain), + waf.NewWaf(opt.Log, opt.Domain), + auth.NewAuthCtl(opt.Log, opt.Domain), + }, + } +} + +// ingress sync +func (c ExtCtl) IngressAnnotationToRule(ing *nv1.Ingress, rindex int, pindex int, rule *albv1.Rule) { + for _, ext := range c.extensions { + ext.IngressAnnotationToRule(ing, rindex, pindex, rule) + } +} + +// cr:rule -> m:rule +func (c ExtCtl) 
ToInternalRule(mr *m.Rule, ir *ct.InternalRule) { + for _, ext := range c.extensions { + ext.ToInternalRule(mr, ir) + } +} + +func (c ExtCtl) ToPolicy(ir *ct.InternalRule, p *ct.Policy, refs ct.RefMap) { + for _, ext := range c.extensions { + ext.ToPolicy(ir, p, refs) + } +} + +// TODO performance when huge rule? 应该使用某种hint 不要每次都重新计算 +// NOTE 每个插件负责根据对每个rule生成配置,这些配置可能是从其他地方继承而来的,但是在policy.new中对于相同的配置,我们把他merge到一起. +func (c ExtCtl) MergeSamePolicyConfig(ngx *ct.NgxPolicy) { + share := ct.SharedExtPolicyConfig{} + for _, ps := range ngx.Http.Tcp { + for _, p := range ps { + if p.Config.Refs == nil { + p.Config.Refs = map[ct.PolicyExtKind]string{} + } + used_plugin_map := map[string]bool{} + for k, c := range p.Config.PolicyExt.ToMaps() { + // TODO migrate those to new framework + if k == "rewrite" || k == "cors" || k == "rewrite_request" || k == "rewrite_response" || k == "timeout" { + continue + } + used_plugin_map[string(k)] = true + hash := hash(c) + if _, exist := share[hash]; !exist { + share[hash] = ct.RefBox{ + Hash: hash, + Type: k, + PolicyExt: *c, + } + } + p.Config.Refs[k] = hash + p.Config.Clean(k) + } + used_plugin_list := maps.Keys(used_plugin_map) + sort.Strings(used_plugin_list) + p.Plugins = used_plugin_list + } + } + ngx.SharedConfig = share +} + +// policy gen +func (c ExtCtl) ResolvePolicies(alb *ct.LoadBalancer, ngx *ct.NgxPolicy) { + s := time.Now() + defer func() { + e := time.Now() + pm.Write("ext-resolve-policy", float64(e.UnixMilli())-float64(s.UnixMilli())) + }() + c.MergeSamePolicyConfig(ngx) + + for _, v := range ngx.SharedConfig { + for _, ext := range c.extensions { + ext.UpdatePolicyAfterUniq(&v.PolicyExt) + } + } +} + +func (c ExtCtl) CollectRefs(ir *ct.InternalRule, refs ct.RefMap) { + for _, ext := range c.extensions { + ext.CollectRefs(ir, refs) + } +} + +func (c ExtCtl) UpdateNgxTmpl(tmpl_cfg *ngt.NginxTemplateConfig, alb *ct.LoadBalancer, cfg *config.Config) error { + for _, ext := range c.extensions { + ext.UpdateNgxTmpl(tmpl_cfg, alb, cfg) + } + return nil +} + +func hash(x interface{}) string { + bytes, err := json.Marshal(x) + if err != nil { + return "" + } + return pu.HashBytes(bytes) +} diff --git a/pkg/controller/extctl/extctl_test.go b/pkg/controller/extctl/extctl_test.go new file mode 100644 index 00000000..afb4e276 --- /dev/null +++ b/pkg/controller/extctl/extctl_test.go @@ -0,0 +1,61 @@ +package extctl + +import ( + "encoding/json" + "testing" + + ct "alauda.io/alb2/controller/types" + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + otelt "alauda.io/alb2/pkg/controller/ext/otel/types" + "github.com/stretchr/testify/assert" +) + +func TestMergePolicy(t *testing.T) { + ngx := &ct.NgxPolicy{ + Http: ct.HttpPolicy{ + Tcp: map[albv1.PortNumber]ct.Policies{ + 80: { + { + Rule: "xx", + Upstream: "xx", + InternalDSL: []interface{}{}, + Config: ct.PolicyExtCfg{ + Refs: map[ct.PolicyExtKind]string{}, + PolicyExt: ct.PolicyExt{ + Otel: &otelt.OtelConf{ + Exporter: &otelt.Exporter{}, + Sampler: &otelt.Sampler{ + Name: "always_on", + }, + Flags: &otelt.Flags{}, + Resource: map[string]string{}, + }, + }, + }, + }, + }, + }, + }, + } + ctl := &ExtCtl{} + ctl.MergeSamePolicyConfig(ngx) + js, err := json.MarshalIndent(ngx, " ", " ") + assert.NoError(t, err) + t.Logf("ngx %s", js) + + // OMG. 
https://github.com/golang/go/issues/37711 + type TestB struct { + B string `json:"b"` + } + type TestS struct { + A string `json:"a"` + A1 string `json:"a1,omitempty"` + B TestB `json:"b"` + C []string `json:"c"` + D []TestB `json:"d"` + E map[string]TestB `json:"e"` + } + s := TestS{} + s_js, _ := json.MarshalIndent(s, " ", " ") + t.Logf("s is %s", s_js) +} diff --git a/pkg/controller/extctl/header_moidy_ctl.go b/pkg/controller/extctl/header_moidy_ctl.go new file mode 100644 index 00000000..c06ff6cd --- /dev/null +++ b/pkg/controller/extctl/header_moidy_ctl.go @@ -0,0 +1,119 @@ +package extctl + +import ( + "encoding/json" + "fmt" + + m "alauda.io/alb2/controller/modules" + ct "alauda.io/alb2/controller/types" + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + u "alauda.io/alb2/pkg/utils" + "github.com/go-logr/logr" + nv1 "k8s.io/api/networking/v1" + + "alauda.io/alb2/config" + "alauda.io/alb2/controller/types" + ngt "alauda.io/alb2/pkg/controller/ngxconf/types" +) + +// rewrite request/response header +type HeaderModifyCtl struct { + domain string + log logr.Logger +} + +func NewHeaderModifyCtl(log logr.Logger, domain string) HeaderModifyCtl { + return HeaderModifyCtl{ + domain: domain, + log: log, + } +} + +func (c HeaderModifyCtl) IngressAnnotationToRule(ing *nv1.Ingress, ruleIndex int, pathIndex int, rule *albv1.Rule) { + annotations := c.GenRewriteResponseOrRequestRuleAnnotation(ing.Name, ing.Annotations, c.domain) + rule.Annotations = u.MergeMap(rule.Annotations, annotations) +} + +func rewriteResponseConfigFromJson(jsonStr string) (*types.RewriteResponseConfig, error) { + cfg := types.RewriteResponseConfig{} + err := json.Unmarshal([]byte(jsonStr), &cfg) + if err != nil { + return nil, err + } + if cfg.IsEmpty() { + return nil, fmt.Errorf("empty config") + } + return &cfg, err +} + +func rewriteRequestConfigFromJson(jsonStr string) (*types.RewriteRequestConfig, error) { + cfg := types.RewriteRequestConfig{} + err := json.Unmarshal([]byte(jsonStr), &cfg) + if err != nil { + return nil, err + } + return &cfg, err +} + +func (c HeaderModifyCtl) GenRewriteResponseOrRequestRuleAnnotation(ingressName string, annotation map[string]string, domain string) map[string]string { + ruleAnnotation := make(map[string]string) + n := config.NewNames(domain) + + if val, ok := annotation[n.GetAlbIngressRewriteResponseAnnotation()]; ok { + _, err := rewriteResponseConfigFromJson(val) + if err != nil { + c.log.Error(err, "ext ingress rewrite_response: invalid annotation", "ing", ingressName, "val", err) + } else { + ruleAnnotation[n.GetAlbRuleRewriteResponseAnnotation()] = val + } + } + if val, ok := annotation[n.GetAlbIngressRewriteRequestAnnotation()]; ok { + _, err := rewriteRequestConfigFromJson(val) + if err != nil { + c.log.Error(err, "ext ingress rewrite_request: invalid annotation", "ing", ingressName, "val", err) + } else { + ruleAnnotation[n.GetAlbRuleRewriteRequestAnnotation()] = val + } + } + return ruleAnnotation +} + +func (c HeaderModifyCtl) ToInternalRule(rule *m.Rule, r *ct.InternalRule) { + n := config.NewNames(c.domain) + annotation := rule.Annotations + ruleName := rule.Name + if val, ok := annotation[n.GetAlbRuleRewriteResponseAnnotation()]; ok { + rewriteCfg, err := rewriteResponseConfigFromJson(val) + if err != nil { + c.log.Error(err, "ext ingress rewrite_response: invalid annotation", "rule", ruleName, "val", err) + } else { + r.Config.RewriteResponse = rewriteCfg + } + } + if val, ok := annotation[n.GetAlbRuleRewriteRequestAnnotation()]; ok { + rewriteCfg, err := 
rewriteRequestConfigFromJson(val) + if err != nil { + c.log.Error(err, "ext ingress rewrite_request: invalid annotation", "rule", ruleName, "val", err) + } else { + r.Config.RewriteRequest = rewriteCfg + } + } +} + +func (c HeaderModifyCtl) ToPolicy(r *ct.InternalRule, p *ct.Policy, refs ct.RefMap) { + if r.Config.RewriteRequest != nil { + p.Config.RewriteRequest = r.Config.RewriteRequest + } + if r.Config.RewriteResponse != nil { + p.Config.RewriteResponse = r.Config.RewriteResponse + } +} + +func (c HeaderModifyCtl) CollectRefs(ir *ct.InternalRule, refs ct.RefMap) { +} + +func (c HeaderModifyCtl) UpdateNgxTmpl(_ *ngt.NginxTemplateConfig, _ *ct.LoadBalancer, _ *config.Config) { +} + +func (c HeaderModifyCtl) UpdatePolicyAfterUniq(_ *ct.PolicyExt) { +} diff --git a/pkg/controller/custom_config/alb_custom_config_test.go b/pkg/controller/extctl/header_moidy_ctl_test.go similarity index 82% rename from pkg/controller/custom_config/alb_custom_config_test.go rename to pkg/controller/extctl/header_moidy_ctl_test.go index 58d93069..d676c3c0 100644 --- a/pkg/controller/custom_config/alb_custom_config_test.go +++ b/pkg/controller/extctl/header_moidy_ctl_test.go @@ -1,4 +1,4 @@ -package custom_config +package extctl import ( "fmt" @@ -8,6 +8,7 @@ import ( "alauda.io/alb2/controller/modules" . "alauda.io/alb2/controller/types" albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + "alauda.io/alb2/utils/log" "github.com/stretchr/testify/assert" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -23,9 +24,9 @@ func TestRuleConfig(t *testing.T) { type TestCase struct { ingressAnnotation map[string]string expectRuleAnnotation map[string]string - expectRuleConfig *RuleConfigInPolicy + expectRuleConfig *RuleExt } - empty := &RuleConfigInPolicy{} + empty := &RuleExt{} case1 := TestCase{ map[string]string{}, map[string]string{}, @@ -35,7 +36,7 @@ func TestRuleConfig(t *testing.T) { case2 := TestCase{ map[string]string{ALBIngressRewriteResponseAnnotation: `{"headers":{"aa":"bb"}}`}, map[string]string{RuleRewriteResponseAnnotation: `{"headers":{"aa":"bb"}}`}, - &RuleConfigInPolicy{ + &RuleExt{ RewriteResponse: &RewriteResponseConfig{ Headers: map[string]string{ "aa": "bb", @@ -67,7 +68,7 @@ func TestRuleConfig(t *testing.T) { { map[string]string{ALBIngressRewriteRequestAnnotation: `{"headers_var":{"a":"cookie_b"},"headers_add_var":{"aa":["cookie_b"]}}`}, map[string]string{RuleRewriteRequestAnnotation: `{"headers_var":{"a":"cookie_b"},"headers_add_var":{"aa":["cookie_b"]}}`}, - &RuleConfigInPolicy{ + &RuleExt{ RewriteRequest: &RewriteRequestConfig{ HeadersVar: map[string]string{ "a": "cookie_b", @@ -80,7 +81,11 @@ func TestRuleConfig(t *testing.T) { }, } for i, c := range cases { - ruleAnnotation := legacyGenerateRuleAnnotationFromIngressAnnotation("xx", c.ingressAnnotation, cfg.Domain) + ctl := HeaderModifyCtl{ + domain: cfg.Domain, + log: log.L(), + } + ruleAnnotation := ctl.GenRewriteResponseOrRequestRuleAnnotation("xx", c.ingressAnnotation, cfg.Domain) assert.Equal(t, ruleAnnotation, c.expectRuleAnnotation, fmt.Sprintf("case %v fail", i+1)) rule := &modules.Rule{ Rule: &albv1.Rule{ @@ -90,14 +95,16 @@ func TestRuleConfig(t *testing.T) { }, }, } - p := &RuleConfigInPolicy{} - ruleInPolicyFromRuleAnnotation(rule, cfg.Domain, p) - assert.Equal(t, p, c.expectRuleConfig, fmt.Sprintf("case %v fail", i+1)) + ir := &InternalRule{ + Config: RuleExt{}, + } + ctl.ToInternalRule(rule, ir) + assert.Equal(t, ir.Config, *c.expectRuleConfig, fmt.Sprintf("case %v fail", i+1)) } type RuleTestCase struct { ruleAnnotation map[string]string - 
expectRuleConfig *RuleConfigInPolicy + expectRuleConfig *RuleExt } // if rule annotation is invalid, rule config should be nil. ruleCase1 := RuleTestCase{ @@ -124,8 +131,13 @@ func TestRuleConfig(t *testing.T) { }, }, } - p := &RuleConfigInPolicy{} - ruleInPolicyFromRuleAnnotation(rule, cfg.Domain, p) - assert.Equal(t, p, c.expectRuleConfig) + ir := &InternalRule{Config: RuleExt{}} + + ctl := HeaderModifyCtl{ + domain: cfg.Domain, + log: log.L(), + } + ctl.ToInternalRule(rule, ir) + assert.Equal(t, ir.Config, *c.expectRuleConfig) } } diff --git a/pkg/controller/extctl/legacy.go b/pkg/controller/extctl/legacy.go new file mode 100644 index 00000000..55b5e643 --- /dev/null +++ b/pkg/controller/extctl/legacy.go @@ -0,0 +1,75 @@ +package extctl + +import ( + "alauda.io/alb2/config" + m "alauda.io/alb2/controller/modules" + ct "alauda.io/alb2/controller/types" + + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + ngt "alauda.io/alb2/pkg/controller/ngxconf/types" + nv1 "k8s.io/api/networking/v1" +) + +// rewrite_url redirect cors vhost... those ext born with alb.. just donot touch them, for now. +type LegacyExtCtl struct{} + +func NewLegacyExtCtl() LegacyExtCtl { + return LegacyExtCtl{} +} + +func (o LegacyExtCtl) IngressAnnotationToRule(ing *nv1.Ingress, rindex int, pindex int, rule *albv1.Rule) { +} + +func (o LegacyExtCtl) ToInternalRule(mr *m.Rule, ir *ct.InternalRule) { + mrs := mr.Spec + + ru := ct.RewriteConf{} + ru.URL = mrs.URL + ru.RewriteBase = mrs.RewriteBase + ru.RewriteTarget = mrs.RewriteTarget + ir.Config.Rewrite = &ru + + rd := ct.RedirectConf{} + rd.RedirectURL = mrs.RedirectURL + rd.RedirectCode = mrs.RedirectCode + ir.Config.Redirect = &rd + + cors := ct.Cors{} + cors.EnableCORS = mrs.EnableCORS + cors.CORSAllowHeaders = mrs.CORSAllowHeaders + cors.CORSAllowOrigin = mrs.CORSAllowOrigin + ir.Config.Cors = &cors + + vhost := ct.Vhost{} + vhost.VHost = mrs.VHost + ir.Config.Vhost = &vhost +} + +func (o LegacyExtCtl) CollectRefs(_ *ct.InternalRule, _ ct.RefMap) { +} + +func (o LegacyExtCtl) ToPolicy(ir *ct.InternalRule, p *ct.Policy, refs ct.RefMap) { + // rewrite + rule_ext := ir.Config + if rule_ext.Rewrite != nil { + p.RewriteConf = *rule_ext.Rewrite + } + // redirect + if rule_ext.Redirect != nil { + p.RedirectConf = *rule_ext.Redirect + } + // cors + if rule_ext.Cors != nil { + p.Cors = *rule_ext.Cors + } + // vhost + if rule_ext.Vhost != nil { + p.Vhost = *rule_ext.Vhost + } +} + +func (o LegacyExtCtl) UpdateNgxTmpl(tmpl_cfg *ngt.NginxTemplateConfig, alb *ct.LoadBalancer, cfg *config.Config) { +} + +func (o LegacyExtCtl) UpdatePolicyAfterUniq(*ct.PolicyExt) { +} diff --git a/pkg/controller/ngxconf/nginx.tmpl b/pkg/controller/ngxconf/nginx.tmpl index 90abd9d5..8599f1c2 100644 --- a/pkg/controller/ngxconf/nginx.tmpl +++ b/pkg/controller/ngxconf/nginx.tmpl @@ -44,6 +44,13 @@ env HOSTNAME; {{ if .Flags.ShowHttpWrapper }} http { {{ end }} + + {{ if $.Resolver -}} + resolver {{$.Resolver}}; + {{ if $.ResolverTimeout -}} + resolver_timeout {{$.ResolverTimeout}}; + {{ end }} + {{ end }} {{ if $.Flags.ShowMimeTypes }} include {{$.RestyBase}}/nginx/conf/mime.types; @@ -51,6 +58,7 @@ http { include {{$.TweakBase}}/http.conf; + {{$.HttpExtra}} {{ if $cfg.EnableGzip -}} @@ -93,6 +101,12 @@ http { } } + location /healthz { + content_by_lua_block { + ngx.say("ok") + } + } + location /clear { content_by_lua_block { require("metrics").clear() @@ -122,7 +136,7 @@ http { set $location_mode sub; rewrite_by_lua_file {{$.NginxBase}}/lua/phase/l7_rewrite_phase.lua; proxy_pass 
$backend_protocol://http_backend; - header_filter_by_lua_file {{$.NginxBase}}/lua/l7_header_filter.lua; + header_filter_by_lua_file {{$.NginxBase}}/lua/phase/l7_header_filter_phase.lua; {{ if $cfg.EnablePrometheus -}} log_by_lua_file {{$.NginxBase}}/lua/phase/log_phase.lua; @@ -136,7 +150,7 @@ http { rewrite_by_lua_file {{$.NginxBase}}/lua/phase/l7_rewrite_phase.lua; proxy_pass $backend_protocol://http_backend; - header_filter_by_lua_file {{$.NginxBase}}/lua/l7_header_filter.lua; + header_filter_by_lua_file {{$.NginxBase}}/lua/phase/l7_header_filter_phase.lua; {{ if $cfg.EnablePrometheus -}} log_by_lua_file {{$.NginxBase}}/lua/phase/log_phase.lua; @@ -172,7 +186,6 @@ http { ssl_dhparam {{$.ShareBase}}/dhparam.pem; ssl_certificate_by_lua_file {{$.NginxBase}}/lua/phase/ssl_cert_phase.lua; - {{ range $_, $loc := $ft.CustomLocation -}} location @{{ $loc.Name }} { internal; @@ -181,7 +194,7 @@ http { set $location_mode sub; rewrite_by_lua_file {{$.NginxBase}}/lua/phase/l7_rewrite_phase.lua; proxy_pass $backend_protocol://http_backend; - header_filter_by_lua_file {{$.NginxBase}}/lua/l7_header_filter.lua; + header_filter_by_lua_file {{$.NginxBase}}/lua/phase/l7_header_filter_phase.lua; {{ if $cfg.EnablePrometheus -}} log_by_lua_file {{$.NginxBase}}/lua/phase/log_phase.lua; @@ -196,7 +209,7 @@ http { rewrite_by_lua_file {{$.NginxBase}}/lua/phase/l7_rewrite_phase.lua; proxy_pass $backend_protocol://http_backend; - header_filter_by_lua_file {{$.NginxBase}}/lua/l7_header_filter.lua; + header_filter_by_lua_file {{$.NginxBase}}/lua/phase/l7_header_filter_phase.lua; {{ if $cfg.EnablePrometheus -}} log_by_lua_file {{$.NginxBase}}/lua/phase/log_phase.lua; diff --git a/pkg/controller/ngxconf/ngxconf_tmpl.go b/pkg/controller/ngxconf/ngxconf_tmpl.go index 055d2ec6..9be2ea60 100644 --- a/pkg/controller/ngxconf/ngxconf_tmpl.go +++ b/pkg/controller/ngxconf/ngxconf_tmpl.go @@ -1,38 +1,37 @@ package ngxconf import ( + "bufio" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "os" "path/filepath" "sort" "strings" + "time" "alauda.io/alb2/utils/dirhash" + "github.com/go-logr/logr" "gopkg.in/yaml.v2" - "sigs.k8s.io/controller-runtime/pkg/client" "alauda.io/alb2/config" "alauda.io/alb2/controller/types" - "alauda.io/alb2/utils" - "k8s.io/klog/v2" - - . "alauda.io/alb2/controller/types" "alauda.io/alb2/driver" - "alauda.io/alb2/pkg/controller/ext/waf" + cus "alauda.io/alb2/pkg/controller/extctl" . 
"alauda.io/alb2/pkg/controller/ngxconf/types" - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" + pm "alauda.io/alb2/pkg/utils/metrics" + "alauda.io/alb2/utils" ) type NgxCli struct { drv *driver.KubernetesDriver log logr.Logger opt NgxCliOpt - waf *waf.Waf + cus cus.ExtCtl } + type NgxCliOpt struct{} func NewNgxCli(drv *driver.KubernetesDriver, log logr.Logger, opt NgxCliOpt) NgxCli { @@ -40,45 +39,22 @@ func NewNgxCli(drv *driver.KubernetesDriver, log logr.Logger, opt NgxCliOpt) Ngx drv: drv, log: log, opt: opt, - waf: waf.NewWaf(log), - } -} - -func (c *NgxCli) FillUpRefCms(alb *LoadBalancer) error { - // 实际上也应该是由ext/waf做的,但是目前没有同类的东西,所以这里直接做 - if alb.CmRefs == nil { - alb.CmRefs = map[string]*corev1.ConfigMap{} - } - cms := map[client.ObjectKey]string{} - for _, f := range alb.Frontends { - for _, r := range f.Rules { - if r.Waf != nil && r.Waf.Raw.CmRef != "" { - ns, name, _, err := waf.ParseCmRef(r.Waf.Raw.CmRef) - if err != nil { - continue - } - cms[client.ObjectKey{Namespace: ns, Name: name}] = r.Waf.Key - } - } - } - for cm_key, waf_key := range cms { - cm := &corev1.ConfigMap{} - err := c.drv.Cli.Get(c.drv.Ctx, cm_key, cm) - if err != nil { - c.log.Error(err, "get waf used cm fail", "waf", waf_key, "cm", cm_key.String()) - continue - } - alb.CmRefs[cm_key.String()] = cm + cus: cus.NewExtensionCtl(cus.ExtCtlCfgOpt{Log: log, Domain: drv.Opt.Domain}), } - return nil } func (c *NgxCli) GenerateNginxTemplateConfig(alb *types.LoadBalancer, phase string, cfg *config.Config) (*NginxTemplateConfig, error) { + s := time.Now() + defer func() { + pm.Write("gen-nginx-conf", float64(time.Since(s).Milliseconds())) + }() nginxParam := newNginxParam(cfg) + s_bind_ip := time.Now() ipv4, ipv6, err := GetBindIp(cfg) if err != nil { return nil, err } + pm.Write("gen-nginx-conf/bind-ip", float64(time.Since(s_bind_ip).Milliseconds())) fts := make(map[string]FtConfig) for _, ft := range alb.Frontends { if ft.Conflict { @@ -95,11 +71,22 @@ func (c *NgxCli) GenerateNginxTemplateConfig(alb *types.LoadBalancer, phase stri } // calculate hash by tweak dir tweakBase := cfg.GetNginxCfg().TweakDir - hash, err := dirhash.HashDir(tweakBase, ".conf", dirhash.DefaultHash) + hash := "default" + s_hash := time.Now() + if tweakBase != "" { + hash, err = dirhash.HashDir(tweakBase, ".conf", dirhash.DefaultHash) + if err != nil { + c.log.Error(err, "failed to calculate hash") + return nil, err + } + } + pm.Write("gen-nginx-conf/hash-tweak", float64(time.Since(s_hash).Milliseconds())) + + resolver, err := getDnsResolver() if err != nil { - klog.Error(err) return nil, err } + tmpl_cfg := &NginxTemplateConfig{ Name: alb.Name, TweakBase: tweakBase, @@ -108,6 +95,7 @@ func (c *NgxCli) GenerateNginxTemplateConfig(alb *types.LoadBalancer, phase stri ShareBase: "/etc/alb2/nginx", Frontends: fts, TweakHash: hash, + Resolver: resolver, Phase: phase, Metrics: MetricsConfig{ Port: nginxParam.MetricsPort, @@ -117,13 +105,59 @@ func (c *NgxCli) GenerateNginxTemplateConfig(alb *types.LoadBalancer, phase stri NginxParam: nginxParam, Flags: DefaulNgxTmplFlags(), } - err = c.waf.UpdateNgxTmpl(tmpl_cfg, alb, cfg) + err = c.cus.UpdateNgxTmpl(tmpl_cfg, alb, cfg) if err != nil { return nil, err } return tmpl_cfg, nil } +func getDnsResolver() (string, error) { + f, err := os.Open("/etc/resolv.conf") + if err != nil { + return "", err + } + defer f.Close() + raw, err := io.ReadAll(f) + if err != nil { + return "", err + } + return getDnsResolverRaw(string(raw)) +} + +func getDnsResolverRaw(raw string) (string, error) { + var 
nameservers []string + scanner := bufio.NewScanner(strings.NewReader(raw)) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, "#") || line == "" { + continue + } + if strings.HasPrefix(line, "nameserver") { + parts := strings.Fields(line) + if len(parts) == 2 { + nameservers = append(nameservers, parts[1]) + } + } + } + if err := scanner.Err(); err != nil { + return "", err + } + + // dual-stack and ipv4 first + for _, ip := range nameservers { + if utils.IsIPv4(ip) { + return ip, nil + } + } + for _, ip := range nameservers { + if utils.IsIPv6(ip) { + return "[" + ip + "]", nil + } + } + return "", fmt.Errorf("no nameserver found in %v", raw) +} + func NgxTmplCfgFromYaml(ngx string) (*NginxTemplateConfig, error) { var cfg NginxTemplateConfig err := yaml.Unmarshal([]byte(ngx), &cfg) @@ -139,16 +173,8 @@ func GetBindIp(cfg *config.Config) (ipv4Address []string, ipv6Address []string, return nil, nil, err } - networkInfo, err := GetCurrentNetwork() - if err != nil { - return nil, nil, err - } - return getBindIp(bindNICConfig, networkInfo, cfg.GetNginxCfg().EnableIpv6) -} - -func getBindIp(bindNICConfig BindNICConfig, networkInfo NetWorkInfo, enableIpv6 bool) (ipv4Address []string, ipv6Address []string, err error) { + enableIpv6 := cfg.GetNginxCfg().EnableIpv6 if len(bindNICConfig.Nic) == 0 { - klog.Info("[bind_nic] without config bind 0.0.0.0") ipv4 := []string{"0.0.0.0"} ipv6 := []string{"[::]"} if !enableIpv6 { @@ -156,7 +182,14 @@ func getBindIp(bindNICConfig BindNICConfig, networkInfo NetWorkInfo, enableIpv6 } return ipv4, ipv6, nil } + networkInfo, err := GetCurrentNetwork() + if err != nil { + return nil, nil, err + } + return getBindIp(bindNICConfig, networkInfo, enableIpv6) +} +func getBindIp(bindNICConfig BindNICConfig, networkInfo NetWorkInfo, enableIpv6 bool) (ipv4Address []string, ipv6Address []string, err error) { ipv4Address = []string{} ipv6Address = []string{} @@ -187,11 +220,9 @@ func getBindIp(bindNICConfig BindNICConfig, networkInfo NetWorkInfo, enableIpv6 } if len(ipv4Address) == 0 { - klog.Info("[bind_nic] could not find any ipv4 address bind 0.0.0.0") ipv4Address = append(ipv4Address, "0.0.0.0") } if enableIpv6 && len(ipv6Address) == 0 { - klog.Info("[bind_nic] could not find any ipv6 address and enableIpv6 bind [::]") ipv6Address = append(ipv6Address, "[::]") } @@ -199,7 +230,6 @@ func getBindIp(bindNICConfig BindNICConfig, networkInfo NetWorkInfo, enableIpv6 ipv6Address = utils.StrListRemoveDuplicates(ipv6Address) sort.Strings(ipv4Address) sort.Strings(ipv6Address) - klog.Infof("[bind_nic] bind ipv4 %v ip v6 %v", ipv4Address, ipv6Address) return ipv4Address, ipv6Address, nil } @@ -211,6 +241,7 @@ type InterfaceInfo struct { type NetWorkInfo = map[string]InterfaceInfo +// TODO GetCurrentNetwork maybe slow (80ms),但是标准库中获取interface本质上也是先获取所有的nic func GetCurrentNetwork() (NetWorkInfo, error) { ifaces, err := net.Interfaces() if err != nil { @@ -265,7 +296,7 @@ func GetBindNICConfig(base string) (BindNICConfig, error) { } defer jsonFile.Close() - byteValue, _ := ioutil.ReadAll(jsonFile) + byteValue, _ := io.ReadAll(jsonFile) jsonStr := string(byteValue) if len(strings.TrimSpace(jsonStr)) == 0 { return BindNICConfig{}, nil diff --git a/pkg/controller/ngxconf/ngxconf_tmpl_test.go b/pkg/controller/ngxconf/ngxconf_tmpl_test.go index 0ba5e35c..6a64a0d9 100644 --- a/pkg/controller/ngxconf/ngxconf_tmpl_test.go +++ b/pkg/controller/ngxconf/ngxconf_tmpl_test.go @@ -1,11 +1,14 @@ package ngxconf import ( + "fmt" "io/ioutil" "os" 
"path/filepath" + "strings" "testing" + . "alauda.io/alb2/pkg/controller/ngxconf/types" "github.com/stretchr/testify/assert" ) @@ -124,3 +127,42 @@ func TestGetBindIpConfig(t *testing.T) { cfg, err = testGet(&configStr) assert.NotNil(t, err) } + +func TestNginxConf(t *testing.T) { + tmpl_cfg := NginxTemplateConfig{ + Name: "xx", + TweakBase: "xx", + NginxBase: "/alb/nginx", + RestyBase: "/usr/local/openresty", + ShareBase: "/etc/alb2/nginx", + Frontends: map[string]FtConfig{}, + Resolver: "127.0.0.1", + TweakHash: "", + Phase: "running", + Metrics: MetricsConfig{ + Port: 1111, + IpV4BindAddress: []string{}, + IpV6BindAddress: []string{}, + }, + NginxParam: NginxParam{EnableIPV6: true}, + Flags: DefaulNgxTmplFlags(), + } + ngx_cfg, err := RenderNginxConfigEmbed(tmpl_cfg) + assert.NoError(t, err) + fmt.Println(ngx_cfg) + assert.True(t, strings.Contains(ngx_cfg, "resolver 127.0.0.1;")) +} + +func TestResolve(t *testing.T) { + dns, err := getDnsResolverRaw(` +nameserver 10.4.0.10 +`) + assert.NoError(t, err) + assert.Equal(t, dns, "10.4.0.10") + + dns, err = getDnsResolverRaw(` +nameserver fd00:10:98::a +`) + assert.NoError(t, err) + assert.Equal(t, dns, "[fd00:10:98::a]") +} diff --git a/pkg/controller/ngxconf/test/ngxconf_test.go b/pkg/controller/ngxconf/test/ngxconf_test.go index cb034165..5841c930 100644 --- a/pkg/controller/ngxconf/test/ngxconf_test.go +++ b/pkg/controller/ngxconf/test/ngxconf_test.go @@ -150,7 +150,7 @@ var _ = DescribeTable("ngx config should work", set $location_mode sub; rewrite_by_lua_file /alb/nginx/lua/phase/l7_rewrite_phase.lua; proxy_pass $backend_protocol://http_backend; - header_filter_by_lua_file /alb/nginx/lua/l7_header_filter.lua; + header_filter_by_lua_file /alb/nginx/lua/phase/l7_header_filter_phase.lua; `)) }, ), diff --git a/pkg/controller/ngxconf/types/types.go b/pkg/controller/ngxconf/types/types.go index e287b62f..fb965585 100644 --- a/pkg/controller/ngxconf/types/types.go +++ b/pkg/controller/ngxconf/types/types.go @@ -4,21 +4,23 @@ import albv1 "alauda.io/alb2/pkg/apis/alauda/v1" // a config used for nginx.tmpl to generate nginx.conf type NginxTemplateConfig struct { - Name string `yaml:"name"` - TweakBase string `yaml:"tweakBase"` // /alb/tweak - NginxBase string `yaml:"nginxBase"` // /alb/nginx - RestyBase string `yaml:"restyBase"` // /usr/local/openresty/ - ShareBase string `yaml:"shareBase"` // the /etc/alb2/nginx - Frontends map[string]FtConfig `yaml:"frontends"` - Metrics MetricsConfig `yaml:"metrics"` - TweakHash string `yaml:"tweakHash"` - Phase string `yaml:"phase"` - Base string `yaml:"base"` - NginxParam `yaml:",inline"` - RootExtra string `yaml:"rootExtra"` - HttpExtra string `yaml:"httpExtra"` - StreamExtra string `yaml:"streamExtra"` - Flags Flags `yaml:"flags"` // render part of nginx.conf. used in AlaudaLib.pm + Name string `yaml:"name"` + TweakBase string `yaml:"tweakBase"` // /alb/tweak + NginxBase string `yaml:"nginxBase"` // /alb/nginx + RestyBase string `yaml:"restyBase"` // /usr/local/openresty/ + ShareBase string `yaml:"shareBase"` // the /etc/alb2/nginx + Frontends map[string]FtConfig `yaml:"frontends"` + Metrics MetricsConfig `yaml:"metrics"` + Resolver string `yaml:"resolver"` + ResolverTimeout string `yaml:"resolver_timeout"` + TweakHash string `yaml:"tweakHash"` + Phase string `yaml:"phase"` + Base string `yaml:"base"` + NginxParam `yaml:",inline"` + RootExtra string `yaml:"rootExtra"` + HttpExtra string `yaml:"httpExtra"` + StreamExtra string `yaml:"streamExtra"` + Flags Flags `yaml:"flags"` // render part of nginx.conf. 
used in AlaudaLib.pm } type FtConfig struct { diff --git a/pkg/operator/controllers/depl/resources/workload/deploy.go b/pkg/operator/controllers/depl/resources/workload/deploy.go index 9959876b..49160dd9 100644 --- a/pkg/operator/controllers/depl/resources/workload/deploy.go +++ b/pkg/operator/controllers/depl/resources/workload/deploy.go @@ -284,8 +284,10 @@ func (d *DeplTemplate) expectConfig() DeployCfg { Env: conf.GetNginxContainerEnvs(d.env.Version), ReadyProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.IntOrString{IntVal: int32(conf.Controller.MetricsPort)}, + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.IntOrString{IntVal: int32(conf.Controller.MetricsPort)}, + Scheme: corev1.URISchemeHTTPS, + Path: "/healthz", }, }, InitialDelaySeconds: 3, @@ -296,8 +298,10 @@ func (d *DeplTemplate) expectConfig() DeployCfg { }, Probe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.IntOrString{IntVal: int32(conf.Controller.MetricsPort)}, + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.IntOrString{IntVal: int32(conf.Controller.MetricsPort)}, + Scheme: corev1.URISchemeHTTPS, + Path: "/healthz", }, }, InitialDelaySeconds: 60, diff --git a/pkg/operator/toolkit/tools.go b/pkg/operator/toolkit/tools.go index 9ccf1484..d33139ef 100644 --- a/pkg/operator/toolkit/tools.go +++ b/pkg/operator/toolkit/tools.go @@ -12,7 +12,6 @@ import ( ) func PrettyCr(obj client.Object) string { - // TODO a better way if IsNil(obj) { return "isnil" } diff --git a/pkg/utils/data_trans.go b/pkg/utils/data_trans.go new file mode 100644 index 00000000..a9dfb2d5 --- /dev/null +++ b/pkg/utils/data_trans.go @@ -0,0 +1,45 @@ +package utils + +import ( + "fmt" + "reflect" +) + +type ResolveAnnotationOpt struct { + Prefix []string // 我们支持多个前缀,这样用户可以用我们自己的前缀来覆盖掉nginx的前缀,这样能保证他们的兼容性 +} + +func ResolverStructFromAnnotation(t interface{}, annotation map[string]string, opt ResolveAnnotationOpt) error { + t_type := reflect.TypeOf(t).Elem() + t_val := reflect.ValueOf(t).Elem() + for i := 0; i < t_type.NumField(); i++ { + t_field := t_type.Field(i) + v_field := t_val.Field(i) + if v_field.Kind() == reflect.Struct && t_field.Anonymous { + _ = ResolverStructFromAnnotation(v_field.Addr().Interface(), annotation, opt) + } + + tag := t_field.Tag.Get("annotation") + if tag == "" { + continue + } + default_val := t_field.Tag.Get("default") + // we only support string field to set + if !(v_field.CanSet() && v_field.Kind() == reflect.String) { + continue + } + resolved := false + for _, prefix := range opt.Prefix { + full_key := fmt.Sprintf("%s/%s", prefix, tag) + if value, ok := annotation[full_key]; ok { + v_field.SetString(value) + resolved = true + break + } + } + if !resolved { + v_field.SetString(default_val) + } + } + return nil +} diff --git a/pkg/utils/metrics/metrics.go b/pkg/utils/metrics/metrics.go new file mode 100644 index 00000000..1922ff1b --- /dev/null +++ b/pkg/utils/metrics/metrics.go @@ -0,0 +1,36 @@ +package metrics + +import "sync" + +var ( + _globalMu sync.RWMutex + _globalM *metrics = nil +) + +type metrics struct { + data map[string]float64 +} + +func init() { + _globalMu.Lock() + defer _globalMu.Unlock() + if _globalM != nil { + return + } + + _globalM = &metrics{data: map[string]float64{}} +} + +// TODO 用 otel span.. 
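// [editor's note: illustrative sketch, not part of the patch]
// How this helper is consumed elsewhere in this change (e.g. GenerateNginxTemplateConfig
// and the test policy helper time themselves via pm.Write). A minimal, self-contained
// example; the key "demo-step" and the timedStep function are hypothetical.

package main

import (
	"fmt"
	"time"

	pm "alauda.io/alb2/pkg/utils/metrics"
)

func timedStep() {
	s := time.Now()
	defer func() {
		// one sample per key; a later Write with the same key overwrites it
		pm.Write("demo-step", float64(time.Since(s).Milliseconds()))
	}()
	time.Sleep(10 * time.Millisecond)
}

func main() {
	timedStep()
	fmt.Println(pm.Read()) // Read hands back the package-level map under an RLock
}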
+ +func Write(key string, value float64) { + _globalMu.Lock() + defer _globalMu.Unlock() + _globalM.data[key] = value +} + +func Read() map[string]float64 { + _globalMu.RLock() + defer _globalMu.RUnlock() + return _globalM.data +} diff --git a/pkg/utils/test_utils/ngx_conf.go b/pkg/utils/test_utils/ngx_conf.go index bbf5fe4b..ba2a480d 100644 --- a/pkg/utils/test_utils/ngx_conf.go +++ b/pkg/utils/test_utils/ngx_conf.go @@ -103,3 +103,35 @@ func FindNestDirectives(p *gngc.Config, root string, directiveName string) gngc. } return nil } + +func PickStreamServerListen(cfgRaw string) ([]string, error) { + ret := []string{} + p, err := gngp.NewStringParser(cfgRaw, gngp.WithSkipValidDirectivesErr()).Parse() + if err != nil { + return nil, err + } + ss := p.FindDirectives("stream")[0].GetBlock().FindDirectives("server") + for _, s := range ss { + lss := s.GetBlock().FindDirectives("listen") + for _, ls := range lss { + ret = append(ret, strings.Join(ls.GetParameters(), " ")) + } + } + return ret, nil +} + +func PickHttpServerListen(cfgRaw string) ([]string, error) { + ret := []string{} + p, err := gngp.NewStringParser(cfgRaw, gngp.WithSkipValidDirectivesErr()).Parse() + if err != nil { + return nil, err + } + ss := p.FindDirectives("http")[0].GetBlock().FindDirectives("server") + for _, s := range ss { + lss := s.GetBlock().FindDirectives("listen") + for _, ls := range lss { + ret = append(ret, ls.GetParameters()...) + } + } + return ret, nil +} diff --git a/utils/test_utils/ngxconf_parser_ext_test.go b/pkg/utils/test_utils/ngxconf_parser_ext_test.go similarity index 100% rename from utils/test_utils/ngxconf_parser_ext_test.go rename to pkg/utils/test_utils/ngxconf_parser_ext_test.go diff --git a/pkg/utils/test_utils/policy_helper.go b/pkg/utils/test_utils/policy_helper.go index 1c235170..0bc31de7 100644 --- a/pkg/utils/test_utils/policy_helper.go +++ b/pkg/utils/test_utils/policy_helper.go @@ -2,6 +2,7 @@ package test_utils import ( "context" + "time" "alauda.io/alb2/config" cli "alauda.io/alb2/controller/cli" @@ -9,6 +10,7 @@ import ( drv "alauda.io/alb2/driver" . "alauda.io/alb2/pkg/controller/ngxconf" . 
"alauda.io/alb2/pkg/controller/ngxconf/types" + pm "alauda.io/alb2/pkg/utils/metrics" "github.com/go-logr/logr" ) @@ -32,14 +34,36 @@ func GetPolicy(ctx PolicyGetCtx) (*ct.NgxPolicy, error) { if err != nil { return nil, err } + acli.CollectAndFetchRefs(lb) policy := pcli.GenerateAlbPolicy(lb) return &policy, nil } -func GetPolicyAndNgx(ctx PolicyGetCtx) (*ct.NgxPolicy, *NginxTemplateConfig, error) { +type XCli struct { + alb cli.AlbCli + policy cli.PolicyCli + nginx NgxCli +} + +func NewXCli(ctx PolicyGetCtx) XCli { + s := time.Now() + defer func() { + pm.Write("test/init-policy-cli", float64(time.Since(s).Milliseconds())) + }() acli := cli.NewAlbCli(ctx.Drv, ctx.L) pcli := cli.NewPolicyCli(ctx.Drv, ctx.L, cli.PolicyCliOpt{MetricsPort: 0}) ncli := NewNgxCli(ctx.Drv, ctx.L, NgxCliOpt{}) + return XCli{ + alb: acli, + policy: pcli, + nginx: ncli, + } +} + +func (c XCli) GetPolicyAndNgx(ctx PolicyGetCtx) (*ct.NgxPolicy, *NginxTemplateConfig, error) { + acli := c.alb + pcli := c.policy + ncli := c.nginx lb, err := acli.GetLBConfig(ctx.Ns, ctx.Name) if err != nil { return nil, nil, err @@ -49,15 +73,17 @@ func GetPolicyAndNgx(ctx PolicyGetCtx) (*ct.NgxPolicy, *NginxTemplateConfig, err if err != nil { return nil, nil, err } + + acli.CollectAndFetchRefs(lb) policy := pcli.GenerateAlbPolicy(lb) - err = ncli.FillUpRefCms(lb) - if err != nil { - return nil, nil, err - } tmpl, err := ncli.GenerateNginxTemplateConfig(lb, "running", ctx.Cfg) if err != nil { return nil, nil, err } return &policy, tmpl, nil } + +func GetPolicyAndNgx(ctx PolicyGetCtx) (*ct.NgxPolicy, *NginxTemplateConfig, error) { + return NewXCli(ctx).GetPolicyAndNgx(ctx) +} diff --git a/pkg/utils/util.go b/pkg/utils/util.go index 4841e951..8f5f2e22 100644 --- a/pkg/utils/util.go +++ b/pkg/utils/util.go @@ -3,7 +3,10 @@ package utils import ( "crypto/sha256" "encoding/hex" + "fmt" "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" ) func ToBoolOr(x string, backup bool) bool { @@ -24,8 +27,25 @@ func MergeMap(a map[string]string, b map[string]string) map[string]string { return ret } +func HashBytes(s []byte) string { + h := sha256.New() + h.Write(s) + return hex.EncodeToString(h.Sum(nil)) +} + func Hash(s string) string { h := sha256.New() h.Write([]byte(s)) return hex.EncodeToString(h.Sum(nil)) } + +func ParseStringToObjectKey(key string) (client.ObjectKey, error) { + parts := strings.Split(key, "/") + if len(parts) != 2 { + return client.ObjectKey{}, fmt.Errorf("invalid key format: %s", key) + } + return client.ObjectKey{ + Namespace: parts[0], + Name: parts[1], + }, nil +} diff --git a/scripts/alb-build-actions.sh b/scripts/alb-build-actions.sh index 40d76b1c..4106e703 100644 --- a/scripts/alb-build-actions.sh +++ b/scripts/alb-build-actions.sh @@ -17,14 +17,15 @@ function alb-fast-build() { function alb-static-build() { set -x - rm ./bin/alb - rm ./bin/operator + rm ./bin/alb || true + rm ./bin/operator || true CC=/usr/bin/musl-gcc CGO_ENABLED=1 go build -v -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now,-static' -v -o ./bin/operator alauda.io/alb2/cmd/operator CC=/usr/bin/musl-gcc CGO_ENABLED=1 go build -v -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now,-static' -v -o ./bin/alb alauda.io/alb2/cmd/alb CC=/usr/bin/musl-gcc CGO_ENABLED=1 go build -v -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now,-static' -v -o ./bin/migrate/init-port-info alauda.io/alb2/migrate/init-port-info CC=/usr/bin/musl-gcc CGO_ENABLED=1 go build -v 
-buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now,-static' -v -o ./bin/tools/tweak_gen alauda.io/alb2/cmd/utils/tweak_gen CC=/usr/bin/musl-gcc CGO_ENABLED=1 go build -v -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now,-static' -v -o ./bin/tools/albctl alauda.io/alb2/cmd/utils/albctl CC=/usr/bin/musl-gcc CGO_ENABLED=1 go build -v -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now,-static' -v -o ./bin/tools/ngx_gen alauda.io/alb2/cmd/utils/ngx_gen + CC=/usr/bin/musl-gcc CGO_ENABLED=1 go build -v -buildmode=pie -ldflags '-w -s -linkmode=external -extldflags=-Wl,-z,relro,-z,now,-static' -v -o ./bin/tools/dirhash alauda.io/alb2/cmd/utils/dirhash md5sum ./bin/alb md5sum ./bin/operator diff --git a/scripts/alb-codegen-actions.sh b/scripts/alb-codegen-actions.sh index 13bbd51d..640075fc 100644 --- a/scripts/alb-codegen-actions.sh +++ b/scripts/alb-codegen-actions.sh @@ -13,6 +13,7 @@ function alb-crd-install-bin() ( ) function alb-crd-gen() ( + set -ex alb-crd-install-bin rm -rf pkg/client @@ -34,6 +35,7 @@ function alb-crd-gen() ( yq -i '.spec.versions += .spec.versions.1' ./deploy/chart/alb/crds/crd.alauda.io_alaudaloadbalancer2.yaml yq -i '.spec.versions.2.name = "v2"' ./deploy/chart/alb/crds/crd.alauda.io_alaudaloadbalancer2.yaml yq -i '.spec.versions.2.storage = false' ./deploy/chart/alb/crds/crd.alauda.io_alaudaloadbalancer2.yaml + echo "gen ok" ) @@ -63,7 +65,8 @@ function alb-crd-gen-deepcopy() ( local GOBIN="$(go env GOBIN)" local gobin="${GOBIN:-$(go env GOPATH)/bin}" - local pkgs="alauda.io/alb2/pkg/apis/alauda/v1,alauda.io/alb2/pkg/apis/alauda/v2beta1,alauda.io/alb2/pkg/controller/ext/otel/types" + local ext_pkgs=$(go list $CUR_ALB_BASE/... | grep "alb2/pkg/controller/ext" | grep types | sort | uniq | awk 'ORS=","') + local pkgs="alauda.io/alb2/pkg/apis/alauda/v1,alauda.io/alb2/pkg/apis/alauda/v2beta1,$ext_pkgs" "${gobin}/deepcopy-gen" $pkgs --input-dirs $pkgs -O zz_generated.deepcopy --go-header-file ./scripts/boilerplate.go.txt --output-base "$alb/code_gen" ) @@ -72,3 +75,16 @@ function alb-gen-depgraph() ( goda graph "alauda.io/alb2/... - alauda.io/alb2/utils/... - alauda.io/alb2/pkg/utils/... - alauda.io/alb2/pkg/apis/... - alauda.io/alb2/pkg/client/... - alauda.io/alb2/migrate/... - alauda.io/alb2/test/..." 
>./alb.dep cat ./alb.dep | dot -Tsvg -o graph.svg ) + +function alb-gen-mapping() ( + while read -r file; do + echo "rm $file" + rm "$file" + done < <(find ./ -name "codegen_mapping_*" -type f) + go run ./cmd/utils/map_gen/main.go + while read -r file; do + echo "fmt $file" + go fmt "$file" + gofumpt -w "$file" + done < <(find ./ -name "codegen_mapping_*" -type f) +) diff --git a/scripts/alb-dev-actions.sh b/scripts/alb-dev-actions.sh index 5d5d0e3c..d78b9dcb 100755 --- a/scripts/alb-dev-actions.sh +++ b/scripts/alb-dev-actions.sh @@ -1,4 +1,7 @@ #!/bin/bash +# shellcheck disable=SC2046,SC2086,SC1091,SC2005 + +export GOPROXY=https://goproxy.cn,https://build-nexus.alauda.cn/repository/golang,direct function _current_file() { local cf="${BASH_SOURCE[0]}" @@ -27,3 +30,17 @@ function alb-dev-install() ( # yq => yq (https://github.com/mikefarah/yq/) version > 4 return ) + +function alb-list-todo-all() ( + rg TODO +) + +function alb-list-todo-cur() ( + git diff $(git merge-base master HEAD)..HEAD | grep -i "TODO" +) + +function alb-replace-alb() ( + alb-static-build + local alb_pod=$(kubectl get po -n cpaas-system --no-headers | grep $1 | awk '{print $1}') + kubectl cp $PWD/bin/alb cpaas-system/$alb_pod:/alb/ctl/alb -c alb2 +) diff --git a/scripts/alb-lint-actions.sh b/scripts/alb-lint-actions.sh index eb2e2ec4..2d7fc36b 100644 --- a/scripts/alb-lint-actions.sh +++ b/scripts/alb-lint-actions.sh @@ -2,6 +2,7 @@ function alb-lint-all() ( cd $CUR_ALB_BASE + echo "alb-lint-all" set -e alb-lint-cspell echo "lint cspell ok" @@ -48,6 +49,7 @@ function alb-lint-bash-fix() { } function alb-lint-go() { + # shellcheck disable=SC2046 if [ ! "$(gofmt -l $(find . -type f -name '*.go' | grep -v ".deepcopy"))" = "" ]; then echo "go fmt check fail" return 1 @@ -56,6 +58,14 @@ function alb-lint-go() { alb-list-kind-e2e } +function alb-lint-gofumpt() { + gofumpt -l ./ +} + +function alb-lint-gofumpt-fix() { + alb-lint-gofumpt | xargs gofumpt -w +} + function alb-lint-go-build() { go build -v -v -o ./bin/alb alauda.io/alb2/cmd/alb go build -v -v -o ./bin/alb alauda.io/alb2/cmd/operator @@ -71,10 +81,11 @@ function alb-lint-go-fix { } function alb-lint-lua-install() { - # https://github.com/Koihik/LuaFormatter - # use VS Marketplace Link: https://marketplace.visualstudio.com/items?itemName=Koihik.vscode-lua-format - sudo luarocks install --server=https://luarocks.org/dev luaformatter #lua-format - sudo luarocks install luacheck + luarocks install luacheck + rm ./linux-x64.tar.gz || true + wget https://github.com/CppCXY/EmmyLuaCodeStyle/releases/download/1.5.6/linux-x64.tar.gz + tar -xvf linux-x64.tar.gz + mv ./linux-x64/bin/CodeFormat /usr/bin/CodeFormat } function alb-lint-lua-need-format() { @@ -86,6 +97,11 @@ function alb-lint-lua-need-format() { } function alb-lint-lua() ( + alb-lint-lua-luacheck + alb-lint-lua-emmy +) + +function alb-lint-lua-luacheck() ( while read -r f; do luacheck $f if [[ $? -ne 0 ]]; then @@ -94,28 +110,32 @@ function alb-lint-lua() ( return fi done < <(alb-lua-list-all-app-file) +) - # before i find a beeter way to format lua, disable this - # the recommended way is to use lus-ls https://github.com/LuaLS/lua-language-server in vscode +function alb-lint-lua-emmy-all() ( + CodeFormat check -w . 
-c ./.editorconfig -ig "vendor/*;" 2>&1 | grep Check +) + +function alb-lint-lua-emmy() ( + while read -r f; do + if head -n 1 "$f" | grep 'format:on' | grep 'style:emmy'; then + alb-lint-lua-emmy-format-check $f + fi + done < <(alb-lua-list-all-file) +) + +function alb-lint-lua-emmy-format-check() ( + local f=$1 + CodeFormat check -f $f -c ./.editorconfig return - # # TODO add all lua file - # while read -r f; do - # lua-format --check -v $f - # if [[ $? -ne 0 ]]; then - # echo "need format $f" - # exit 1 - # return - # fi - # done < <(alb-lua-list-all-needformat-file) +) - echo "lua ok" +function alb-lint-lua-emmy-format-install-arch() ( + yay -S code-format-bin ) -function alb-lua-lint-format-fix() ( - # shellcheck disable=SC2068 - while read -r f; do - lua-format -i -v $f - done < <(alb-lua-list-all-needformat-file) +function alb-lint-lua-emmy-format-fix() ( + return ) function alb-lua-list-all-file() { @@ -131,20 +151,6 @@ function alb-lua-list-all-app-file() { find $PWD/template/nginx/lua -type f | grep '\.lua' | grep -v 'types.lua' | grep -v 'vendor' | grep -v 'lua/resty/' } -function alb-lua-list-all-needformat-file() { - # TODO install luaformatter in ci - if [[ -z $(which lua-format) ]]; then - # echo "lua-format not installed" - return - fi - while read -r f; do - if [[ "false" == "$(alb-lint-lua-need-format $f)" ]]; then - continue - fi - echo $f - done < <(alb-lua-list-all-file) -} - function alb-init-git-hook { read -r -d "" PREPUSH <$key.bt" echo "stop capture lua stack for $pid" - _ng_stap_lua_gen_svg ./$key.bt ./$key.svg $root + _ng_stap_lua_gen_svg ./.fg/$key/$key.bt ./.fg/$key/$key.svg $root if [[ -n "$OPEN_SVG" ]]; then - firefox ./$key.svg + firefox ./.fg/$key/$key.svg fi ) function alb-flame-perf-ng() ( - local pid=$1 + local key=$1 + mkdir -p ./.fg/$key + local pid=$2 + if [[ -z "$pid" ]]; then + pid=$(cat ./template/servroot/logs/nginx.pid) + echo "$pid" + ps -aux | grep $pid + sleep 3 + fi local time=${2-"20"} echo "capture nginx stack for $pid for $time seconds" sudo perf record -a -g -p $pid --call-graph dwarf -- sleep $time - sudo perf script | stackcollapse-perf.pl | flamegraph.pl >nginx.svg + sudo perf script | stackcollapse-perf.pl | flamegraph.pl >./.fg/$key/nginx.svg if [[ -n "$OPEN_SVG" ]]; then - firefox ./nginx.svg + firefox ./.fg/$key/nginx.svg fi ) @@ -221,3 +236,11 @@ function _apd_find_ng_worker_pid_in_docker() ( local pid=$(_docker-ps-via-id $container_name | grep 'nginx: worker process' | awk '{print $2}' | head -n1) echo $pid ) + +function alb-perf-go-policy-gen() ( + export RULE_PERF="true" + ginkgo -focus "should ok when has 5k rule" -v ./test/e2e + go tool pprof -raw ./test/e2e/rule-perf-cpu >cpu.raw + stackcollapse-go.pl ./cpu.raw >cpu.folded + flamegraph.pl ./cpu.folded >./cpu.svg +) diff --git a/scripts/alb-test-actions.sh b/scripts/alb-test-actions.sh index 1d5e40e0..d58217e8 100644 --- a/scripts/alb-test-actions.sh +++ b/scripts/alb-test-actions.sh @@ -57,12 +57,11 @@ function alb-run-checklist-test() ( function alb-run-all-e2e-test() ( set -e - # TODO 覆盖率 local concurrent=${1:-3} local filter=${2:-""} echo concurrent $concurrent filter $filter if [[ "$filter" != "" ]]; then - ginkgo --fail-fast -focus "$filter" ./test/e2e + ginkgo --fail-fast -focus "$filter" ./test/e2e | tee ./all.e2e.log return fi @@ -70,7 +69,8 @@ function alb-run-all-e2e-test() ( local coverpkg=$(echo "$coverpkg_list" | tr "\n" ",") unset DEV_MODE # dev_mode 会导致k8s只启动一个 无法并行测试。。 rm ./test/e2e/ginkgo-node-*.log || true # clean old test log - ginkgo -v -cover 
-covermode=atomic -coverpkg="$coverpkg" -coverprofile=coverage.e2e --fail-fast -p -nodes $concurrent ./test/e2e + ginkgo -v -cover -covermode=atomic -coverpkg="$coverpkg" -coverprofile=coverage.e2e --fail-fast -p -nodes $concurrent ./test/e2e | tee ./all.e2e.log + # for ci if [ -f ./debug ]; then while true; do echo "debug" @@ -87,14 +87,13 @@ function alb-go-unit-test() ( set -e local concurrent=${1:-3} local filter=${2:-""} - # TODO it shoult include e2e test local s=$(date) echo "s $s" # https://github.com/ory/go-acc local coverpkg_list=$(go list ./... | grep -v 'alb2/test/' | grep -v "/pkg/client" | grep -v migrate | sort | uniq | grep "$filter") local coverpkg=$(echo "$coverpkg_list" | tr "\n" ",") - go test -p $concurrent -v -race -covermode=atomic -coverprofile=coverage.unit -coverpkg "$coverpkg" $(go list ./... | grep -v 'alb2/test/' | grep -v "/pkg/client" | grep -v migrate | sort | uniq | grep "$filter") + go test -p $concurrent -v -race -covermode=atomic -coverprofile=coverage.unit -coverpkg "$coverpkg" $(go list ./... | grep -v 'alb2/test/' | grep -v "/pkg/client" | grep -v migrate | sort | uniq | grep "$filter") | tee ./unit_test.log local e=$(date) echo $s $e ) @@ -176,9 +175,7 @@ function alb-install-golang-test-dependency() { rm -rf ./golangci-lint-1.59.1-illumos-amd64.tar.gz rm -rf ./golangci-lint-1.59.1-illumos-amd64 - apk update && apk add python3 py3-pip curl git build-base jq iproute2 openssl tree nodejs npm util-linux-misc - rm /usr/lib/python3.*/EXTERNALLY-MANAGED || true - pip install crossplane -i https://mirrors.aliyun.com/pypi/simple + apk update && apk add curl git build-base jq yq iproute2 openssl tree nodejs npm util-linux-misc alb-envtest-install git config --global --add safe.directory $PWD go version diff --git a/scripts/run-like-ci-go.sh b/scripts/run-like-ci-go.sh index 789e1cb6..78795480 100755 --- a/scripts/run-like-ci-go.sh +++ b/scripts/run-like-ci-go.sh @@ -1,11 +1,11 @@ #!/bin/sh set -x -proxy="" -if [ -n "$USE_PROXY" ]; then - proxy="--network=host -e http_proxy=$HTTP_PROXY -e https_proxy=$HTTPS_PROXY " -fi +# proxy="" +# if [ -n "$USE_PROXY" ]; then +# proxy="--network=host -e http_proxy=$HTTP_PROXY -e https_proxy=$HTTPS_PROXY " +# fi base=${1-$(cat ./Dockerfile | grep GO_BUILD_BASE | awk -F = '{print $2}')} echo "base $base" platform=${MATRIX_PLATFORM:-linux/amd64} echo "platform -- $platform --" -docker run $proxy -v $PWD:/acp-alb-test --platform $platform -e ALB_ONLINE=$ALB_ONLINE -t $base sh -c "cd /acp-alb-test ;/acp-alb-test/scripts/go-test.sh" +docker run --network=host -v $PWD:/acp-alb-test --platform $platform -e ALB_ONLINE=$ALB_ONLINE -t $base sh -c "cd /acp-alb-test ;/acp-alb-test/scripts/go-test.sh" diff --git a/scripts/run-like-ci-nginx.sh b/scripts/run-like-ci-nginx.sh index 364d5b22..f021d0c6 100755 --- a/scripts/run-like-ci-nginx.sh +++ b/scripts/run-like-ci-nginx.sh @@ -3,12 +3,15 @@ # we should build a nginx image first then use this image to test. 
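# [editor's note: illustrative usage, not part of the patch]
# With the argument handling introduced below, invocations like these should work
# (the custom image name is hypothetical):
#   ./scripts/run-like-ci-nginx.sh                      # use the image tag read from values.yaml
#   ./scripts/run-like-ci-nginx.sh cur shell            # keep the default image, open an interactive shell
#   ./scripts/run-like-ci-nginx.sh my-registry/alb2:dev # run the nginx test suite against a custom image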
tag=$(yq '.global.images.nginx.tag' ./deploy/chart/alb/values.yaml) image=build-harbor.alauda.cn/acp/alb2:$tag -if [ -n "$1" ]; then +if [ -n "$1" ] && [ "$1" != "cur" ]; then image="$1" fi # image=alb-nginx:test platform=${MATRIX_PLATFORM:-linux/amd64} echo "platform $platform" -docker run --user root --network=host --platform $platform -v $PWD:/acp-alb-test -t $image sh -c 'cd /acp-alb-test ;/acp-alb-test/scripts/nginx-test.sh' -# docker run --user root --network=host --platform $platform -v $PWD:/acp-alb-test -it $image sh +if [ "$2" == "shell" ]; then + docker run --user root --network=host --platform $platform -v $PWD:/acp-alb-test -it $image sh +else + docker run --user root --network=host --platform $platform -v $PWD:/acp-alb-test -t $image sh -c 'cd /acp-alb-test ;/acp-alb-test/scripts/nginx-test.sh' +fi diff --git a/template/actions/alb-nginx-install-deps.sh b/template/actions/alb-nginx-install-deps.sh index 13b360b9..b0198fb4 100755 --- a/template/actions/alb-nginx-install-deps.sh +++ b/template/actions/alb-nginx-install-deps.sh @@ -214,6 +214,7 @@ function install-lua-resty-mlcache() ( ) function install-lua-resty-cookie() ( + # https://github.com/xiangnanscu/lua-resty-cookie # md5sum ./lua-resty-cookie-0.01.opm.tar cfd011d1eb1712b47abd9cdffb7bc90b local online="https://opm.openresty.org/api/pkg/fetch?account=xiangnanscu&name=lua-resty-cookie&op=eq&version=$LUA_RESTY_COOKIE_VERSION" local offline="http://prod-minio.alauda.cn/acp/ci/alb/build/lua-resty-cookie-$LUA_RESTY_COOKIE_VERSION.opm.tar" diff --git a/template/actions/alb-nginx.sh b/template/actions/alb-nginx.sh index 1c518673..857e1aba 100755 --- a/template/actions/alb-nginx.sh +++ b/template/actions/alb-nginx.sh @@ -1,13 +1,12 @@ #!/bin/bash source ./template/actions/dev.actions.sh source ./scripts/alb-lint-actions.sh - if [[ -n "$CUR_ALB_BASE" ]]; then export ALB=$CUR_ALB_BASE fi function alb-install-nginx-test-dependency() { - apk update && apk add luarocks luacheck lua-dev lua perl-app-cpanminus wget curl make build-base perl-dev git neovim bash yq jq tree fd openssl + apk update && apk add luarocks sudo luacheck lua-dev lua perl-app-cpanminus wget curl make build-base perl-dev git neovim bash yq jq tree fd openssl mkdir /tmp && export TMP=/tmp # luarocks need this cp /usr/bin/luarocks-5.1 /usr/bin/luarocks cpanm --mirror-only --mirror https://mirrors.tuna.tsinghua.edu.cn/CPAN/ -v --notest Test::Nginx IPC::Run YAML::PP @@ -16,6 +15,7 @@ function alb-install-nginx-test-dependency() { source ./template/actions/alb-nginx-install-deps.sh alb-ng-install-test-deps ) + alb-lint-lua-install } function alb-install-nginx-test-dependency-ubuntu() { @@ -58,7 +58,6 @@ function alb-test-all-in-ci-nginx() { alb-install-nginx-test-dependency fi local end_install=$(date +"%Y %m %e %T.%6N") - # alb-lint-lua # TODO local end_check=$(date +"%Y %m %e %T.%6N") export LUACOV=true test-nginx-in-ci @@ -68,9 +67,13 @@ function alb-test-all-in-ci-nginx() { echo "check" $end_check echo "test" $end_test pwd + alb-nginx-luacov-summary +} + +function alb-nginx-luacov-summary() { + luacov # luacov-html luacov-console $PWD/template/nginx/lua/ - luacov-console $PWD/template/nginx/lua/ -s - luacov-console $PWD/template/nginx/lua/ -s >./luacov.summary + luacov-console $PWD/template/nginx/lua/ -s | tee ./luacov.summary } function test-nginx-local() { @@ -96,10 +99,23 @@ function test-nginx-in-ci() ( alb-nginx-test $1 ) +function alb-nginx-unit-test() ( + local m=$1 + export ALB_LUA_UNIT_TEST_CASE=$m + alb-nginx-test $PWD/template/t/unit/unit_test.t +) + 
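Note on the new alb-nginx-unit-test wrapper above: it only exports ALB_LUA_UNIT_TEST_CASE and re-runs template/t/unit/unit_test.t, so the actual case filtering has to happen on the Lua side (the variable is also whitelisted for nginx workers by the "env ALB_LUA_UNIT_TEST_CASE;" line added to AlaudaLib.pm further down). The following is a hypothetical sketch of such a filter, not the real harness; only the env var name and the e2e.auth_test.auth_test module path come from this change.

-- hypothetical sketch: illustrates how ALB_LUA_UNIT_TEST_CASE could be consumed,
-- the case table and harness layout are invented for illustration
local cases = {
    -- module paths follow the /t/ -> dotted mapping used by the Perl helpers
    ["e2e.auth_test.auth_test"] = function () require("e2e.auth_test.auth_test") end,
}

local want = os.getenv("ALB_LUA_UNIT_TEST_CASE")
for name, run in pairs(cases) do
    if want == nil or want == "" or name == want then
        ngx.log(ngx.INFO, "running unit case ", name)
        run()
    end
end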
+function alb-nginx-test-with-coverage() ( + alb-nginx-test $1 + alb-nginx-luacov-summary | grep $2 +) + function alb-nginx-test() ( set -e set -x echo "alb-nginx-test" alb is $ALB + # ngx_gen_install + # tweak_gen_install local t1=$(date) # struct of a nginx test # / @@ -183,6 +199,8 @@ function alb-nginx-watch-log() ( tail -F ./template/servroot/logs/error.log | python -u -c ' import sys for line in sys.stdin: + if "notice" in line: + continue if "keepalive connection" in line: continue if line.startswith("20"): @@ -260,9 +278,6 @@ function alb-nginx-build-tylua() ( ) function alb-nginx-tylua() ( - local coverpkg_list=$(go list ./... | grep -v e2e | grep -v test | grep -v "/pkg/client" | grep -v migrate | sort | uniq) - local coverpkg=$(echo "$coverpkg_list" | tr "\n" ",") - ./cmd/utils/tylua/bin/tylua $coverpkg NgxPolicy ./template/nginx/lua/types/ngxpolicy.types.lua - + ./cmd/utils/tylua/bin/tylua ./template/nginx/lua/types/ngxpolicy.types.lua return ) diff --git a/template/chaos/break/alb.nginx.conf b/template/chaos/break/alb.nginx.conf index fab08f73..edfd082a 100644 --- a/template/chaos/break/alb.nginx.conf +++ b/template/chaos/break/alb.nginx.conf @@ -67,7 +67,7 @@ http { rewrite_by_lua_file ./alb2/template/nginx/lua/l7_rewrite.lua; proxy_pass $backend_protocol://http_backend; - header_filter_by_lua_file ./alb2/template/nginx/lua/l7_header_filter.lua; + header_filter_by_lua_file ./alb2/template/nginx/lua/l7_header_filter_phase.lua; log_by_lua_block { diff --git a/template/nginx/lua/balancer/balance.lua b/template/nginx/lua/balancer/balance.lua index 9e646611..72602c2f 100644 --- a/template/nginx/lua/balancer/balance.lua +++ b/template/nginx/lua/balancer/balance.lua @@ -1,4 +1,4 @@ --- format:on +-- format:on style:emmy -- THIS MODULE EVALED IN BOTH HTTP AND STREAM CTX local common = require "utils.common" local ngx_balancer = require "ngx.balancer" @@ -19,7 +19,7 @@ local _M = {} local balancers = {} local DEFAULT_LB_ALG = "round_robin" -local IMPLEMENTATIONS = {round_robin = round_robin, chash = chash, sticky = sticky} +local IMPLEMENTATIONS = { round_robin = round_robin, chash = chash, sticky = sticky } local function get_implementation(backend) local name = DEFAULT_LB_ALG @@ -72,7 +72,9 @@ local function sync_backend(backend) -- here we check if `balancer` is the instance of `implementation` -- if it is not then we deduce LB algorithm has changed for the backend if getmetatable(balancer) ~= implementation then - ngx_log(ngx.INFO, string_format("LB algorithm changed from %s to %s, resetting the instance", balancer.name, implementation.name)) + ngx_log(ngx.INFO, + string_format("LB algorithm changed from %s to %s, resetting the instance", balancer.name, + implementation.name)) balancer = implementation:new(backend) balancer:sync(backend) @@ -89,7 +91,6 @@ function _M.sync_backends() ngx_log(ngx.ERR, "no backends data") return end - local new_backends, err = common.json_decode(backends_data) if not new_backends then ngx_log(ngx.ERR, "could not parse backends data: ", err) @@ -135,7 +136,7 @@ function _M.balance() return end - alb_ctx.peer = {peer = peer, conf = balancer:get_peer_conf(peer)} + alb_ctx.peer = { peer = peer, conf = balancer:get_peer_conf(peer) } -- TODO 在实现retrypolicy时这里需要被重写。注意测试。 ngx_balancer.set_more_tries(1) -- TODO FIXME @@ -149,15 +150,17 @@ function _M.balance() -- https://github.com/openresty/lua-nginx-module/pull/1600 -- ngx.log(ngx.NOTICE, "send timeout "..common.json_encode(policy)) - if common.has_key(policy, {"config", "timeout"}) then + if 
common.has_key(policy, { "config", "timeout" }) then local timeout = policy.config.timeout + ---@cast timeout -nil local proxy_connect_timeout_secs = ms2sec(timeout.proxy_connect_timeout_ms) local proxy_send_timeout_secs = ms2sec(timeout.proxy_send_timeout_ms) local proxy_read_timeout_secs = ms2sec(timeout.proxy_read_timeout_ms) -- ngx.log(ngx.NOTICE, -- string.format("set timeout rule %s pconnect %s psend %s pread %s\n", policy.rule, -- tostring(proxy_connect_timeout_secs), tostring(proxy_send_timeout_secs), tostring(proxy_read_timeout_secs))) - local _, err = ngx_balancer.set_timeouts(proxy_connect_timeout_secs, proxy_send_timeout_secs, proxy_read_timeout_secs) + local _, err = ngx_balancer.set_timeouts(proxy_connect_timeout_secs, proxy_send_timeout_secs, + proxy_read_timeout_secs) if err ~= nil then ngx.log(ngx.ERR, err) e.exit(e.InvalidBalancer, "set timeout fail") diff --git a/template/nginx/lua/balancer/sticky.lua b/template/nginx/lua/balancer/sticky.lua index 765727b6..0a642d5a 100644 --- a/template/nginx/lua/balancer/sticky.lua +++ b/template/nginx/lua/balancer/sticky.lua @@ -23,7 +23,7 @@ function _M.header_name(self) end function _M.new(self) - local o = {session_affinity_attribute = nil, session_affinity_policy = nil} + local o = { session_affinity_attribute = nil, session_affinity_policy = nil } setmetatable(o, self) self.__index = self @@ -66,7 +66,7 @@ function _M.set_cookie(self, value) httponly = true, secure = ngx.var.https == "on" } --- LuaFormatter on + -- LuaFormatter on local ok ok, err = cookie:set(cookie_data) diff --git a/template/nginx/lua/config/cache.lua b/template/nginx/lua/config/cache.lua index 8e678daf..193e2f08 100644 --- a/template/nginx/lua/config/cache.lua +++ b/template/nginx/lua/config/cache.lua @@ -17,20 +17,20 @@ function _M.init_mlcache(name, shared_dict, opt) end local ipc = { - register_listeners = function(events) + register_listeners = function (events) for _, event_t in pairs(events) do - ev.register(function(data) + ev.register(function (data) event_t.handler(data) end, channel_name, event_t.channel) end end, - broadcast = function(channel, data) + broadcast = function (channel, data) local ok, err = ev.post(channel_name, channel, data) if not ok then ngx.log(ngx.ERR, "failed to post event '", channel_name, "', '", channel, "': ", err) end end, - poll = function(timeout) -- luacheck: ignore + poll = function (timeout) -- luacheck: ignore return ev.poll() end } @@ -39,39 +39,39 @@ function _M.init_l4() local cache = _M local ok, err = ev.configure { shm = subsystem .. "_ipc_shared_dict", -- defined by "lua_shared_dict" - timeout = 5, -- life time of event data in shm - interval = 1, -- poll interval (seconds) + timeout = 5, -- life time of event data in shm + interval = 1, -- poll interval (seconds) - wait_interval = 0.010, -- wait before retry fetching event data - wait_max = 0.5 -- max wait time before discarding event + wait_interval = 0.010, -- wait before retry fetching event data + wait_max = 0.5 -- max wait time before discarding event } if not ok then ngx.log(ngx.ERR, "failed to start event system: ", err) return end - cache.init_mlcache("rule_cache", subsystem .. "_alb_cache", {lru_size = 2000, ttl = 30, neg_ttl = 5, ipc = ipc}) + cache.init_mlcache("rule_cache", subsystem .. "_alb_cache", { lru_size = 2000, ttl = 30, neg_ttl = 5, ipc = ipc }) end function _M.init_l7() local cache = _M local ok, err = ev.configure { shm = subsystem .. 
"_ipc_shared_dict", -- defined by "lua_shared_dict" - timeout = 5, -- life time of event data in shm - interval = 1, -- poll interval (seconds) + timeout = 5, -- life time of event data in shm + interval = 1, -- poll interval (seconds) - wait_interval = 0.010, -- wait before retry fetching event data - wait_max = 0.5 -- max wait time before discarding event + wait_interval = 0.010, -- wait before retry fetching event data + wait_max = 0.5 -- max wait time before discarding event } if not ok then ngx.log(ngx.ERR, "failed to start event system: ", err) return end - cache.init_mlcache("rule_cache", subsystem .. "_alb_cache", {lru_size = 2000, ttl = 60, neg_ttl = 5, ipc = ipc}) + cache.init_mlcache("rule_cache", subsystem .. "_alb_cache", { lru_size = 2000, ttl = 60, neg_ttl = 5, ipc = ipc }) - cache.init_mlcache("cert_cache", subsystem .. "_alb_cache", {lru_size = 500, ttl = 60, neg_ttl = 5, ipc = ipc}) + cache.init_mlcache("cert_cache", subsystem .. "_alb_cache", { lru_size = 500, ttl = 60, neg_ttl = 5, ipc = ipc }) - cache.init_mlcache("config_cache", subsystem .. "_alb_cache", {lru_size = 500, ttl = 60, neg_ttl = 5, ipc = ipc}) + cache.init_mlcache("config_cache", subsystem .. "_alb_cache", { lru_size = 500, ttl = 60, neg_ttl = 5, ipc = ipc }) end ---gen_rule_key @@ -98,7 +98,7 @@ local function get_config_from_shdict(key) end ---@param key string ----@return CommonPolicyConfigVal config +---@return RefBox config ---@return string error function _M.get_config(key) local cache = _M @@ -107,4 +107,51 @@ function _M.get_config(key) return config, err end +--- @class RefedConfig +--- @field hash string +--- @field config any? + +---@param policy Policy +---@param kind string +---@return RefedConfig? config +---@return string? error +function _M.get_refed_config(policy, kind) + local config = policy.config + if config == nil then + return nil, nil + end + local key = config.refs[kind] + if key == nil then + return nil, nil + end + local cfg, err = _M.get_config(key) + if err ~= nil then + return nil, err + end + return { config = cfg[kind], hash = key }, nil +end + +---@param policy Policy +---@param kind string +---@return any? config +---@return any? 
error +function _M.get_config_from_policy(policy, kind) + local config = policy.config + if config == nil then + return nil + end + if config[kind] ~= nil then + return config[kind] + end + local key = config.refs[kind] + if key == nil then + return nil + end + local cfg, err = _M.get_config(key) + if err ~= nil then + return nil, err + end + return cfg[kind], nil +end + return _M diff --git a/template/nginx/lua/config/policy_fetch.lua b/template/nginx/lua/config/policy_fetch.lua index f89b0af4..b189e0b5 100644 --- a/template/nginx/lua/config/policy_fetch.lua +++ b/template/nginx/lua/config/policy_fetch.lua @@ -83,32 +83,32 @@ end ---@param policy table ---@param old_policy table local function update_stream_cache(policy, old_policy) - local backend_group = common.access_or(policy, {"backend_group"}, {}) + local backend_group = common.access_or(policy, { "backend_group" }, {}) shm.set_backends(common.json_encode(backend_group, true)) - local stream_tcp_policy = common.access_or(policy, {"stream", "tcp"}, {}) - local old_stream_tcp_policy = common.access_or(old_policy, {"stream", "tcp"}, {}) + local stream_tcp_policy = common.access_or(policy, { "stream", "tcp" }, {}) + local old_stream_tcp_policy = common.access_or(old_policy, { "stream", "tcp" }, {}) update_stream_policy_cache(stream_tcp_policy, old_stream_tcp_policy, "tcp") - local stream_udp_policy = common.access_or(policy, {"stream", "udp"}, {}) - local old_stream_udp_policy = common.access_or(old_policy, {"stream", "udp"}, {}) + local stream_udp_policy = common.access_or(policy, { "stream", "udp" }, {}) + local old_stream_udp_policy = common.access_or(old_policy, { "stream", "udp" }, {}) update_stream_policy_cache(stream_udp_policy, old_stream_udp_policy, "udp") end ---@param policy table|nil ---@param old_policy table|nil local function update_http_cache(policy, old_policy) - local certificate_map = common.access_or(policy, {"certificate_map"}, {}) - local old_certificate_map = common.access_or(old_policy, {"certificate_map"}, {}) + local certificate_map = common.access_or(policy, { "certificate_map" }, {}) + local old_certificate_map = common.access_or(old_policy, { "certificate_map" }, {}) - local http_policy = common.access_or(policy, {"http", "tcp"}, {}) - local old_http_policy = common.access_or(old_policy, {"http", "tcp"}, {}) + local http_policy = common.access_or(policy, { "http", "tcp" }, {}) + local old_http_policy = common.access_or(old_policy, { "http", "tcp" }, {}) - local backend_group = common.access_or(policy, {"backend_group"}, {}) + local backend_group = common.access_or(policy, { "backend_group" }, {}) shm.set_backends(common.json_encode(backend_group, true)) - local new_config = common.access_or(policy, {"config"}, {}) - local old_config = common.access_or(old_policy, {"config"}, {}) + local new_config = common.access_or(policy, { "config" }, {}) + local old_config = common.access_or(old_policy, { "config" }, {}) -- ngx.log(ngx.ERR, string.format("newconfig %s", common.json_encode(new_config, true))) -- ngx.log(ngx.ERR, string.format("oldconfig %s", common.json_encode(old_config, true))) @@ -116,6 +116,7 @@ local function update_http_cache(policy, old_policy) -- since that we have to insert a json string.... 
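For reference, the cache updates below feed the new cache.get_config_from_policy helper added above: a policy either embeds a plugin config inline under config[kind], or carries only a hash in config.refs[kind] that is looked up through the mlcache layer. A small illustrative sketch, with made-up tables and hash value (the ref lookup only returns data once the config cache has been populated):

local cache = require "config.cache"

-- inline config: returned directly, no shared-dict lookup needed
local inline = { config = { auth = { basic_auth = { realm = "demo" } }, refs = {} } }
local auth_cfg = cache.get_config_from_policy(inline, "auth")

-- referenced config: only the hash is stored on the policy; the value is fetched
-- via the mlcache-backed config cache and then indexed by kind ("auth", "otel", ...)
local refed = { config = { refs = { auth = "demo-config-hash" } } }
local auth_cfg2, err = cache.get_config_from_policy(refed, "auth")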
-- update cert cache + -- TODO get_table_diff_keys 是递归的。。可能特别慢 for domain, reason in pairs(common.get_table_diff_keys(certificate_map, old_certificate_map)) do local lower_domain = string_lower(domain) if reason == common.DIFF_KIND_REMOVE then @@ -149,7 +150,7 @@ local function update_http_cache(policy, old_policy) cache.rule_cache:delete(key) end end - + -- TODO config的key是hash,我们不用比较完整的配置 -- update config cache for name, reason in pairs(common.get_table_diff_keys(new_config, old_config)) do --- 更新cache,这样下次就会直接从shdict中读新的值 @@ -185,7 +186,6 @@ function _M.update_policy(policy_raw, via) if old_policy_raw == policy_raw then return end - if common.table_equals(policy_data, old_policy_data) then return end diff --git a/template/nginx/lua/config/shmap.lua b/template/nginx/lua/config/shmap.lua index 8512a524..5be068e7 100644 --- a/template/nginx/lua/config/shmap.lua +++ b/template/nginx/lua/config/shmap.lua @@ -97,8 +97,9 @@ function _M.set_backends(value) ngx_shared[current_subsystem .. "_backend_cache"]:set("backend_group", value) end +--- @return string|nil function _M.get_backends() - return ngx_shared[current_subsystem .. "_backend_cache"]:get("backend_group") + return ngx_shared[current_subsystem .. "_backend_cache"]:get("backend_group") --[[@as string|nil]] end return _M diff --git a/template/nginx/lua/ctx/alb_ctx.lua b/template/nginx/lua/ctx/alb_ctx.lua index 3ba407de..a7279d29 100644 --- a/template/nginx/lua/ctx/alb_ctx.lua +++ b/template/nginx/lua/ctx/alb_ctx.lua @@ -18,10 +18,11 @@ local var_proxy = require "ctx.var_proxy" ---@class AlbCtx ---@field matched_policy Policy ----@field send_count number ----@field var table +---@field send_count number retry_count +---@field peer PeerConf valid only after the balance phase +---@field var table var proxy ---@field otel OtelCtx? ----@field peer PeerConf +---@field auth AuthCtx? ---@param ctx AlbCtx function _M.get_last_upstream_status(ctx) diff --git a/template/nginx/lua/error.lua b/template/nginx/lua/error.lua index ed637e71..26c626c2 100644 --- a/template/nginx/lua/error.lua +++ b/template/nginx/lua/error.lua @@ -12,6 +12,7 @@ _M.InvalidBalancer = "InvalidBalancer" _M.BackendError = "BackendError" _M.TimeoutViaAlb = "TimeoutViaAlb" _M.TimeoutViaBackend = "TimeoutViaBackend" +_M.AUTHFAIL = "AuthFail" ---comment -- exit with code 500 Internal Server Error @@ -25,6 +26,7 @@ function _M.exit_with_code(reason, msg, code) if msg ~= nil then reason = reason .. " : " .. tostring(msg) end + ngx.log(ngx.ERR, reason) if subsys.is_http_subsystem() then ngx.header[ErrReason] = reason ngx.ctx.is_alb_err = true diff --git a/template/nginx/lua/l7_header_filter.lua b/template/nginx/lua/phase/l7_header_filter_phase.lua similarity index 60% rename from template/nginx/lua/l7_header_filter.lua rename to template/nginx/lua/phase/l7_header_filter_phase.lua index 07abc37c..8b010fd7 100644 --- a/template/nginx/lua/l7_header_filter.lua +++ b/template/nginx/lua/phase/l7_header_filter_phase.lua @@ -1,7 +1,8 @@ -- format:on local e = require "error" local str = require "resty.string" -local ngx = ngx +local pm = require("plugins.core.plugin_manager") + local matched_policy = ngx.ctx.matched_policy if matched_policy == nil then return @@ -14,7 +15,12 @@ rewrite_header.rewrite_response_header() if ngx.ctx.is_alb_err then return end + +if ngx.ctx.alb_ctx.matched_policy then + pm.response_header_filter_hook(ngx.ctx.alb_ctx) +end + local code = str.atoi(ngx.var.status) if code >= 400 then - e.http_backend_error(code, "read ".. tostring(ngx.var.upstream_bytes_received).." 
byte data from backend") + e.http_backend_error(code, "read " .. tostring(ngx.var.upstream_bytes_received) .. " byte data from backend") end diff --git a/template/nginx/lua/phase/log_phase.lua b/template/nginx/lua/phase/log_phase.lua index 7d087ad8..92061e48 100644 --- a/template/nginx/lua/phase/log_phase.lua +++ b/template/nginx/lua/phase/log_phase.lua @@ -1,3 +1,5 @@ +-- format:on style:emmy + local metrics = require("metrics") local pm = require("plugins.core.plugin_manager") @@ -6,4 +8,6 @@ if not ngx.ctx.alb_ctx then return end metrics.log() -pm.log_hook(ngx.ctx.alb_ctx) +if ngx.ctx.alb_ctx.matched_policy then + pm.log_hook(ngx.ctx.alb_ctx) +end diff --git a/template/nginx/lua/plugins/auth/auth.lua b/template/nginx/lua/plugins/auth/auth.lua new file mode 100644 index 00000000..c604af96 --- /dev/null +++ b/template/nginx/lua/plugins/auth/auth.lua @@ -0,0 +1,68 @@ +-- format:on style:emmy + +local cache = require("config.cache") +local eh = require("error") +local forward_auth = require("plugins.auth.forward_auth") +local basic_auth = require("plugins.auth.basic_auth") + +local _m = { +} + +---@class AuthCtx +---@field auth_cookie? table the cookie from auth response +---@field always_set_cookie boolean + +---@param ctx AlbCtx +function _m.after_rule_match_hook(ctx) + local auth_cfg, err = _m.get_config(ctx) + if err ~= nil then + eh.exit("get auth config fail", err) + return + end + if auth_cfg == nil then + return + end + forward_auth.do_forward_auth_if_need(auth_cfg, ctx) + basic_auth.do_basic_auth_if_need(auth_cfg, ctx) +end + +---@param ctx AlbCtx +function _m.response_header_filter_hook(ctx) + if ctx.auth == nil then + return + end + forward_auth.add_cookie_if_need(ctx) +end + +-- [nginx.ingress.kubernetes.io/auth-keepalive] +-- [nginx.ingress.kubernetes.io/auth-keepalive-requests] +-- [nginx.ingress.kubernetes.io/auth-keepalive-timeout] + +-- [nginx.ingress.kubernetes.io/auth-realm] +-- [nginx.ingress.kubernetes.io/auth-secret] +-- [nginx.ingress.kubernetes.io/auth-secret-type] +-- [nginx.ingress.kubernetes.io/auth-type] + + +-- [nginx.ingress.kubernetes.io/auth-url] +-- [nginx.ingress.kubernetes.io/auth-method] +-- [nginx.ingress.kubernetes.io/auth-proxy-set-headers] # 从configmap中获取,go部分将其转换成具体的map +-- [nginx.ingress.kubernetes.io/auth-request-redirect] +-- [nginx.ingress.kubernetes.io/auth-response-headers] +-- [nginx.ingress.kubernetes.io/auth-signin] +-- [nginx.ingress.kubernetes.io/auth-always-set-cookie] +-- [nginx.ingress.kubernetes.io/auth-signin-redirect-param] # go部分根据这个annotation修改sign的var_string +-- not supported +-- [nginx.ingress.kubernetes.io/auth-snippet] +-- [nginx.ingress.kubernetes.io/auth-cache-duration] +-- [nginx.ingress.kubernetes.io/auth-cache-key] + +--- +---@param ctx AlbCtx +---@return AuthPolicy? +---@return any? error +function _m.get_config(ctx) + return cache.get_config_from_policy(ctx.matched_policy, "auth") +end + +return _m diff --git a/template/nginx/lua/plugins/auth/basic_auth.lua b/template/nginx/lua/plugins/auth/basic_auth.lua new file mode 100644 index 00000000..f0cbdb1d --- /dev/null +++ b/template/nginx/lua/plugins/auth/basic_auth.lua @@ -0,0 +1,91 @@ +local _m = {} +local ngx = ngx +local crypt = require "plugins.auth.crypt" +local decode_base64 = ngx.decode_base64 +local alb_err = require "error" +local s_ext = require("utils.string_ext") +local lrucache = require "resty.lrucache" + +local lru, err = lrucache.new(500) +if not lru then + error("failed to create the cache: " .. 
(err or "unknown")) +end + +---comment +---@param auth_header string +---@return string user +---@return string pass +---@return string? err +local function parse_basic_auth(auth_header) + if not s_ext.start_with(auth_header, "Basic") then + return "", "", "invalid Authorization scheme, not basic auth." + end + + local decoded = decode_base64(s_ext.remove_prefix(auth_header, "Basic ")) + if not decoded then + return "", "", "invalid base64 encoding" + end + local user_pass = s_ext.split(decoded, ":") + if #user_pass ~= 2 then + return "", "", "invalid format" + end + local user, pass = user_pass[1], user_pass[2] + return user, pass, nil +end + + +---comment +---@param auth_cfg AuthPolicy +---@param ctx AlbCtx +_m.do_basic_auth_if_need = function (auth_cfg, ctx) + if auth_cfg.basic_auth == nil then + return + end + if auth_cfg.basic_auth.err ~= "" then + alb_err.exit_with_code(alb_err.AUTHFAIL, "invalid cfg " .. auth_cfg.basic_auth.err, 500) + end + + ngx.header["WWW-Authenticate"] = "Basic realm=" .. "\"" .. auth_cfg.basic_auth.realm .. "\"" + + local secrets = auth_cfg.basic_auth.secret + local auth_header = ctx.var["http_authorization"] + if auth_header == nil then + alb_err.exit_with_code(alb_err.AUTHFAIL, "basic_auth but req no auth header", 401) + end + local user, pass, err = parse_basic_auth(auth_header) + if err ~= nil then + alb_err.exit_with_code(alb_err.AUTHFAIL, err, 401) + end + -- find hash for this user + if secrets[user] == nil then + alb_err.exit_with_code(alb_err.AUTHFAIL, "invalid user or passwd", 401) + end + local ok = _m.verify(pass, secrets[user]) + if ok then + ngx.header["WWW-Authenticate"] = nil + else + alb_err.exit_with_code(alb_err.AUTHFAIL, "invalid user or passwd", 401) + end +end + + + +-- apr1是性能损耗比较大的一个操作 +-- 我们的期望是在某次成功之后后续就不应该在重复计算了 +---@param pass string +---@param hash BasicAuthHash +_m.verify = function (pass, hash) + local cache_key = hash.hash + local cache_pass = lru:get(cache_key) + if cache_pass ~= nil and cache_pass ~= pass then + return false + end + local calculate_hash = crypt.apr1(pass, hash.salt) + + if calculate_hash == hash.hash then + lru:set(cache_key, pass, 60 * 60) + return true + end + return false +end +return _m diff --git a/template/nginx/lua/plugins/auth/buffer_md5.lua b/template/nginx/lua/plugins/auth/buffer_md5.lua new file mode 100644 index 00000000..b1e7292b --- /dev/null +++ b/template/nginx/lua/plugins/auth/buffer_md5.lua @@ -0,0 +1,31 @@ +local ffi = require "ffi" + +local C = ffi.C +local ffi_new = ffi.new +local ffi_string = ffi.string +local ngx = ngx +local subsystem = ngx.config.subsystem + + +local ngx_lua_ffi_md5_bin +if subsystem == "http" then + ffi.cdef [[ + void ngx_http_lua_ffi_md5_bin(const unsigned char *src, size_t len, + unsigned char *dst); + ]] + ngx_lua_ffi_md5_bin = C.ngx_http_lua_ffi_md5_bin +end + +local MD5_DIGEST_LEN = 16 +local md5_buf = ffi_new("unsigned char[?]", MD5_DIGEST_LEN) + +local _m = {} + +-- 17ms -> 14ms +_m.md5_bin = function (sb) + local ptr, len = sb:ref() + ngx_lua_ffi_md5_bin(ptr, len, md5_buf) + return ffi_string(md5_buf, MD5_DIGEST_LEN) +end + +return _m diff --git a/template/nginx/lua/plugins/auth/crypt.lua b/template/nginx/lua/plugins/auth/crypt.lua new file mode 100644 index 00000000..85c77404 --- /dev/null +++ b/template/nginx/lua/plugins/auth/crypt.lua @@ -0,0 +1,116 @@ +local bit = require("bit") +local buffer = require("string.buffer") +local buffer_md5 = require("plugins.auth.buffer_md5") +local _m = {} + +-- TODO use ffi? 
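To make the verification flow above concrete: the Authorization header carries base64("user:pass"), parse_basic_auth splits it, the secret table maps the user name to an htpasswd-style apr1 record, and verify() recomputes apr1(pass, salt) with the crypt module started here, caching the last good password per hash in the lrucache so repeated requests skip the expensive digest loop. A small sketch with made-up user, password and salt values; it assumes the http subsystem, since buffer_md5 binds ngx_http_lua_ffi_md5_bin.

local basic_auth = require "plugins.auth.basic_auth"
local crypt = require "plugins.auth.crypt"

local salt = "abcdefgh"                          -- illustrative salt
local record = {                                 -- shape of a BasicAuthHash entry
    algorithm = "apr1",
    name      = "alice",
    salt      = salt,
    hash      = crypt.apr1("secret", salt),      -- the digest part of an htpasswd apr1 entry
}

-- equivalent of a request carrying: Authorization: Basic base64("alice:secret")
assert(basic_auth.verify("secret", record) == true)   -- computes apr1 once, then caches
assert(basic_auth.verify("wrong", record) == false)   -- rejected against the cached password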
+-- thanks for https://github.com/Tblue/pyapr1/blob/master/apr1.py +---comment +---@param pass string +---@param salt string +---@return string hash +_m.apr1 = function (pass, salt) + local make_int = _m.make_int + local to64 = _m.to64 + + local hash1_data = pass .. "$apr1$" .. salt + local hash2_data = hash1_data + -- local hash1 = ngx.md5_bin(hash1_data) + -- ngx.log(ngx.INFO, "hash 1 " .. str.to_hex(hash1)) + local sandwich = ngx.md5_bin(pass .. salt .. pass) + -- ngx.log(ngx.INFO, "sandwich " .. str.to_hex(sandwich)) + local MD5_DIGEST_SIZE = 16 + local n_dig, n_rem = _m.divmod(#pass, MD5_DIGEST_SIZE) + for _ = 1, n_dig, 1 do + hash2_data = hash2_data .. sandwich + end + hash2_data = hash2_data .. sandwich:sub(1, n_rem) + + -- 嵌入长度二进制编码 + local i = #pass + while i > 0 do + if bit.band(i, 1) == 1 then + hash2_data = hash2_data .. "\0" + else + hash2_data = hash2_data .. pass:sub(1, 1) + end + i = bit.rshift(i, 1) -- 右移一位 + end + local hash2 = ngx.md5_bin(hash2_data) + -- ngx.log(ngx.INFO, "hash2 " .. str.to_hex(hash2)) + local final = hash2 + -- lua loop are [] not [) + -- use restry_md5 and you will find it get slower.. + local max_len = (#pass + #salt + #final) * 2 + local step = buffer.new(max_len) + for i = 0, 999, 1 do + step:reset() + -- use .. to combind byte and use ngx.md5_bin is faster than table concat + resty_md5.. + -- use string buffer but still not obvious perf improvement.. + -- use buffer aware md5. qps 17ms -> 14ms + if bit.band(i, 1) == 1 then + step:put(pass) + else + step:put(final) + end + if i % 3 ~= 0 then + step:put(salt) + end + if i % 7 ~= 0 then + step:put(pass) + end + if bit.band(i, 1) == 1 then + step:put(final) + else + step:put(pass) + end + final = buffer_md5.md5_bin(step) + end + -- base64 + local f1 = make_int(final, 0, 6, 12) + local f2 = make_int(final, 1, 7, 13) + local f3 = make_int(final, 2, 8, 14) + local f4 = make_int(final, 3, 9, 15) + local f5 = make_int(final, 4, 10, 5) + local f6 = make_int(final, 11) + -- ngx.log(ngx.INFO, "f1 " .. f1 .. " " .. to64(f1, 4)) + -- ngx.log(ngx.INFO, "f2 " .. f2 .. " " .. to64(f2, 4)) + -- ngx.log(ngx.INFO, "f3 " .. f3 .. " " .. to64(f3, 4)) + -- ngx.log(ngx.INFO, "f4 " .. f4 .. " " .. to64(f4, 4)) + -- ngx.log(ngx.INFO, "f5 " .. f5 .. " " .. to64(f5, 4)) + -- ngx.log(ngx.INFO, "f6 " .. f6 .. " " .. to64(f6, 2)) + + return to64(f1, 4) .. to64(f2, 4) .. to64(f3, 4) .. to64(f4, 4) .. to64(f5, 4) .. to64(f6, 2) +end + +_m.divmod = function (dividend, divisor) + local quotient = math.floor(dividend / divisor) -- 商 + local remainder = dividend % divisor -- 余数 + return quotient, remainder +end + +_m.make_int = function (data, ...) + local indexes = { ... } + local r = 0 + for i, idx in ipairs(indexes) do + -- 提取字节并左移,然后按位或操作 + r = bit.bor(r, bit.lshift(string.byte(data, idx + 1), 8 * (#indexes - i))) + end + return r +end +-- apr1的base64..并不是通常的base64.. +_m.to64 = function (data, n_out) + local chars = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + local out = "" + + for _ = 1, n_out do + -- 提取低 6 位并映射到字符 + local index = bit.band(data, 0x3F) + 1 -- Lua 索引从 1 开始 + out = out .. 
chars:sub(index, index) + -- 右移 6 位 + data = bit.rshift(data, 6) + end + return out +end + +return _m diff --git a/template/nginx/lua/plugins/auth/forward_auth.lua b/template/nginx/lua/plugins/auth/forward_auth.lua new file mode 100644 index 00000000..ee8f95a8 --- /dev/null +++ b/template/nginx/lua/plugins/auth/forward_auth.lua @@ -0,0 +1,286 @@ +local _m = {} + +local clone = require "table.clone" +local u_url = require "utils.url" +local alb_err = require "error" + +function _m.do_forward_auth_if_need(auth_cfg, ctx) + if not auth_cfg.forward_auth then + return + end + + local ret = _m.send_auth_request(auth_cfg.forward_auth, ctx) + ngx.log(ngx.INFO, "auth-success: ", tostring(ret.success), + " url: " .. tostring(ret.url) .. " code: " .. tostring(ret.code) .. " err: " .. tostring(ret.error_reason)) + -- auth fail + if not ret.success then + for k, v in pairs(ret.client_response_extra_headers) do + ngx.header[k] = v + end + if auth_cfg.forward_auth.always_set_cookie then + ngx.header["Set-Cookie"] = ret.cookie + end + if ret.code ~= 302 then + ngx.log(ngx.ERR, "[wg] auth fail: ", ret.error_reason) + alb_err.exit_with_code(alb_err.AUTHFAIL, ret.error_reason, ret.code) + ngx.say(ret.body) + return + end + ngx.exit(302) + ngx.say(ret.body) + return + end + + -- auth success + -- always_set_cookie need auth ctx in response_header_filter + ctx.auth = { + always_set_cookie = auth_cfg.forward_auth.always_set_cookie, + auth_cookie = ret.cookie + } + for k, v in pairs(ret.upstream_req_extra_headers) do + ngx.req.set_header(k, v) + end + for k, v in pairs(ret.client_response_extra_headers) do + ngx.header[k] = v + end +end + +local valid_code = { + ["200"] = true, + ["201"] = true, + ["204"] = true, + ["206"] = true, + ["301"] = true, + ["302"] = true, + ["303"] = true, + ["304"] = true, + ["307"] = true, + ["308"] = true +} + +function _m.merge_cookie(ca, cb) + if ca == nil and cb == nil then + return + end + if ca == nil then + return cb + end + if cb == nil then + return ca + end + if type(ca) == "string" then + ca = { ca } + end + if type(cb) == "string" then + cb = { cb } + end + if type(ca) ~= "table" or type(cb) ~= "table" then + ngx.log(ngx.ERR, "invalid type of ca or cb: ", type(ca), type(cb)) + return + end + + local cookies = {} + for _, c in ipairs(ca) do + table.insert(cookies, c) + end + for _, c in ipairs(cb) do + table.insert(cookies, c) + end + return cookies +end + +---@param ctx AlbCtx +function _m.add_cookie_if_need(ctx) + local should_add_cookie = ctx.auth.auth_cookie ~= nil and + (valid_code[tostring(ngx.var.status)] or ctx.auth.always_set_cookie) + if not should_add_cookie then + return + end + ngx.header["Set-Cookie"] = _m.merge_cookie(ngx.header["Set-Cookie"], ctx.auth.auth_cookie) +end + +--- type +---@class AuthAction +---@field success boolean +---@field error_reason string +---@field url string +---@field upstream_req_extra_headers table # bypass的情况下,在发送给upstream的请求中需要增加的header +---@field client_response_extra_headers table # 给client的response中需要增加的header +---@field cookie? table # auth response中的cookie +---@field code integer # !success的情况下,返回给客户端的code +---@field body any? 
# !success的情况下,返回给客户端的body +-- type + +---@param auth ForwardAuthPolicy +---@param ctx AlbCtx +---@return AuthAction +function _m.send_auth_request(auth, ctx) + ---@type AuthAction + local ret = { + success = false, + error_reason = "", + url = "", + upstream_req_extra_headers = {}, + client_response_extra_headers = {}, + code = 500, + body = "ALB Auth Fail", + } + + if auth.invalid_auth_req_cm_ref then + ret.code = 503 + ret.error_reason = "invalid-auth-req-cm-ref" + return ret + end + + local req, err = _m.build_request(auth, ctx.var) + if err ~= nil then + ngx.log(ngx.ERR, "do auth, build request fail: ", err) + ret.error_reason = "build-auth-request-fail" + return ret + end + ret.url = req.url + + local httpc = require("resty.http").new() + local res, err = httpc:request_uri(req.url, req) + if err ~= nil then + ngx.log(ngx.ERR, "do auth, send request fail: ", tostring(err)) + ret.error_reason = "send-auth-request-fail" + return ret + end + ret.cookie = res.headers["Set-Cookie"] + -- success + if res.status >= 200 and res.status < 300 then + ret.success = true + ret.code = res.status + for _, h in ipairs(auth.upstream_headers) do + if res.headers[h] ~= nil then + ret.upstream_req_extra_headers[h] = res.headers[h] + end + end + return ret + end + + -- auth fail 401 + if res.status == 401 then + -- TODO 这里的大小写没问题吗... + ret.client_response_extra_headers["WWW-Authenticate"] = res.headers["WWW-Authenticate"] + ret.body = res.body + ret.code = res.status + -- with redirect + if #auth.signin_url ~= 0 then + local url, err = _m.resolve_varstring(auth.signin_url, ctx.var) + if err ~= nil then + ret.error_reason = "resolve-signinurl-fail" + return ret + end + ret.client_response_extra_headers["Location"] = url + ret.code = 302 + return ret + end + ret.error_reason = "auth-service-status: " .. tostring(res.status) + return ret + end + ret.error_reason = "auth-service-status: " .. tostring(res.status) + if res.status == 403 then + ret.code = res.status + ret.body = res.body + return ret + end + -- other fail + ret.code = 500 + return ret +end + +---comment +---@param auth ForwardAuthPolicy +---@return table +---@return string? error +function _m.build_request(auth, var) + local url, err = _m.resolve_varstring(auth.url, var) + if err ~= nil then + return {}, err + end + local parts, err = u_url.parse(url) + if err ~= nil then + return {}, tostring(err) + end + if parts == nil then + return {}, "invalid url: " .. url + end + local auth_host = parts.host + + local req = { + method = auth.method, + url = url, + headers = {}, + ssl_verify = false, + } + + local default_headers = { + ["X-Original-URI"] = { "$request_uri" }, + ["X-Scheme"] = { "$pass_access_scheme" }, + ["X-Original-URL"] = { "$scheme", "://", "$http_host", "$request_uri" }, + ["X-Original-Method"] = { "$request_method" }, + ["X-Sent-From"] = { "alb" }, + ["Host"] = { auth_host }, + ["X-Real-IP"] = { "$remote_addr" }, + ["X-Forwarded-For"] = { "$proxy_add_x_forwarded_for" }, + ["X-Auth-Request-Redirect"] = { "$request_uri" }, + ["Connection"] = { "close" }, -- explicit close. we do not support auth keep-alive now. 
+ } + for k, v in pairs(ngx.req.get_headers()) do + req.headers[k] = v + end + for k, v in pairs(default_headers) do + req.headers[k] = _m.resolve_varstring(v, var) + end + for k, v in pairs(auth.auth_headers) do + local rv, err = _m.resolve_varstring(v, var) + if err ~= nil then + return {}, err + end + req.headers[k] = rv + end + if auth.auth_request_redirect then + local redirect_url, err = _m.resolve_varstring(auth.auth_request_redirect, var) + if err ~= nil then + return {}, err + end + if redirect_url ~= "" then + req.headers["X-Auth-Request-Redirect"] = redirect_url + end + end + return req +end + +---comment turn $host_xx into real value +---@param str_template_list string[] +---@param var table +---@return string +---@return string? error +function _m.resolve_varstring(str_template_list, var) + -- ["https://","$host","/oauth2/auth"] + -- ["https://","$host","/oauth2/start?rd=","$escaped_request_uri"] + -- don't modify the original table + local str_list = clone(str_template_list) + for index, v in pairs(str_list) do + if string.sub(v, 1, 1) == "$" and v ~= "$" then + local key = string.sub(v, 2) + local resolved_val = var[key] + -- 如果有其他变量的话,后面可能需要扩展下.. 比如把escape 作为通用的前缀 + if key == "escaped_request_uri" then + resolved_val = ngx.escape_uri(var["request_uri"]) + end + if key == "pass_access_scheme" then + resolved_val = var["scheme"] + end + if resolved_val == nil then + return "", "var " .. key .. " not found" + end + str_list[index] = resolved_val + end + end + return table.concat(str_list), nil +end + +return _m diff --git a/template/nginx/lua/plugins/core/plugin_manager.lua b/template/nginx/lua/plugins/core/plugin_manager.lua index f3947f2c..716d4d63 100644 --- a/template/nginx/lua/plugins/core/plugin_manager.lua +++ b/template/nginx/lua/plugins/core/plugin_manager.lua @@ -1,43 +1,48 @@ --- format:on +-- format:on style:emmy + ---@class Plugin ---@field after_rule_match_hook fun(ctx: AlbCtx) ----@field header_filter_hook fun(ctx: AlbCtx) +---@field response_header_filter_hook fun(ctx: AlbCtx) ---@field log_hook fun(ctx: AlbCtx) ---@class PluginManager ---@field plugins { [string]: Plugin } ---@field name string -local _m = {plugins = {}, self_plugins = {}} -- self_plugins need to access via : syntax +local _m = { plugins = {} } -- self_plugins need to access via : syntax function _m.init() local otel = require("plugins.otel.otel") - table.insert(_m.plugins, otel) + local auth = require("plugins.auth.auth") + _m.plugins = { + ["auth"] = auth, + ["otel"] = otel, + } end ---@param ctx AlbCtx -function _m.after_rule_match_hook(ctx) - for _, p in ipairs(_m.plugins) do - if p.after_rule_match_hook then - p.after_rule_match_hook(ctx) +---@param hook string +function _m._call_hook(ctx, hook) + local plugins = ctx.matched_policy.plugins or {} + for _, p_name in ipairs(plugins) do + local p = _m.plugins[p_name] + if p ~= nil and p[hook] then + p[hook](ctx) end end end ---@param ctx AlbCtx -function _m.header_filter_hook(ctx) - for _, p in ipairs(_m.plugins) do - if p.header_filter_hook then - p.header_filter_hook(ctx) - end - end +function _m.after_rule_match_hook(ctx) + _m._call_hook(ctx, "after_rule_match_hook") +end + +---@param ctx AlbCtx +function _m.response_header_filter_hook(ctx) + _m._call_hook(ctx, "response_header_filter_hook") end ---@param ctx AlbCtx function _m.log_hook(ctx) - for _, p in ipairs(_m.plugins) do - if p.log_hook then - p.log_hook(ctx) - end - end + _m._call_hook(ctx, "log_hook") end _m.init() diff --git a/template/nginx/lua/plugins/otel/otel.lua 
b/template/nginx/lua/plugins/otel/otel.lua index 35600ae0..d1e93f54 100644 --- a/template/nginx/lua/plugins/otel/otel.lua +++ b/template/nginx/lua/plugins/otel/otel.lua @@ -1,4 +1,4 @@ --- format:on +-- format:on style:emmy -- local _id_generator = require("opentelemetry.trace.id_generator") local span_kind = require("opentelemetry.trace.span_kind") local span_status = require("opentelemetry.trace.span_status") @@ -18,7 +18,7 @@ local _ = c ---@field context_token string | nil ---@field cfg OtelConf | nil -local _M = {name = "otel"} +local _M = { name = "otel" } local lru, err = lrucache.new(200) -- 200 different tracers should be enough.. if not lru then @@ -35,45 +35,36 @@ local HOSTNAME = os.getenv("HOSTNAME") or "" local l = ngx.log local E = ngx.ERR ----@param ctx AlbCtx -function _M.is_need(ctx) - return _M.get_otel_ref(ctx) ~= nil -end - ----@param ctx AlbCtx ----@return string | nil -function _M.get_otel_ref(ctx) - local config = ctx.matched_policy.config - if config == nil or config.otel == nil or config.otel.otel_ref == nil then - return nil +---@param policy Policy +---@return OtelConf? config +---@return string? hash +local get_otel_config = function (policy) + local ret, err = cache.get_refed_config(policy, "otel") + if err ~= nil or ret == nil then + return nil, nil end - return config.otel.otel_ref + return ret.config, ret.hash end ---@param ctx AlbCtx ---@return Tracer | nil ---@return string | nil error function _M.get_tracer_lru(ctx) - local ref = _M.get_otel_ref(ctx) - if ref == nil then - return nil + local otel, hash = get_otel_config(ctx.matched_policy) + if otel == nil or hash == nil then + return nil, "[otel] get config error " end - - local conf, err = cache.get_config(ref) - if err ~= nil or conf.otel == nil or conf.otel.otel == nil then - return nil, "[otel] get config error " .. 
tostring(err) - end - local otel = conf.otel.otel + --- @cast otel -nil ctx.otel.cfg = otel - - local hash = ref local tracer = lru:get(hash) if tracer ~= nil then -- l(E, "[otel] get tracer from cache ") return tracer end - local resource_attrs = {attr.string("hostname", HOSTNAME), attr.string("service.name", ALB_NAME), attr.string("service.namespace", ALB_NS), attr.string("service.type", "alb"), attr.string("service.version", ALB_VER), attr.string("service.instance.id", MY_POD_NAME)} + local resource_attrs = { attr.string("hostname", HOSTNAME), attr.string("service.name", ALB_NAME), attr.string( + "service.namespace", ALB_NS), attr.string("service.type", "alb"), attr.string("service.version", ALB_VER), attr + .string("service.instance.id", MY_POD_NAME) } local user_attrs = otel.resource or {} -- l(E, "[otel] user attr ", c.json_encode(user_attrs), "\n") for k, v in pairs(user_attrs) do @@ -94,7 +85,7 @@ end ---@param ctx AlbCtx ---@return OtelCtx function _M.init_our_ctx(ctx) - ctx.otel = {context_token = nil, cfg = nil} + ctx.otel = { context_token = nil, cfg = nil } return ctx.otel end @@ -109,9 +100,6 @@ end ---@param ctx AlbCtx function _M.after_rule_match_hook(ctx) - if not _M.is_need(ctx) then - return - end local our = _M.init_our_ctx(ctx) local tracer, err = _M.get_tracer_lru(ctx) if tracer == nil then @@ -125,12 +113,14 @@ function _M.after_rule_match_hook(ctx) upstream_context = trace_context_propagator:extract(context, ngx.req) end -- TODO add more attributes via config - local attributes = {attr.string("net.host.name", ctx.var.host), attr.string("http.request.method", ctx.var.method), attr.string("http.scheme", ctx.var.scheme), attr.string("http.target", ctx.var.request_uri), attr.string("http.user_agent", ctx.var.http_user_agent)} + local attributes = { attr.string("net.host.name", ctx.var.host), attr.string("http.request.method", ctx.var.method), + attr.string("http.scheme", ctx.var.scheme), attr.string("http.target", ctx.var.request_uri), attr.string( + "http.user_agent", ctx.var.http_user_agent) } _M.inject_rule_source_attribute(ctx, attributes) local span_name = ctx.var.method .. " " .. 
ctx.var.request_uri - local otel_ctx = tracer:start(upstream_context, span_name, {kind = span_kind.server, attributes = attributes}) + local otel_ctx = tracer:start(upstream_context, span_name, { kind = span_kind.server, attributes = attributes }) if otel_ctx == nil then l(E, "[otel] start span failed") return @@ -205,4 +195,5 @@ function _M.inject_http_header(flags, span) span:set_attributes(a) end end + return _M diff --git a/template/nginx/lua/plugins/otel/tracer.lua b/template/nginx/lua/plugins/otel/tracer.lua index 1a4bcea0..ebd2f828 100644 --- a/template/nginx/lua/plugins/otel/tracer.lua +++ b/template/nginx/lua/plugins/otel/tracer.lua @@ -1,3 +1,5 @@ +-- format:on style:emmy + local resource_new = require("opentelemetry.resource").new local our_exporter_client_new = require("plugins.otel.tracer_http_client").new local otlp_exporter_new = require("opentelemetry.trace.exporter.otlp").new @@ -8,7 +10,12 @@ local always_on_sampler_new = require("opentelemetry.trace.sampling.always_on_sa local parent_base_sampler_new = require("opentelemetry.trace.sampling.parent_base_sampler").new local trace_id_ratio_sampler_new = require("opentelemetry.trace.sampling.trace_id_ratio_sampler").new -local sampler_factory = {always_off = always_off_sampler_new, always_on = always_on_sampler_new, parent_base = parent_base_sampler_new, trace_id_ratio = trace_id_ratio_sampler_new} +local sampler_factory = { + always_off = always_off_sampler_new, + always_on = always_on_sampler_new, + parent_base = parent_base_sampler_new, + trace_id_ratio = trace_id_ratio_sampler_new +} local _M = {} ---comment @@ -21,14 +28,14 @@ function _M.get_sampleer(conf) return sampler_factory[sampler_name](), nil end local s_opt = conf.sampler.options - if s_opt == nil then + if type(s_opt) ~= "table" then return nil, "no opt" end local fraction = 0.5 - if s_opt.fraction ~= nil then + if type(s_opt.fraction) == "string" then fraction = tonumber(s_opt.fraction) if fraction == nil then - return nil, "invalid fraction " .. tostring(s_opt.fraction) + return nil, "invalid fraction " .. s_opt.fraction end end @@ -39,6 +46,9 @@ function _M.get_sampleer(conf) if sampler_name ~= "parent_base" then return nil, "sampler not exist" end + if type(s_opt.parent_name) ~= "string" then + return nil, "invalid parent name" + end if sampler_name == "parent_base" and sampler_factory[s_opt.parent_name] == nil then return nil, "no parent sampler" end @@ -52,9 +62,10 @@ end ---@return Tracer? tracer ---@return string? 
error function _M.create_tracer(conf, resource_attrs) - local collect_request_header = {["Content-Type"] = "application/json"} + local collect_request_header = { ["Content-Type"] = "application/json" } -- our_exporter_client_new are skip ssl_verify in default - local exporter = otlp_exporter_new(our_exporter_client_new(conf.exporter.collector.address, conf.exporter.collector.request_timeout, collect_request_header)) + local exporter = otlp_exporter_new(our_exporter_client_new(conf.exporter.collector.address, + conf.exporter.collector.request_timeout, collect_request_header)) -- create span processor local batch_span_processor = batch_span_processor_new(exporter, conf.exporter.batch_span_processor) -- create sampler @@ -63,7 +74,8 @@ function _M.create_tracer(conf, resource_attrs) return nil, err end -- create tracer provider - local tp = tracer_provider_new(batch_span_processor, {resource = resource_new(unpack(resource_attrs)), sampler = sampler}) + local tp = tracer_provider_new(batch_span_processor, + { resource = resource_new(unpack(resource_attrs)), sampler = sampler }) -- create tracer return tp:tracer("opentelemetry-lua") end diff --git a/template/nginx/lua/types/common.lua b/template/nginx/lua/types/common.lua new file mode 100644 index 00000000..bed32855 --- /dev/null +++ b/template/nginx/lua/types/common.lua @@ -0,0 +1 @@ +---@alias var_string string a string may contain nginx variable diff --git a/template/nginx/lua/types/ngxpolicy.types.lua b/template/nginx/lua/types/ngxpolicy.types.lua index 24776dff..a1077a88 100644 --- a/template/nginx/lua/types/ngxpolicy.types.lua +++ b/template/nginx/lua/types/ngxpolicy.types.lua @@ -1,148 +1,239 @@ ----@class NgxPolicy ----@field certificate_map table ----@field http HttpPolicy ----@field stream StreamPolicy ----@field config CommonPolicyConfig ----@field backend_group +--- @alias CJSON_NULL userdata +--- @class NgxPolicy +--- @field backend_group BackendGroup[] +--- @field certificate_map table +--- @field config table +--- @field http HttpPolicy +--- @field stream StreamPolicy + + +--- @class HttpPolicy +--- @field tcp table + + +--- @class StreamPolicy +--- @field tcp table +--- @field udp table + + +--- @class BackendGroup +--- @field backends Backend[] +--- @field mode string +--- @field name string +--- @field session_affinity_attribute string +--- @field session_affinity_policy string + + +--- @class Certificate +--- @field cert string +--- @field key string + + +--- @class RefBox +--- @field note string? +--- @field type string +--- @field auth AuthPolicy? +--- @field otel OtelConf? +--- @field rewrite_request RewriteRequestConfig? +--- @field rewrite_response RewriteResponseConfig? +--- @field timeout TimeoutPolicyConfig? + + +--- @class PolicyExt +--- @field auth AuthPolicy? +--- @field otel OtelConf? +--- @field rewrite_request RewriteRequestConfig? +--- @field rewrite_response RewriteResponseConfig? +--- @field timeout TimeoutPolicyConfig? + + +--- @class Backend +--- @field address string +--- @field ns string +--- @field otherclusters boolean +--- @field port number +--- @field svc string +--- @field weight number + + +--- @class Policy +--- @field backend_protocol string +--- @field config PolicyExtCfg +--- @field internal_dsl any[] +--- @field plugins string[] +--- @field rule string +--- @field subsystem string +--- @field to_location string? 
+--- @field upstream string +--- @field source_name string +--- @field source_ns string +--- @field source_type string +--- @field cors_allow_headers string +--- @field cors_allow_origin string +--- @field enable_cors boolean +--- @field redirect_code number +--- @field redirect_host string? +--- @field redirect_port number? +--- @field redirect_prefix_match string? +--- @field redirect_replace_prefix string? +--- @field redirect_scheme string? +--- @field redirect_url string +--- @field rewrite_base string +--- @field rewrite_prefix_match string? +--- @field rewrite_replace_prefix string? +--- @field rewrite_target string +--- @field url string +--- @field vhost string + + +--- @class AuthPolicy +--- @field basic_auth BasicAuthPolicy? +--- @field forward_auth ForwardAuthPolicy? + + +--- @class LegacyExtInPolicy +--- @field cors_allow_headers string +--- @field cors_allow_origin string +--- @field enable_cors boolean +--- @field redirect_code number +--- @field redirect_host string? +--- @field redirect_port number? +--- @field redirect_prefix_match string? +--- @field redirect_replace_prefix string? +--- @field redirect_scheme string? +--- @field redirect_url string +--- @field rewrite_base string +--- @field rewrite_prefix_match string? +--- @field rewrite_replace_prefix string? +--- @field rewrite_target string +--- @field url string +--- @field vhost string + + +--- @class OtelConf +--- @field exporter Exporter? +--- @field flags Flags? +--- @field resource table +--- @field sampler Sampler? + + +--- @class PolicyExtCfg +--- @field refs table +--- @field auth AuthPolicy? +--- @field otel OtelConf? +--- @field rewrite_request RewriteRequestConfig? +--- @field rewrite_response RewriteResponseConfig? +--- @field timeout TimeoutPolicyConfig? + + +--- @class RewriteRequestConfig +--- @field headers table +--- @field headers_add table +--- @field headers_add_var table +--- @field headers_remove string[] +--- @field headers_var table + + +--- @class RewriteResponseConfig +--- @field headers table +--- @field headers_add table +--- @field headers_remove string[] + + +--- @class Source +--- @field source_name string +--- @field source_ns string +--- @field source_type string + + +--- @class TimeoutPolicyConfig +--- @field proxy_connect_timeout_ms number? +--- @field proxy_read_timeout_ms number? +--- @field proxy_send_timeout_ms number? + + +--- @class Cors +--- @field cors_allow_headers string +--- @field cors_allow_origin string +--- @field enable_cors boolean + + +--- @class RedirectConf +--- @field redirect_code number +--- @field redirect_host string? +--- @field redirect_port number? +--- @field redirect_prefix_match string? +--- @field redirect_replace_prefix string? +--- @field redirect_scheme string? +--- @field redirect_url string ----@class HttpPolicy ----@field tcp table +--- @class RewriteConf +--- @field rewrite_base string +--- @field rewrite_prefix_match string? +--- @field rewrite_replace_prefix string? +--- @field rewrite_target string +--- @field url string ----@alias CommonPolicyConfig table +--- @class Vhost +--- @field vhost string ----@class CommonPolicyConfigVal ----@field type string ----@field otel? OtelInCommon +--- @class BasicAuthPolicy +--- @field auth_type string +--- @field err string +--- @field realm string +--- @field secret table ----@class StreamPolicy ----@field tcp table ----@field udp table +--- @class Exporter +--- @field batch_span_processor BatchSpanProcessor? +--- @field collector Collector? 
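Putting the new annotations together, here is a made-up Policy value in the shape these classes describe: plugin_manager._call_hook walks policy.plugins, and each plugin then resolves its configuration either inline (config.auth) or through config.refs. All literal values below are invented for illustration, and the flat legacy fields (url, vhost, rewrite_*, redirect_*, cors_*) are omitted.

local example_policy = {
    rule             = "demo-rule",
    subsystem        = "http",
    upstream         = "demo-upstream",
    backend_protocol = "http",
    internal_dsl     = {},
    plugins          = { "auth", "otel" },       -- drives plugin_manager._call_hook
    config = {
        refs = { otel = "demo-otel-hash" },       -- resolved lazily via config.cache
        auth = {                                  -- inline AuthPolicy
            basic_auth = { auth_type = "basic", realm = "demo", err = "", secret = {} },
        },
    },
}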
----@alias Policies (Policy | nil)[] ----@class Certificate ----@field cert string ----@field key string +--- @class Flags +--- @field hide_upstream_attrs boolean +--- @field notrust_incoming_span boolean +--- @field report_http_request_header boolean +--- @field report_http_response_header boolean ----@class OtelInCommon ----@field otel OtelConf +--- @class ForwardAuthPolicy +--- @field always_set_cookie boolean +--- @field auth_headers table +--- @field auth_request_redirect string[] +--- @field invalid_auth_req_cm_ref boolean +--- @field method string +--- @field signin_url string[] +--- @field upstream_headers string[] +--- @field url string[] ----@class OtelConf ----@field exporter? Exporter ----@field sampler? Sampler ----@field flags? Flags ----@field resource? table +--- @class Sampler +--- @field name string +--- @field options (SamplerOptions|CJSON_NULL) ----@class Flags ----@field hide_upstream_attrs boolean ----@field report_http_request_header boolean ----@field report_http_response_header boolean ----@field notrust_incoming_span boolean +--- @class BasicAuthHash +--- @field algorithm string +--- @field hash string +--- @field name string +--- @field salt string ----@class Exporter ----@field collector? Collector ----@field batch_span_processor? BatchSpanProcessor +--- @class BatchSpanProcessor +--- @field inactive_timeout number +--- @field max_queue_size number ----@class Collector ----@field address string ----@field request_timeout number /* int */ +--- @class Collector +--- @field address string +--- @field request_timeout number ----@class BatchSpanProcessor ----@field max_queue_size number /* int */ ----@field scheduled_delay number /* int */ ----@field export_timeout number /* int */ - ----@class Sampler ----@field name string ----@field options? SamplerOptions - - ----@class SamplerOptions ----@field parent_name? string ----@field fraction? string - - ----@class Policy ----@field internal_dsl ----@field upstream string ----@field rule string ----@field config? RuleConfigInPolicy ----@field SameInRuleCr SameInRuleCr ----@field SameInPolicy SameInPolicy ----@field source_type? string ----@field source_name? string ----@field source_ns? string - - ----@class SameInRuleCr ----@field url string ----@field rewrite_base string ----@field rewrite_target string ----@field enable_cors boolean ----@field cors_allow_headers string ----@field cors_allow_origin string ----@field backend_protocol string ----@field redirect_url string ----@field vhost string ----@field redirect_code number /* int */ ----@field source? Source - - ----@class SameInPolicy ----@field rewrite_prefix_match? string ----@field rewrite_replace_prefix? string ----@field redirect_scheme? string ----@field redirect_host? string ----@field redirect_port? number /* int */ ----@field redirect_prefix_match? string ----@field redirect_replace_prefix? string - - ----@class RuleConfigInPolicy ----@field rewrite_response? RewriteResponseConfig ----@field rewrite_request? RewriteRequestConfig ----@field timeout? TimeoutPolicyConfig ----@field otel? OtelInPolicy - - ----@class RewriteResponseConfig ----@field headers? table ----@field headers_remove? ----@field headers_add? table - - ----@class RewriteRequestConfig ----@field headers? table ----@field headers_var? table ----@field headers_remove? ----@field headers_add? table ----@field headers_add_var? table - - ----@class TimeoutPolicyConfig ----@field proxy_connect_timeout_ms? number /* uint */ ----@field proxy_send_timeout_ms? 
number /* uint */ ----@field proxy_read_timeout_ms? number /* uint */ - - ----@class Source ----@field name string ----@field namespace string ----@field type string - - ----@class OtelInPolicy ----@field otel_ref? string ----@field otel? OtelConf +--- @class SamplerOptions +--- @field fraction (string|CJSON_NULL) +--- @field parent_name (string|CJSON_NULL) diff --git a/template/nginx/lua/utils/common.lua b/template/nginx/lua/utils/common.lua index 4ad0db1e..b08ead83 100644 --- a/template/nginx/lua/utils/common.lua +++ b/template/nginx/lua/utils/common.lua @@ -30,7 +30,7 @@ function _M.json_encode(data, empty_table_as_object) json.encode_empty_table_as_object(empty_table_as_object or false) -- 空的table默认为array end json.encode_sparse_array(true) - pcall(function(data) -- luacheck: ignore + pcall(function (data) -- luacheck: ignore json_value = json.encode(data) end, data) return json_value @@ -42,7 +42,7 @@ end ---@return table|nil function _M.json_decode(str) local json_value = nil - pcall(function(str) -- luacheck: ignore + pcall(function (str) -- luacheck: ignore json_value = json.decode(str) end, str) return json_value @@ -148,10 +148,6 @@ end -- it returns value of ngx.var[request_uri] function _M.lua_ngx_var(ngx_var) local var_name = string_sub(ngx_var, 2) - if var_name:match("^%d+$") then - var_name = tonumber(var_name) - end - return ngx.var[var_name] end @@ -214,6 +210,7 @@ end function _M.access_or(s, keys, default) if _M.has_key(s, keys) then local v = s + ---@cast v -nil for _, key in ipairs(keys) do v = v[key] end @@ -239,6 +236,7 @@ end --- --- milliseconds to seconds,if ms is nil return nil +--- @param ms number|nil ---@return number|nil function _M.ms2sec(ms) if ms == nil or ms == json.null then diff --git a/template/nginx/lua/utils/string_ext.lua b/template/nginx/lua/utils/string_ext.lua index 4a16b70d..0d26d639 100644 --- a/template/nginx/lua/utils/string_ext.lua +++ b/template/nginx/lua/utils/string_ext.lua @@ -19,6 +19,7 @@ end function _M.nil_or(first, second) return g_ext.nil_or(first, second, "") end + -- remove_prefix -- if prefix not exist return the origin str -- @return string @@ -50,4 +51,29 @@ function _M.lines_grep(s, regex) return lines end +function _M.contains(str, sub) + return str:find(sub, 1, true) ~= nil +end + +function _M.start_with(str, start) + return str:sub(1, #start) == start +end + +function _M.end_with(str, ending) + return ending == "" or str:sub(- #ending) == ending +end + +function _M.split(str, delimiter) + local result = {} + local from = 1 + local delim_from, delim_to = string.find(str, delimiter, from) + while delim_from do + table.insert(result, string.sub(str, from, delim_from - 1)) + from = delim_to + 1 + delim_from, delim_to = string.find(str, delimiter, from) + end + table.insert(result, string.sub(str, from)) + return result +end + return _M diff --git a/template/nginx/lua/utils/url.lua b/template/nginx/lua/utils/url.lua new file mode 100644 index 00000000..f0741f9e --- /dev/null +++ b/template/nginx/lua/utils/url.lua @@ -0,0 +1,174 @@ +-- https://github.com/3scale/lua-resty-url/blob/eebecb494c04681ebfbc85809bd51b223a042e74/src/resty/url.lua +local tostring = tostring +local re_match = ngx.re.match +local concat = table.concat +local tonumber = tonumber +local setmetatable = setmetatable +local re_gsub = ngx.re.gsub +local select = select +local find = string.find +local sub = string.sub +local assert = assert + +local _M = { + _VERSION = '0.3.4', + + ports = { + https = 443, + http = 80, + } +} + +function _M.default_port(scheme) + 
return _M.ports[scheme] +end + +function _M.scheme(url) + local start = find(url, ':', 1, true) + + if start then + return sub(url, 1, start - 1), sub(url, start + 1) + end +end + +local core_base = require "resty.core.base" +local core_regex = require "resty.core.regex" +local new_tab = core_base.new_tab +local C = require('ffi').C + +local function compile_regex(pattern) + local compiled, err, flags = core_regex.re_match_compile(pattern, 'joxi') + + assert(compiled, err) + + return compiled, flags +end + +local collect_captures = core_regex.collect_captures +local abs_regex, abs_regex_flags = compile_regex([=[ + ^ + (?:(\w+):)? # scheme (1) + // + (?: + ([^:@]+)? # user (2) + (?: + : + ([^@]+)? # password (3) + )? + @)? + ( # host (4) + [a-z\.\-\d\_]+ # domain + | + [\d\.]+ # ipv4 + | + \[[a-f0-9\:]+\] # ipv6 + ) + (?: + :(\d+) # port (5) + )? + (.*) # path (6) + $ +]=]) +local http_regex, http_regex_flags = compile_regex('^https?$') + +local function match(str, regex, flags) + local res = new_tab(regex.ncaptures, regex.name_count) + if not str then return false, res end + + local rc = C.ngx_http_lua_ffi_exec_regex(regex, flags, str, #str, 0) + + return rc > 0, collect_captures(regex, rc, str, flags, res) +end + +local function _match_opaque(scheme, opaque) + if match(scheme, http_regex, http_regex_flags) then + return nil, 'invalid endpoint' + end + + return { scheme, opaque = opaque } +end + +local function _transform_match(m) + m[0] = nil + + if m[6] == '' or m[6] == '/' then m[6] = nil end + + return m +end + +function _M.split(url, protocol) + if not url then + return nil, 'missing endpoint' + end + + local scheme, opaque = _M.scheme(url) + + if not scheme then return nil, 'missing scheme' end + + if protocol and not re_match(scheme, protocol, 'oj') then + return nil, 'invalid protocol' + end + + local ok, m = match(url, abs_regex, abs_regex_flags) + + if ok then + return _transform_match(m) + else + return _match_opaque(scheme, opaque) + end +end + +function _M.parse(url, protocol) + local parts, err = _M.split(url, protocol) + + if err then + return parts, err + end + + -- https://tools.ietf.org/html/rfc3986#section-3 + return setmetatable({ + scheme = parts[1] or nil, + user = parts[2] or nil, + password = parts[3] or nil, + host = parts[4] or nil, + port = tonumber(parts[5]), + path = parts[6] or nil, + opaque = parts.opaque, + }, { __tostring = function () return url end }) +end + +function _M.normalize(uri) + local regex = [=[ +( # Capture group + + (? e2e.auth_test.auth_test + # 这样可以被直接require my $dirname = dirname($file); - my $ALB_BASEname = basename($file, qr/\.[^.]*$/); - $ALB_BASEname=~s/.t//; - $file = "$dirname/$ALB_BASEname"; $file =~ m{^.*?/t/(.*)$}; # 匹配/t/后面的部分,捕获到$1中 my $suffix = $1; # 获取捕获的后缀部分 $suffix =~ s{/}{.}g; # 将后缀部分中的/替换为. 
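A short sketch of how the vendored resty url helpers in template/nginx/lua/utils/url.lua above are meant to be used; the expected behavior is taken from the upstream lua-resty-url source, it needs the OpenResty runtime (ngx.re, resty.core), and the require path is an assumption:

local url = require "utils.url" -- path assumption: template/nginx/lua is on package.path

local u = url.parse("http://user:pass@example.com:8080/v1/ping")
-- fields follow the capture groups of abs_regex:
-- u.scheme == "http", u.user == "user", u.password == "pass",
-- u.host == "example.com", u.port == 8080, u.path == "/v1/ping"
print(u.scheme, u.host, u.port, u.path)
print(tostring(u))               -- __tostring gives back the original url
print(url.default_port("https")) -- 443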
- # warn "suffix is $suffix base $ALB_BASEname \n"; + $suffix =~ s{\.t}{}g; # 将后缀部分中的.t去掉 return $suffix; } diff --git a/template/t/AlaudaLib.pm b/template/t/AlaudaLib.pm index 3b470241..4e8d98d9 100644 --- a/template/t/AlaudaLib.pm +++ b/template/t/AlaudaLib.pm @@ -13,7 +13,7 @@ our @EXPORT_OK = qw( gen_main_config gen_ngx_tmpl_via_block gen_http_only gen_lu my $ALB_BASE = $ENV{'TEST_BASE'}; -my $LUACOV = $ENV{'LUACOV'}; +my $LUACOV = $ENV{'LUACOV'} // ''; sub tgl_log(@msgs) { @@ -291,8 +291,11 @@ metrics: port: 1936 ipV4BindAddress: [0.0.0.0] backlog: 2048 +resolver: 127.0.0.1 rootExtra: | env TEST_BASE; + env ALB_LUA_UNIT_TEST_CASE; + env ALB_LUA_TEST_CFG; streamExtra: | lua_package_path "$lua_path"; $init_full diff --git a/template/t/e2e/auth_test/auth_test.lua b/template/t/e2e/auth_test/auth_test.lua new file mode 100644 index 00000000..49f037a3 --- /dev/null +++ b/template/t/e2e/auth_test/auth_test.lua @@ -0,0 +1,398 @@ +-- format:on style:emmy + +local _m = {} +local h = require "test-helper" +local u = require "util" +local ph = require("policy_helper") +local common = require("utils.common") +local clone = require "table.clone" + +local G = {} + +function _m.as_backend() + ngx.log(ngx.INFO, "as_backend enter", ngx.var.uri) + + if ngx.var.uri == "/echo" then + ngx.say("hello") + ngx.exit(200) + return + end + + local id = ngx.var.arg_id + if not id then + ngx.status = 400 + ngx.say("id is required") + ngx.exit(400) + return + end + if not G[id] then + G[id] = {} + end + table.insert(G[id], { + uri = ngx.var.uri, + method = ngx.req.get_method(), + args = ngx.req.get_uri_args(), + headers = ngx.req.get_headers(), + body = ngx.req.get_body_data(), + }) + -- u.logs("as_backend", ngx.var.uri, G[id]) + local code = tonumber(ngx.var.arg_code or "200") or 999 + ngx.status = code + if ngx.var.uri == "/auth" then + if ngx.var.arg_ret_header then + for k, v in string.gmatch(ngx.var.arg_ret_header, "(%w+)@(%w+)") do + ngx.log(ngx.INFO, "as_backend set header ", k, v) + ngx.header[k] = v + end + ngx.log(ngx.INFO, "as_backend set header ", "Set-Cookie", "cc=xx") + ngx.header["Set-Cookie"] = "cc=xx" + end + end + if ngx.var.uri == "/cookie" then + ngx.log(ngx.INFO, "set cookie " .. tostring(ngx.var.arg_auth)) + if ngx.var.arg_auth == "simple" then + ngx.header["Set-Cookie"] = "ca=cb" + else + ngx.header["Set-Cookie"] = { + "id=a3fWa; Expires=Thu, 21 Oct 2021 07:28:00 GMT; Secure; HttpOnly; Domain=mozilla.org", + "xid=xa3fWa; Expires=Thu, 21 Oct 2021 07:28:00 GMT; Secure; HttpOnly; Domain=mozilla.org" + } + end + end + if ngx.var.uri == "/id" or ngx.var.arg_ret_data then + ngx.say(common.json_encode(G[id])) + end + ngx.log(ngx.INFO, "as_backend exit with ", code) + + if ngx.var.uri == "/" then + if ngx.var.arg_app_cookie == "simple" then + ngx.log(ngx.INFO, "as app cookie ", code) + ngx.header["Set-Cookie"] = "app=app" + end + end + if code ~= 200 then + ngx.say(ngx.var.uri .. 
" fail") + end + + ngx.exit(code) +end + +function _m.test() + -- _m.test_common() + -- _m.test_cookie() + _m.test_basic() +end + +function _m.test_cookie() + -- cookie set by forward authentication server + -- user retains cookie by default + -- user does not retain cookie if upstream returns error status code + -- user with annotated ingress retains cookie if upstream returns error status code + ---@type NgxPolicy + local policy = { + certificate_map = {}, + stream = {}, + http = { + tcp = { + [80] = { + { plugins = { "auth" }, rule = "1", internal_dsl = { { "STARTS_WITH", "URL", "/" } }, config = { refs = { auth = "auth-1" } }, upstream = "u1" }, + } + } + }, + config = { + ["auth-1"] = { + type = "auth", + auth = { + forward_auth = { + url = { "http://", "$host", ":1880", "/cookie?id=", "$arg_id", "&auth=", "$arg_auth" }, + always_set_cookie = false, + method = "GET", + auth_headers = {}, + auth_request_redirect = {}, + upstream_headers = {}, + signin_url = {}, + }, + } + }, + }, + backend_group = { { name = "u1", mode = "http", backends = { { address = "127.0.0.1", port = 1880, weight = 100 } }, } } + } + ph.set_policy_lua(policy) + u.logs("hello") + do + u.logs("success should has cookie") + local res, err = h.just_curl("http://127.0.0.1/?id=c1&auth=simple&app_cookie=simple") + h.assert_is_nil(err) + u.logs("check cookie ", res.headers["Set-Cookie"]) + u.logs("res ", res) + h.assert_eq(res.headers["Set-Cookie"], { "app=app", "ca=cb" }) + u.logs("res headers ", res.headers) + end + do + u.logs("fail should not set cookie") + local res, err = u.curl("http://127.0.0.1/?id=c2&auth=simple&code=500") + h.assert_is_nil(err) + u.logs("check cookie ", res.headers["Set-Cookie"]) + u.logs("res", res) + h.assert_eq(res.headers["Set-Cookie"], nil) + end + + do + u.logs("always_set_cookie + fail should set cookie") + local p = clone(policy) + p["config"]["auth-1"]["auth"]["forward_auth"]["always_set_cookie"] = true + ph.set_policy_lua(policy) + local res, err = h.just_curl("http://127.0.0.1/?id=c2&auth=simple&code=500") + h.assert_is_nil(err) + u.logs("check cookie ", res.headers["Set-Cookie"]) + u.logs("res headers ", res) + h.assert_eq(res.headers["Set-Cookie"], "ca=cb") + end +end + +function _m.test_common() + local cases = { + { + case = [[auth should ok ]], + ingress = "", + do_test = function () + ---@type NgxPolicy + local policy = { + certificate_map = {}, + stream = {}, + http = { + tcp = { + [80] = { + { plugins = { "auth" }, rule = "1", internal_dsl = { { "STARTS_WITH", "URL", "/t1" } }, config = { refs = { auth = "auth-1" } }, upstream = "u1" }, + { plugins = { "auth" }, rule = "2", internal_dsl = { { "STARTS_WITH", "URL", "/t2" } }, config = { refs = { auth = "auth-redirect" } }, upstream = "u1" }, + { plugins = { "auth" }, rule = "3", internal_dsl = { { "STARTS_WITH", "URL", "/t3" } }, config = { refs = { auth = "auth-fail" } }, upstream = "u1" }, + { plugins = { "auth" }, rule = "4", internal_dsl = { { "STARTS_WITH", "URL", "/t4" } }, config = { refs = { auth = "auth-https" } }, upstream = "u1" }, + { plugins = { "auth" }, rule = "5", internal_dsl = { { "STARTS_WITH", "URL", "/auth" } }, upstream = "u1" } + } + } + }, + config = { + ["auth-1"] = { + type = "auth", + auth = { + forward_auth = { + always_set_cookie = false, + invalid_auth_req_cm_ref = false, + url = { "http://", "$host", ":1880", "/auth?id=", "$arg_id", "&ret_header=aa@bb" }, + method = "POST", + auth_headers = { ["My-Custom-Header"] = { "42" } }, + auth_request_redirect = { "http://", "$host", "/", "$arg_id" }, + 
upstream_headers = { "aa" }, + signin_url = { "http://", "$host", "/signin" } + }, + } + }, + ["auth-https"] = { + type = "auth", + auth = { + forward_auth = { + invalid_auth_req_cm_ref = false, + always_set_cookie = false, + url = { "https://", "$host", "/auth?id=", "$arg_id", "&ret_header=aa@bb" }, + method = "POST", + auth_headers = { ["My-Custom-Header"] = { "42" } }, + auth_request_redirect = { "http://", "$host", "/", "$arg_id" }, + upstream_headers = { "aa" }, + signin_url = {} + }, + } + }, + ["auth-redirect"] = { + type = "auth", + auth = { + forward_auth = { + invalid_auth_req_cm_ref = false, + always_set_cookie = false, + url = { "http://", "$host", ":1880", "/auth?id=", "$arg_id", "&code=401&ret_header=aa@bb" }, + method = "POST", + auth_headers = { ["My-Custom-Header"] = { "42" } }, + auth_request_redirect = { "http://", "$host", "/", "$arg_id" }, + upstream_headers = { "aa" }, + signin_url = { "http://", "$host", "/signin" } + }, + } + }, + ["auth-fail"] = { + type = "auth", + auth = { + forward_auth = { + invalid_auth_req_cm_ref = false, + always_set_cookie = false, + url = { "http://", "$host", ":1880", "/auth?id=", "$arg_id", "&code=403&ret_header=aa@bb" }, + method = "POST", + auth_headers = { ["My-Custom-Header"] = { "42" } }, + auth_request_redirect = { "http://", "$host", "/", "$arg_id" }, + upstream_headers = { "aa" }, + signin_url = {} + }, + } + }, + }, + backend_group = { + { name = "u1", mode = "http", backends = { { address = "127.0.0.1", port = 1880, weight = 100 } }, } } + } + ph.set_policy_lua(policy) + local test_success = function () + local res, err = u.curl("http://127.0.0.1/t1?id=c1&ret_data=1") + u.logs("check ", res) + h.assert_is_nil(err) + local data = common.json_decode(res.body) + u.logs("check ", res.body) + h.assert_eq(#data, 2) + local auth_req = data[1] + local real_req = data[2] + h.assert_eq(auth_req.uri, "/auth") + h.assert_eq(real_req.uri, "/t1") + + u.logs("check real_req", auth_req.headers) + u.logs("check real_res", res.headers) + h.assert_eq(auth_req.headers["my-custom-header"], "42") + h.assert_eq(auth_req.headers["x-auth-request-redirect"], "http://127.0.0.1/c1") + h.assert_eq(auth_req.method, "POST") + h.assert_eq(real_req.headers["aa"], "bb") + + u.logs("check real_res set cookie", res.headers["set-cookie"]) + h.assert_eq(res.headers["Set-Cookie"], "cc=xx") + end + + -- auth fail to redirect + local test_redirect = function () + local res, err = u.curl("http://127.0.0.1/t2?id=c2") + h.assert_is_nil(err) + u.logs("check ", res.status) + h.assert_eq(res.status, 302) + h.assert_eq(res.headers["Location"], "http://127.0.0.1/signin") + end + -- auth fail without redirect + local test_auth_fail = function () + local res, err = u.curl("http://127.0.0.1/t3?id=c3") + h.assert_is_nil(err) + u.logs("check ", res.status, res.headers) + h.assert_eq(res.headers["X-ALB-ERR-REASON"], "AuthFail : auth-service-status: 403") + h.assert_eq(res.status, 403) + return + end + + local test_auth_with_domain = function () + local res, err = u.curl("http://127.0.0.1/t1?id=c4", { headers = { host = "127.0.0.1" } }) + h.assert_is_nil(err) + u.logs("check ", res.status) + u.logs("check ", res.headers) + h.assert_eq(res.status, 200, "domain should ok") + end + + local test_auth_with_https = function () + -- TODO should add success case + local res, err = u.curl("http://127.0.0.1/t4?id=c5", { headers = { host = "127.0.0.1" } }) + h.assert_is_nil(err) + u.logs("check ", res.status) + u.logs("check ", res.headers) + h.assert_eq(res.headers["X-ALB-ERR-REASON"], 
"AuthFail : send-auth-request-fail") + h.assert_eq(res.status, 500) + end + + test_success() + test_redirect() + test_auth_fail() + test_auth_with_domain() + test_auth_with_https() + end + } + } + for i, c in ipairs(cases) do + u.logs("case " .. i .. ": " .. c.case) + c.do_test() + end +end + +function _m.test_basic() + u.logs("basic") + local policy = { + certificate_map = {}, + stream = {}, + http = { + tcp = { + [80] = { + { plugins = { "auth" }, rule = "1", internal_dsl = { { "STARTS_WITH", "URL", "/" } }, config = { refs = { auth = "auth-1" } }, upstream = "u1" }, + } + } + }, + config = { + ["auth-1"] = { + type = "auth", + auth = { + basic_auth = { + auth_type = "basic", + realm = "default", + err = "", + -- openssl passwd -apr1 -salt W60B7kxR bar = $apr1$W60B7kxR$kC.He7pPyJM2io6VH2VNS. + secret = { + foo = { + algorithm = "apr1", + salt = "W60B7kxR", + hash = "kC.He7pPyJM2io6VH2VNS.", + }, + }, + }, + } + }, + }, + backend_group = { { name = "u1", mode = "http", backends = { { address = "127.0.0.1", port = 1880, weight = 100 } }, } } + } + + ph.set_policy_lua(policy) + do + u.logs("without authentication should 401") + local res, err = u.curl("http://127.0.0.1/echo") + h.assert_eq(res.status, 401) + h.assert_eq(res.headers["X-ALB-ERR-REASON"], "AuthFail : basic_auth but req no auth header") + u.logs(res, err) + end + + do + u.logs("invalid passwd should 401") + local res, err = u.curl("http://127.0.0.1/echo", { + headers = { + ["Authorization"] = "Basic not-even-base64==" + } + }) + h.assert_eq(res.status, 401) + h.assert_eq(res.headers["X-ALB-ERR-REASON"], "AuthFail : invalid base64 encoding") + u.logs(res, err) + + u.logs("invalid passwd should 401") + -- cspell:disable-next-line + -- echo "foo:bar-xxx" | base64 = Zm9vOmJhci14eHgK + local res, err = u.curl("http://127.0.0.1/echo", { + headers = { + -- cspell:disable-next-line + ["Authorization"] = "Basic Zm9vOmJhci14eHgK" + } + }) + h.assert_eq(res.status, 401) + h.assert_eq(res.headers["X-ALB-ERR-REASON"], "AuthFail : invalid user or passwd") + u.logs(res, err) + end + + do + u.logs("valid passwd should 200") + -- cspell:disable-next-line + -- echo "foo:bar" | base64 = Zm9vOmJhcg== + local res, err = u.curl("http://127.0.0.1/echo", { + headers = { + -- cspell:disable-next-line + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + }) + u.logs(res, err) + h.assert_eq(res.status, 200) + h.assert_eq(res.body, "hello\n") + end +end + +return _m diff --git a/template/t/e2e/auth_test/auth_test.t b/template/t/e2e/auth_test/auth_test.t new file mode 100644 index 00000000..a17fe0fb --- /dev/null +++ b/template/t/e2e/auth_test/auth_test.t @@ -0,0 +1,21 @@ +use strict; +use warnings; +use t::Alauda; +use Test::Nginx::Socket 'no_plan'; +use Test::Nginx::Socket; + +my $ALB_BASE = $ENV{'TEST_BASE'}; + +our $tt = t::Alauda::get_test_name(__FILE__); + +log_level("info"); +master_process_enabled("on"); +no_shuffle(); +no_root_location(); +run_tests(); + +__DATA__ + +=== TEST 1: auth +--- mock_backend eval: "1880 $::tt" +--- lua_test_eval eval: "require('$::tt').test()" diff --git a/template/t/e2e/error_page/error.lua b/template/t/e2e/error_page/error.lua index ce5dd035..6a4b7111 100644 --- a/template/t/e2e/error_page/error.lua +++ b/template/t/e2e/error_page/error.lua @@ -35,19 +35,22 @@ end function _M.test() -- LuaFormatter off local policy = { - http = {tcp = {["80"] = { - {rule = "1", internal_dsl = {{"STARTS_WITH", "URL", "/t1"}}, upstream = "u1", config = {timeout = {proxy_read_timeout_ms = "300"}}}}} + http = { + tcp = { + ["80"] = { + { rule = 
"1", internal_dsl = { { "STARTS_WITH", "URL", "/t1" } }, upstream = "u1", config = { timeout = { proxy_read_timeout_ms = "300" } } } } + } }, backend_group = { - {name = "u1", mode = "http", backends = {{address = "127.0.0.1", port = 1880, weight = 100}}, }} - } + { name = "u1", mode = "http", backends = { { address = "127.0.0.1", port = 1880, weight = 100 } }, } } + } -- LuaFormatter on require("policy_helper").set_policy_lua(policy) do u.logs "error from backend without body" local res, err = u.curl("http://127.0.0.1/t1/404-without-body") u.logs(res, err) - h.assert_eq(res.status,404) + h.assert_eq(res.status, 404) h.assert_eq(res.body, "") end @@ -73,7 +76,6 @@ function _M.test() h.assert_eq(res.status, 504) h.assert_eq(res.body, "") end - end return _M diff --git a/template/t/e2e/metrics/metrics.lua b/template/t/e2e/metrics/metrics.lua index 68be6cb5..66cc677c 100644 --- a/template/t/e2e/metrics/metrics.lua +++ b/template/t/e2e/metrics/metrics.lua @@ -37,6 +37,12 @@ function _M.test() local status = sext.lines_grep(metrics.body, [[nginx_http_status{port="80",rule="r1]]) u.logs("after clear", status) h.assert_eq(#status, 0) + local res = h.assert_curl("http://127.0.0.1:80/t1", { ssl_verify = false }) + h.assert_eq(res.body, "ok\n") + local metrics = h.assert_curl("https://127.0.0.1:1936/metrics", { ssl_verify = false }) + local status = sext.lines_grep(metrics.body, [[nginx_http_status{port="80",rule="r1]]) + u.logs("after clear re ", status) + end return _M diff --git a/template/t/e2e/trace/trace.lua b/template/t/e2e/trace/trace.lua index 12dcbc51..6fffe31c 100644 --- a/template/t/e2e/trace/trace.lua +++ b/template/t/e2e/trace/trace.lua @@ -1,12 +1,10 @@ -- format:on local _M = {} -local F = require "F" local u = require "util" local h = require "test-helper" local common = require "utils.common" -local dsl = require "match_engine.dsl" local ups = require "match_engine.upstream" -local ph = require "t.lib.policy_helper" +local ph = require "policy_helper" local default_443_cert = "-----BEGIN 
CERTIFICATE-----\nMIIFFTCCAv2gAwIBAgIUNcaMWCswms56XCvj8nxC/5AKxtUwDQYJKoZIhvcNAQEL\nBQAwGjEYMBYGA1UEAwwPNDQzLmRlZmF1bHQuY29tMB4XDTIyMDUxOTA5MjEzMVoX\nDTMyMDUxNjA5MjEzMVowGjEYMBYGA1UEAwwPNDQzLmRlZmF1bHQuY29tMIICIjAN\nBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvMqeEYs9K/DrbZ3FXj7yZaVRexub\na4OF0S/jg2qXrTK8FwPQ1MJiVxPL2jNeE7PT1fCNujb3+fZ/way99FJ0KpmbqEeP\nGt8490oqHZl7LuiEklrSiNp5qOJERsxgNrtq5RILIC1wH9eu0dNilwnCldzEXqFJ\n4+vZqPQfNxk//0vSOxsapl/nEPze6aMy+sUnyFJoq3ti/O02sV/p5sOQX3NcPoXU\n23PTr1xMDVQ7IpuR4GkxbmIVdAMuGWA2udYN0H3ou1VVy+je3RVF7xD2V/lMI3RL\nzLinfWxyNBUOoswylWRjdgwfrz5EkGuN58uT+o28Lx0APw06gwZ0eXb0cKdaYM0X\n04p4d3r//KLgm5WZpvDrjCC3aP02Yk1rITAu9owx+fNjIuEuPJtfin6r9Cjed7xL\n9CgdlFONDkNPxMz52qnf9Jbuf4HPTa/jDw7ICG8FAR8RwljJ7ohFCmullfXtumpX\nbT8+4DK1+H1fqkLV4lCWtQwn8ULqqCDQJZszco5KcnNnenqKgNPLVe7t6/ZxDZ8j\nMYAyGIR+DMDp0tLjfHD26IjEzF/n3E0pZiTXFaRirjKcFd523qEWvZeZc0nSykhH\nBUYXgxh2Nqi3Cv9VxA6sHVto5GvQBWq0kl6Qo9IGof51+4HCm++8/bCpf2Gcv/kI\ny39JIHMSGCa4ztcCAwEAAaNTMFEwHQYDVR0OBBYEFDawzxBvztJOekhp/DU9GKo+\nsnc9MB8GA1UdIwQYMBaAFDawzxBvztJOekhp/DU9GKo+snc9MA8GA1UdEwEB/wQF\nMAMBAf8wDQYJKoZIhvcNAQELBQADggIBACd2Z9XyESvQ4MfYMUID2DCmuVGBDhyo\n8cN88nuy+plrcYSpsthp55C+dhhfJRocES0NkpIVojpiQiPQdAyLFKb1M1Mcd9bg\n+qtYrOH2lS0Uem2s366D8LLJSOzWv/f75wUHe3eyivzW73zcM3znr5TrAFrCkUBF\npkK90G1VEznpD+VDvXYfcXklTZ7lMVZJ1ck2MDYPkh3nGtCyY6z+r41vJo/OcW8A\ncxicgsKXjEiXOH42B8ugad5gK27gA/FKwtTNPPU4K0UeDCAJaY+L7USjbrUgeQ17\nmjCOrY53OjyjjD4YjsE9EqsU/Hc9lqIUdCktZEDrLKfjGT1raaqDlSzEYYcs/oai\n0Ka3MXao2czYEJz6YZIOtp7FatRUBajCZ3NJeTgPFMZn10g7CktJR5QJDvvqbUBs\nHCddmahNPdgQwjxGVfoAI5SDH2QnIlj3bLivU+4oqR7hO7Nmhx9BtNRdHhM+M+wp\nsLvVETvtZdHC3RX4rX4pAl/r7pjhC7n0tbn3XyK96yZ4Yu/E+d/Cqhs0+rssqLzH\nDtMZCMOsaZi1AUEtc2cmZweOXEHeEoyPn3nJeVLfW2+dThlK/i9RaZbPThTS/GdK\nCU530BEDG+y/I5p6dndYySm2+LJiC0Xso1S1gLa7NccV8Y1E9Y8026J3lpvMilhP\nBwA4jE77yBPI\n-----END CERTIFICATE-----"; @@ -86,6 +84,8 @@ function _M.as_backend() end if string.find(ngx.var.uri, "sleep") then local t = tonumber(ngx.var.arg_sleep) + ---@cast t -nil + u.logs("sleep in backend start", t) ngx.sleep(t) u.logs("sleep in backend over", t) @@ -235,9 +235,11 @@ function _M.test_trace() h.assert_eq(type(cpaas_trace), "table") local trace1 = common.json_decode(cpaas_trace[1]) u.logs(trace1) + ---@cast trace1 -nil h.assert_eq(trace1.rule, "trace1") h.assert_eq(trace1.upstream, "trace1") local trace2 = common.json_decode(cpaas_trace[2]) + ---@cast trace2 -nil u.logs(trace2) h.assert_eq(trace2.rule, "trace2") h.assert_eq(trace2.upstream, "trace2") @@ -249,7 +251,7 @@ function _M.test_trace() u.logs(res) local cpaas_trace = res.headers["x-cpaas-trace"] h.assert_eq(type(cpaas_trace), "string") - local trace = common.json_decode(cpaas_trace) + local trace = common.json_decode(cpaas_trace) or {} u.logs(trace) h.assert_eq(trace.rule, "t6") h.assert_eq(trace.upstream, "test-upstream-1") @@ -259,7 +261,7 @@ function _M.test_trace() local res, err = httpc:request_uri("http://127.0.0.1:80/t7/detail", { headers = { ["cpaas-trace"] = "true" } }) u.logs(res, err) h.assert_eq(res.status, 200) - local trace = common.json_decode(res.headers["x-cpaas-trace"]) + local trace = common.json_decode(res.headers["x-cpaas-trace"]) or {} h.assert_eq(trace.rule, "t7") h.assert_eq(trace.upstream, "test-upstream-1") u.logs(trace) @@ -268,7 +270,7 @@ function _M.test_trace() local res, err = httpc:request_uri("http://127.0.0.1:80/t1/detail", { headers = { ["cpaas-trace"] = "true" } }) u.logs(res, err) h.assert_eq(res.status, 200) - local trace = common.json_decode(res.headers["x-cpaas-trace"]) + local trace = 
common.json_decode(res.headers["x-cpaas-trace"]) or {} h.assert_eq(trace.rule, "1") h.assert_eq(trace.upstream, "test-upstream-1") u.logs(trace) @@ -279,7 +281,7 @@ function _M.test_trace() u.logs(res, err) h.assert_eq(res.status, 200) h.assert_eq(res.body, "from backend\n") - local trace = common.json_decode(res.headers["x-cpaas-trace"]) + local trace = common.json_decode(res.headers["x-cpaas-trace"]) or {} h.assert_eq(trace.rule, "2") h.assert_eq(trace.upstream, "test-upstream-1") u.logs(trace) diff --git a/template/t/e2e/waf/waf.lua b/template/t/e2e/waf/waf.lua index a67e6b2d..839d938c 100644 --- a/template/t/e2e/waf/waf.lua +++ b/template/t/e2e/waf/waf.lua @@ -90,8 +90,9 @@ function _M.test() -- hit p1 rule local res, err = h.just_curl("http://127.0.0.1:80/p1?testparam=test") + u.logs("curl err", err) h.assert_eq(err, nil) - u.logs(h.curl_res_to_string(res)) + u.logs("curl res", h.curl_res_to_string(res)) h.assert_eq(res.status, 403) local res, err = h.just_curl("http://127.0.0.1:80/redirect_p1?testparam=redirect") diff --git a/template/t/e2e/waf/waf.t b/template/t/e2e/waf/waf.t index 39410e26..48ed7c94 100644 --- a/template/t/e2e/waf/waf.t +++ b/template/t/e2e/waf/waf.t @@ -77,5 +77,4 @@ __DATA__ === TEST 1: waf --- custom_location_raw eval: $::loc --- mock_backend eval: "1880 $::tt" ---- init_worker_eval: require("mock_worker_init").init_worker() --- lua_test_eval eval: "require('$::tt').test()" diff --git a/template/t/lib/mock_worker_init.lua b/template/t/lib/mock_worker_init.lua index 1def70ef..0ae6d3aa 100644 --- a/template/t/lib/mock_worker_init.lua +++ b/template/t/lib/mock_worker_init.lua @@ -4,15 +4,12 @@ local M = {} local balancer = require("balancer.balance") local ph = require("config.policy_fetch") local cache = require("config.cache") -local u = require("t.lib.util") +local u = require("util") local subsys = require "utils.subsystem" local shm = require "config.shmap" --- we donot want to interval to update policy in mock mode --- it may cause luacov stuck forever -function M.init_worker(cfg) +function M.init_worker(_cfg) ngx.update_time() - cfg = cfg or {} u.log("life: init worker " .. 
tostring(ngx.worker.id())) if subsys.is_http_subsystem() then cache.init_l7() @@ -23,7 +20,6 @@ function M.init_worker(cfg) if err ~= nil then ngx.exit(0) end - -- speed up for luacov ngx.update_time() shm.set_policy_raw("{}") ngx.update_time() diff --git a/template/t/lib/test-helper.lua b/template/t/lib/test-helper.lua index accb98a3..84d70347 100644 --- a/template/t/lib/test-helper.lua +++ b/template/t/lib/test-helper.lua @@ -114,9 +114,14 @@ function _M.fail(msg) ngx.exit(ngx.ERR) end +---comment +---@param url any +---@param req_cfg any +---@return table res +---@return any err function _M.just_curl(url, req_cfg) local res, err = u.curl(url, req_cfg) - return res, err + return _M.curl_res_simple(res), err end --- curl and assert @@ -136,9 +141,14 @@ function _M.assert_curl(url, req_cfg, assert_cfg) return res end +function _M.curl_res_simple(res) + setmetatable(res.headers, nil) + return { body = res.body, headers = res.headers, status = res.status } +end + function _M.curl_res_to_string(res) - local t = { body = res.body, headers = res.headers, status = res.status } - return c.json_encode(t) + local ret = _M.curl_res_simple(res) + return c.json_encode(ret) end function _M.assert_curl_success(res, err, body) diff --git a/template/t/lib/util.lua b/template/t/lib/util.lua index 8bdd9b25..770b1345 100644 --- a/template/t/lib/util.lua +++ b/template/t/lib/util.lua @@ -10,7 +10,7 @@ function _M.httpc() end function _M.get_caller_info(f, t) - local msg="" + local msg = "" for i = f, t do local callerinfo = debug.getinfo(i) local caller = sext.remove_prefix(callerinfo.source, "@") .. " " .. tostring(callerinfo.currentline) @@ -23,12 +23,18 @@ function _M.curl(url, cfg) local httpc = require("resty.http").new() if cfg == nil then local res, err = httpc:request_uri(url, { method = "GET" }) + if res.headers then + setmetatable(res.headers, nil) + end return res, err end if cfg.method == nil then cfg.method = "GET" end local res, err = httpc:request_uri(url, cfg) + if res.headers then + setmetatable(res.headers, nil) + end return res, err end @@ -107,4 +113,15 @@ function _M.file_read_to_string(path) return raw, nil end +---@param t table +---@return integer +function _M.count_table_as_map_length(t) + -- shame for lua + local n = 0 + for _ in pairs(table) do + n = n + 1 + end + return n +end + return _M diff --git a/template/t/manually/otel_test/otel_test.lua b/template/t/manually/otel_test/otel_test.lua index aea38b2e..bff24572 100644 --- a/template/t/manually/otel_test/otel_test.lua +++ b/template/t/manually/otel_test/otel_test.lua @@ -17,11 +17,11 @@ end ---@param otel_ref string|nil the otel ref use in each rule ---@return table policy the policy local function policy_with_otel(otel_ref, custom) - local otel_config = function(name) + local otel_config = function (name) if custom and custom[name] then - return {otel = custom[name]} + return { otel = custom[name] } end - return {otel = {otel_ref = otel_ref}} + return { refs = { otel = otel_ref } } end --[=====[ @@ -36,10 +36,10 @@ local function policy_with_otel(otel_ref, custom) http = { tcp = { ["80"] = { - {rule = "test", ingress = {name = "", path_index = 1, rule_index = 1}, internal_dsl = {{"STARTS_WITH", "URL", "/test"}}, upstream = "test", config = otel_config("2")}, - {rule = "frontend", internal_dsl = {{"STARTS_WITH", "URL", "/dispatch"}}, upstream = "frontend", config = otel_config("frontend"), source_type = "ingress", source_name = "ing-x", source_ns = "ing-x", ingress_rule_index = "1:1"}, - {rule = "customer", internal_dsl = 
{{"STARTS_WITH", "URL", "/customer"}}, upstream = "customer", config = otel_config("customer")}, - {rule = "router", internal_dsl = {{"STARTS_WITH", "URL", "/route"}}, upstream = "router", config = otel_config("router")} + { rule = "test", ingress = { name = "", path_index = 1, rule_index = 1 }, internal_dsl = { { "STARTS_WITH", "URL", "/test" } }, upstream = "test", config = otel_config("2") }, + { rule = "frontend", internal_dsl = { { "STARTS_WITH", "URL", "/dispatch" } }, upstream = "frontend", config = otel_config("frontend"), source_type = "ingress", source_name = "ing-x", source_ns = "ing-x", ingress_rule_index = "1:1" }, + { rule = "customer", internal_dsl = { { "STARTS_WITH", "URL", "/customer" } }, upstream = "customer", config = otel_config("customer") }, + { rule = "router", internal_dsl = { { "STARTS_WITH", "URL", "/route" } }, upstream = "router", config = otel_config("router") } } } }, @@ -47,41 +47,47 @@ local function policy_with_otel(otel_ref, custom) ["off_trace"] = { type = "otel", otel = { - otel = { - exporter = {collector = {address = default_collect_address, request_timeout = 1000}, batch_span_processor = {max_queue_size = 2048}}, - flags = {hide_upstream_attrs = false, trust_incoming_span = false}, - sampler = {name = "always_off"}, - resource = {} - } + exporter = { collector = { address = default_collect_address, request_timeout = 1000 }, batch_span_processor = { max_queue_size = 2048 } }, + flags = { hide_upstream_attrs = false, trust_incoming_span = false }, + sampler = { name = "always_off" }, + resource = {} } }, - ["parent-base"] = {type = "otel", otel = {otel = {exporter = {collector = {address = default_collect_address, request_timeout = 1000}}, flags = {hide_upstream_attrs = false, trust_incoming_span = false}, - sampler = { - name = "parent_base", options = { parent_name = "always_off"}, + ["parent-base"] = { + type = "otel", + otel = { + exporter = { collector = { address = default_collect_address, request_timeout = 1000 } }, + flags = { hide_upstream_attrs = false, trust_incoming_span = false }, + sampler = { + name = "parent_base", options = { parent_name = "always_off" }, + } } - }}}, - ["ratio-base"] = {type = "otel", otel = {otel = {exporter = {collector = {address = default_collect_address, request_timeout = 1000}}, flags = {hide_upstream_attrs = false, trust_incoming_span = false}, - sampler = { - name = "trace_id_ratio", options={fraction = "0.5"} + }, + ["ratio-base"] = { + type = "otel", + otel = { + exporter = { collector = { address = default_collect_address, request_timeout = 1000 } }, + flags = { hide_upstream_attrs = false, trust_incoming_span = false }, + sampler = { + name = "trace_id_ratio", options = { fraction = "0.5" } + } } - }}}, + }, ["default_trace"] = { type = "otel", otel = { - otel = { - exporter = {collector = {address = default_collect_address, request_timeout = 1000}, batch_span_processor = {max_queue_size = 2048}}, - flags = {hide_upstream_attrs = false}, - sampler = {name = "always_on"}, - resource = {["a"] = "x"} - } + exporter = { collector = { address = default_collect_address, request_timeout = 1000 }, batch_span_processor = { max_queue_size = 2048 } }, + flags = { hide_upstream_attrs = false }, + sampler = { name = "always_on" }, + resource = { ["a"] = "x" } } } }, backend_group = { - {name = "frontend", mode = "http", backends = {{address = "127.0.0.1", port = 8080, weight = 100, svc = "frontend", ns = "default"}}}, - {name = "customer", mode = "http", backends = {{address = "127.0.0.1", port = 8081, weight = 100, 
svc = "customer", ns = "default"}}}, - {name = "router", mode = "http", backends = {{address = "127.0.0.1", port = 8083, weight = 100, svc = "router", ns = "default"}}}, - {name = "test", mode = "http", backends = {{address = "127.0.0.1", port = 1880, weight = 100, svc = "test", ns = "default"}}} + { name = "frontend", mode = "http", backends = { { address = "127.0.0.1", port = 8080, weight = 100, svc = "frontend", ns = "default" } } }, + { name = "customer", mode = "http", backends = { { address = "127.0.0.1", port = 8081, weight = 100, svc = "customer", ns = "default" } } }, + { name = "router", mode = "http", backends = { { address = "127.0.0.1", port = 8083, weight = 100, svc = "router", ns = "default" } } }, + { name = "test", mode = "http", backends = { { address = "127.0.0.1", port = 1880, weight = 100, svc = "test", ns = "default" } } } } } -- LuaFormatter on @@ -94,11 +100,12 @@ function _M.test_trace(policy, opt) ph.set_policy_lua(policy) local curl_opt = {} if opt ~= nil and opt.traceparent ~= nil then - curl_opt = {headers = {traceparent = opt.traceparent}} + curl_opt = { headers = { traceparent = opt.traceparent } } end local res = h.assert_curl("http://127.0.0.1:80/dispatch?customer=567&nonse=" .. tostring(ngx.now()), curl_opt) local trace = res.headers.Traceresponse local trace_ids, err = re_split(trace, "-", "jo") + ---@cast trace_ids -nil h.assert_is_nil(err) local trace_id = trace_ids[2] ngx.sleep(10) -- it seems we need wait a little bit for the trace to be ready @@ -111,8 +118,6 @@ end ---@param policy table ---@param should_has_trace boolean ----@return table trace_res ----@return string trace_id function _M.assert_has_trace(policy, should_has_trace) local trace_res, _ = _M.test_trace(policy) local alb_trace = sext.lines_grep(trace_res.body, "alb") @@ -124,11 +129,13 @@ function _M.assert_has_trace(policy, should_has_trace) end function _M.test_parent() - local res, id = _M.test_trace(policy_with_otel("parent-base"), {traceparent = "00-a0000000000000010000000000000001-0000000000000001-01"}) + local res, id = _M.test_trace(policy_with_otel("parent-base"), + { traceparent = "00-a0000000000000010000000000000001-0000000000000001-01" }) h.assert_eq(res.status, 200) u.logs("trace id", id) u.logs("res ", res.status) - local res, id = _M.test_trace(policy_with_otel("parent-base"), {traceparent = "00-b0000000000000010000000000000001-0000000000000002-00"}) + local res, id = _M.test_trace(policy_with_otel("parent-base"), + { traceparent = "00-b0000000000000010000000000000001-0000000000000002-00" }) h.assert_eq(res.status, 404) u.logs("trace id", id) u.logs("res ", res.status) @@ -140,11 +147,13 @@ end function _M.test_ratio() -- restart jaeger each test.. 
- local res, id = _M.test_trace(policy_with_otel("ratio-base"), {traceparent = "00-00000000000000010000000000000001-0000000000000001-01"}) + local res, id = _M.test_trace(policy_with_otel("ratio-base"), + { traceparent = "00-00000000000000010000000000000001-0000000000000001-01" }) u.logs("trace id", id) u.logs("res ", res.status) h.assert_eq(res.status, 200) - local res, id = _M.test_trace(policy_with_otel("ratio-base"), {traceparent = "00-ffffffff000000000000000100000001-0000000000000001-01"}) + local res, id = _M.test_trace(policy_with_otel("ratio-base"), + { traceparent = "00-ffffffff000000000000000100000001-0000000000000001-01" }) u.logs("trace id", id) u.logs("res ", res.status) h.assert_eq(res.status, 404) @@ -160,7 +169,7 @@ function _M.test() -- -- off_trace should not have trace _M.assert_has_trace(policy_with_otel("off_trace"), false) -- -- we could disable trace for a specific rule - _M.assert_has_trace(policy_with_otel("default_trace", {router = {otel_ref = nil}}), true) + _M.assert_has_trace(policy_with_otel("default_trace", { router = { otel_ref = nil } }), true) end return _M diff --git a/template/t/unit/common_test.lua b/template/t/unit/common_test.lua new file mode 100644 index 00000000..f0dc27f0 --- /dev/null +++ b/template/t/unit/common_test.lua @@ -0,0 +1,12 @@ +local _M = {} + +local t = require("test-helper"); +local u = require("util"); +local c = require("utils.common") + +function _M.test() + u.logs("in common test") + u.logs(c.json_decode('{"a":null}')) +end + +return _M diff --git a/template/t/unit/plugins/auth/auth_unit_test.lua b/template/t/unit/plugins/auth/auth_unit_test.lua new file mode 100644 index 00000000..ca309545 --- /dev/null +++ b/template/t/unit/plugins/auth/auth_unit_test.lua @@ -0,0 +1,133 @@ +-- format:on style:emmy +local _M = {} +local F = require("F"); +local u = require("util") +local h = require("test-helper"); +local str = require "resty.string" +local auth = require("plugins.auth.auth") +local crypt = require "plugins.auth.crypt" +local forward_auth = require("plugins.auth.forward_auth") + +function _M.test() + u.logs("in auth unit test") + _M.test_resolve_varstring() + _M.test_merge_cookie() + + -- -- 实际上不能处理t=$xx这种语句 + u.logs("test var ", ngx.var["t=${msec}"], ngx.var["msec"]) + _M.test_basic_auth() + _M.test_simple_apr1_perf() +end + +function _M.test_resolve_varstring() + local var = { + uri = "a.com/1", + header_z = "zzz" + } + local cases = { + { + arg = { "http://", "$uri", "/", "$header_z" }, + expect = "http://a.com/1/zzz" + } + } + for k, c in pairs(cases) do + local result, err = forward_auth.resolve_varstring(c.arg, var) + u.logs(c, result, err) + if err then + h.P(F "fail {k} e {c.expect} r {result} " .. u.inspect(c)) + h.fail() + end + if c.expect ~= result then + h.P(F "fail {k} e {c.expect} r {result} " .. 
u.inspect(c)) + h.fail() + end + end +end + +function _M.test_merge_cookie() + local cases = { + { + arg = { nil, nil }, + expect = nil + }, + { + arg = { "a", nil }, + expect = "a" + }, + { + arg = { "a", "b" }, + expect = { "a", "b" } + }, + { + arg = { { "a" }, "b" }, + expect = { "a", "b" } + }, + { + arg = { { "a" }, { "b" } }, + expect = { "a", "b" } + }, + { + arg = { { "a" }, { "b", "c" } }, + expect = { "a", "b", "c" } + } + } + for k, c in pairs(cases) do + local result = forward_auth.merge_cookie(c.arg[1], c.arg[2]) + u.logs("x", k, c.expect, "r", result, "case", c.arg) + h.assert_eq(result, c.expect) + end +end + +function _M.test_simple_apr1_perf() + local t_cfg = os.getenv("ALB_LUA_TEST_CFG") or "" + local apr1 = string.find(t_cfg, "apr1", 1, true) + if not apr1 then + return + end + local keep_run = string.find(t_cfg, "flamegraph", 1, true) + while true do + ngx.update_time() + local s = ngx.now() + local n = 10000 + for i = 1, n, 1 do + crypt.apr1("bar", "W60B7kxR") + end + ngx.update_time() + local e = ngx.now() + ngx.log(ngx.INFO, "time " .. tostring(n) .. " " .. (e - s) .. " qps " .. 1 * n / (e - s), + " 1 call " .. tostring((e - s) / n * 1000) .. "ms") + -- ngx.sleep(3) + if not keep_run then + break + end + end +end + +function _M.test_basic_auth() + local cases = { + { + pass = "bar", + slat = "W60B7kxR", + hash = "kC.He7pPyJM2io6VH2VNS." + }, + { + pass = "%^&*", + slat = "WEm2C/nC", + -- cspell:disable-next-line + hash = "MjXcOZacoKaDjPuE0.Xyc." + }, + { + pass = "1a2b3c%^&*()_+", + slat = "FhnHptBM", + hash = "4HP5UXIwuVHSvhZr/o96s." + } + } + -- test via `openssl passwd -apr1 -salt W60B7kxR bar` or + for k, c in pairs(cases) do + local result = crypt.apr1(c.pass, c.slat) + u.logs("x", k, c.hash, "r", result, #c.hash, #result) + h.assert_eq(result, c.hash) + end +end + +return _M diff --git a/template/t/unit/unit_test.lua b/template/t/unit/unit_test.lua index e4891971..0eb9c1dc 100644 --- a/template/t/unit/unit_test.lua +++ b/template/t/unit/unit_test.lua @@ -1,11 +1,21 @@ local _M = {} +local u = require "util" local h = require("test-helper"); + function _M.test() - h.P("in unit test") + u.logs("in unit test") + local run_only = os.getenv("ALB_LUA_UNIT_TEST_CASE") + if run_only and run_only ~= "" then + u.logs("run only", run_only) + require(run_only).test() + return + end require("unit.replace_prefix_match_test").test() require("unit.cert_test").test() require("unit.cors_test").test() + require("unit.common_test").test() + require("unit.plugins.auth.auth_unit_test").test() end return _M diff --git a/test/alauda/README.md b/test/alauda/README.md new file mode 100644 index 00000000..f2feb37d --- /dev/null +++ b/test/alauda/README.md @@ -0,0 +1,2 @@ +tests which hash testlink id and will generate allure report. +which will do test in acp env. 
\ No newline at end of file diff --git a/test/checklist/alb.go b/test/checklist/alb.go index b86f737f..7e8b1cb1 100644 --- a/test/checklist/alb.go +++ b/test/checklist/alb.go @@ -129,7 +129,6 @@ var _ = Describe("checklist for alb", func() { base := InitBase() l := alog.InitKlogV2(alog.LogCfg{ToFile: base + "/chlist.log"}) test := func(cfg cfg, t func(c ctx)) { - global := BaseWithDir(base, "global") genv := NewEnvtestExt(global, l).WithName("global").Crds([]string{GetAlbBase() + "/scripts/yaml/crds/extra/mock"}) genv.AssertStart() @@ -911,7 +910,6 @@ data: GinkgoAssertTrue(strings.Contains(out, `集群 p1 的 alb test 的端口项目信息 configmap 与 hr不一致 cm: [{"port":"111-2233","projects":["ALL_ALL"]}] hr: [] 请更新hr p1-test`), "") _ = out }) - }) GIt("should notice mis used cpaas-system project ", func() { diff --git a/test/conformance/suite_test.go b/test/conformance/gatewayapi/suite_test.go similarity index 100% rename from test/conformance/suite_test.go rename to test/conformance/gatewayapi/suite_test.go diff --git a/test/conformance/ingress-nginx/README.md b/test/conformance/ingress-nginx/README.md new file mode 100644 index 00000000..ac23bbeb --- /dev/null +++ b/test/conformance/ingress-nginx/README.md @@ -0,0 +1 @@ +The special thing about the tests in this directory is that the same test functions are used to exercise both alb and ingress-nginx in turn, so their behavior can be compared. diff --git a/test/conformance/ingress-nginx/auth.go b/test/conformance/ingress-nginx/auth.go new file mode 100644 index 00000000..9112cfd3 --- /dev/null +++ b/test/conformance/ingress-nginx/auth.go @@ -0,0 +1,841 @@ +package ingressnginx + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + "time" + + . "alauda.io/alb2/test/kind/pkg/helper" + . "alauda.io/alb2/utils" + "alauda.io/alb2/utils/log" + . "alauda.io/alb2/utils/test_utils" + mapset "github.com/deckarep/golang-set/v2" + gr "github.com/go-resty/resty/v2" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/samber/lo" + "github.com/xorcare/pointer" + corev1 "k8s.io/api/core/v1" + + nv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" +) + +// 期望是无缝兼容的。。用户不需要额外配置,原本通过ingress-nginx 设置auth annotation 能正常使用的,换成alb 仍然能正常使用 + // The expectation is seamless compatibility. Users should not need any additional configuration; if the auth annotation set through ingress-nginx works properly, it should still work without issues when switching to ALB. 
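// Prerequisites wired up in BeforeAll below: a KUBECONFIG pointing at a cluster that already runs
// an ALB instance whose pods carry the label service_name=alb2-auth in cpaas-system (HTTP on port 11180)
// and an ingress-nginx controller labelled app.kubernetes.io/name=ingress-nginx (port 80); the auth/echo
// backend used by the cases is deployed on demand from the image named in ALB_IMAGE.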
+var _ = Describe("Ingress Auth Test", Ordered, func() { + l := log.InitKlogV2(log.LogCfg{}).WithName("auth-test") + ctx := context.Background() + _ = ctx + var k *Kubectl + var kc *K8sClient + var cfg *rest.Config + var alb_ip string = "" + alb_http_port := "11180" + alb_ip_port := "" + ingng_ip := "" + ingng_ip_port := "" + echo_ip := "" + echo_public_ip := "" + BeforeAll(func() { + // 部署alb,ingress-nginx,echo-resty + l.Info("fetch info from k8s") + cfg_, err := RESTFromKubeConfigFile(os.Getenv("KUBECONFIG")) + Expect(err).Should(BeNil()) + cfg = cfg_ + + k = NewKubectl("", cfg, l) + kc = NewK8sClient(ctx, cfg) + + alb_ips, err := kc.GetPodIp("cpaas-system", "service_name=alb2-auth") + GinkgoNoErr(err) + alb_ip = alb_ips[0] + alb_ip_port = alb_ip + ":" + alb_http_port + + ingng_ips, err := kc.GetPodIp("ingress-nginx", "app.kubernetes.io/name=ingress-nginx") + GinkgoNoErr(err) + ingng_ip = ingng_ips[0] + ingng_ip_port = ingng_ip + ":" + "80" + + _ = k + _ = alb_http_port + _ = alb_ip_port + _ = ingng_ip + _ = ingng_ip_port + }) + + // 正常情况下,只有auth 成功时,才会在response header中额外加上 auth response设置的cookie + // Under normal circumstances, the cookie specified in the auth response will only be added to the response header if the authentication is successful. + // The "always set cookie" option means that the cookie specified in the auth response will be added to the response header regardless of whether the authentication is successful or not.// always set cookie 的意思是,无论auth是否成功,都会在response header中额外加上 auth response设置的cookie + // {{ if $externalAuth.AlwaysSetCookie }} + // add_header Set-Cookie $auth_cookie always; + // {{ else }} + // add_header Set-Cookie $auth_cookie; + // {{ end }} + DescribeTableSubtree("auth cookie", func(always_set_cookie bool, auth_set_cookie string, upstream_error bool, upstream_set_cookie string, expect_cookie string) { + ingress_template := ` +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: auth-check-cookies + namespace: default +spec: + rules: + - host: "auth-check-cookies" + http: + paths: + - backend: + service: + name: auth-server + port: + number: 80 + path: / + pathType: Prefix +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: auth-check-cookies-error + namespace: default +spec: + rules: + - host: "auth-check-cookies" + http: + paths: + - backend: + service: + name: auth-server + port: + number: 80 + path: /error + pathType: Prefix +` + ingress := "" + BeforeAll(func() { + auth_and_upstream_raw := ` + access_log /dev/stdout ; + error_log /dev/stdout info; + location ~ ^/cookies/set/(?.*)/(?.*) { + content_by_lua_block { + ngx.log(ngx.INFO,"im auth xx "..ngx.var.key) + if ngx.var.key ~= "not-set" then + ngx.log(ngx.INFO,"im auth. set cookie") + ngx.header['Set-Cookie'] = {ngx.var.key.."="..ngx.var.value} + end + if ngx.var.key == "failed" then + local code = 403 + ngx.status = code + ngx.exit(code) + return + end + ngx.say("OK") + } + } + location / { + content_by_lua_block { + ngx.log(ngx.INFO,"im app xx ok "..tostring(ngx.var.http_add_cookie)) + if ngx.var.http_add_cookie ~= nil then + ngx.log(ngx.INFO,"add cookie "..ngx.var.http_add_cookie) + ngx.header['Set-Cookie'] = {ngx.var.http_add_cookie} + end + ngx.say("OK") + } + } + location /error { + content_by_lua_block { + ngx.log(ngx.INFO,"im app xx fail "..tostring(ngx.var.http_add_cookie)) + local h, err = ngx.req.get_headers() + if err ~=nil then + ngx.log(ngx.INFO,"err: "..tostring(err)) + end + for k, v in pairs(h) do + ngx.log(ngx.INFO,"h "..tostring(k).." 
: "..tostring(v)) + end + if ngx.var.http_add_cookie ~= nil then + ngx.log(ngx.INFO,"add cookie "..ngx.var.http_add_cookie) + ngx.header['Set-Cookie'] = {ngx.var.http_add_cookie} + end + ngx.exit(503) + } + } + ` + echo, err := NewEchoResty("", cfg, l).Deploy(EchoCfg{Name: "auth-server", Image: os.Getenv("ALB_IMAGE"), Ip: "v4", Raw: auth_and_upstream_raw, PodPort: "80", PodHostPort: "60080"}) + GinkgoNoErr(err) + echo_ip_, err := echo.GetIp() + GinkgoNoErr(err) + echo_ip = echo_ip_ + l.Info("echo", "echo ip", echo_ip) + ingress = Template(ingress_template, map[string]interface{}{ + "echo_ip": echo_ip, + }) + }) + AfterAll(func() { + k.Kubectl("delete ingress auth-check-cookies") + k.Kubectl("delete ingress auth-check-cookies-error") + }) + do_test := func(ip_port string) { + if auth_set_cookie != "" { + key := strings.Split(auth_set_cookie, "=")[0] + val := strings.Split(auth_set_cookie, "=")[1] + k.AssertKubectl("annotate", "ingresses", "auth-check-cookies", "--overwrite", "nginx.ingress.kubernetes.io/auth-url=http://"+echo_ip+"/cookies/set/"+key+"/"+val) + k.AssertKubectl("annotate", "ingresses", "auth-check-cookies-error", "--overwrite", "nginx.ingress.kubernetes.io/auth-url=http://"+echo_ip+"/cookies/set/"+key+"/"+val) + } + if always_set_cookie { + k.AssertKubectl("annotate", "ingresses", "auth-check-cookies", "--overwrite", "nginx.ingress.kubernetes.io/auth-always-set-cookie=true") + k.AssertKubectl("annotate", "ingresses", "auth-check-cookies-error", "--overwrite", "nginx.ingress.kubernetes.io/auth-always-set-cookie=true") + } else { + k.AssertKubectl("annotate", "ingresses", "auth-check-cookies", "--overwrite", "nginx.ingress.kubernetes.io/auth-always-set-cookie=false") + k.AssertKubectl("annotate", "ingresses", "auth-check-cookies-error", "--overwrite", "nginx.ingress.kubernetes.io/auth-always-set-cookie=false") + } + l.Info("sleep ") + time.Sleep(time.Second * 10) + + url := "http://" + ip_port + if upstream_error { + url += "/error" + } + l.Info("url", url) + r := gr.New().R() + if upstream_set_cookie != "" { + r.SetHeader("add-cookie", upstream_set_cookie) + } + + r.SetHeader("HOST", "auth-check-cookies") + res, err := r.Get(url) + GinkgoNoErr(err) + expect_code := 200 + if upstream_error { + expect_code = 503 + } + if strings.Contains(auth_set_cookie, "failed") { + expect_code = 500 + } + // Expect(res.StatusCode()).Should(Equal(expect_code)) + _ = expect_code + l.Info("res", "cookie", res.Cookies(), "expect", expect_cookie) + if expect_cookie == "" { + Expect(len(res.Cookies())).Should(Equal(0)) + } else { + expect_cookies := strings.Split(expect_cookie, ",") + expect_cookie_set := mapset.NewSet(expect_cookies...) + real_cookie_set := mapset.NewSet(lo.Map(res.Cookies(), func(item *http.Cookie, _ int) string { return item.Raw })...) 
+ Expect(real_cookie_set.Equal(expect_cookie_set)).Should(BeTrue()) + } + } + + It("alb should ok", Label("alb", "auth-cookie"), func() { + ingress, err := YqDo(ingress, `yq ".spec.ingressClassName=\"auth\""`) + GinkgoNoErr(err) + l.Info("update ingress", "ingress", ingress, "ip", echo_ip) + k.KubectlApply(ingress) + do_test(alb_ip_port) + }) + + It("ingng should ok", Label("ingng", "auth-cookie"), func() { + ingress, err := YqDo(ingress, `yq ".spec.ingressClassName=\"nginx\""`) + GinkgoNoErr(err) + l.Info("update ingress", "ingress", ingress, "ip", echo_ip) + k.KubectlApply(ingress) + do_test(ingng_ip_port) + }) + }, + Entry("always_set_cookie false | upstream fail | both cookie", false, "auth=auth", true, "up=up", "up=up"), + Entry("always_set_cookie false | upstream success | both cookie", false, "auth=auth", false, "up=up", "auth=auth,up=up"), + Entry("always_set_cookie false | upstream success | both cookie", false, "same=auth", false, "same=up", "same=auth,same=up"), + Entry("always_set_cookie true | upstream fail | both cookie", true, "auth=auth", true, "up=up", "auth=auth,up=up"), + Entry("always_set_cookie true | upstream success | both cookie", true, "auth=auth", false, "up=up", "auth=auth,up=up"), + + Entry("always_set_cookie true | auth fail with cookie", true, "failed=xx", false, "", "failed=xx"), + Entry("always_set_cookie false | auth fail with cookie", false, "failed=xx", false, "", ""), + ) + + // 默认会将 所有的header都发送的auth server + // 可以通过 proxy_set_header 来指定额外或者要覆盖的header + // 默认的额外要发送的header是 + // X-Original-URI $request_uri; + // X-Scheme $pass_access_scheme; + // X-Original-URL $scheme://$http_host$request_uri; + // X-Original-Method $request_method; + // X-Sent-From "alb"; + // X-Real-IP $remote_addr; + // X-Forwarded-For $proxy_add_x_forwarded_for; + // X-Auth-Request-Redirect $request_uri; + + type AuthTestCase struct { + Title string + Annotations map[string]string + Cm map[string]map[string]string + ReqHeader map[string]string + ExpectAuthReqHeader map[string]string + AuthExit int + AuthResponseHeader map[string]string + AuthResponseBody string + ExpectAppReqHeader map[string]string + AppExit int + AppResponseHeader map[string]string + AppResponseBody string + ExpectExit int + ExpectResponseHeader map[string][]string + ExpectBody string + extra_check func(g Gomega, state map[string]interface{}) + alb_external_check func(g Gomega, state map[string]interface{}) + ingng_external_check func(g Gomega, state map[string]interface{}) + } + + init_auth_ingres := func(name string) string { + ingress_template := ` +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{.name}} + namespace: default + annotations: + "nginx.ingress.kubernetes.io/auth-url": "http://{{.echo_ip}}/auth" +spec: + rules: + - host: {{.name}} + http: + paths: + - backend: + service: + name: auth-server + port: + number: 80 + path: / + pathType: Prefix +` + return Template(ingress_template, map[string]interface{}{ + "echo_ip": echo_ip, + "name": name, + }) + } + + signin_entries := []TableEntry{ + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth exit with not 401 403 fail should 500", + Annotations: map[string]string{}, + AuthExit: 404, + AppExit: 200, + ExpectExit: 500, + }), + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth 401 should 401", + Annotations: map[string]string{}, + AuthResponseHeader: map[string]string{ + "www-authenticate": "xxx", + "xx": "asf", + }, + ExpectResponseHeader: map[string][]string{ + 
"Www-Authenticate": {"xxx"}, + }, + AuthExit: 401, + AppExit: 200, + ExpectExit: 401, + }), + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth 401 with signin should 302", + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start?rd=$escaped_request_uri&xx=bb", + }, + AuthExit: 401, + AppExit: 200, + ExpectExit: 302, + ExpectResponseHeader: map[string][]string{ + "Location": {"http://auth-host/auth/start?rd=%2Fabc\u0026xx=bb"}, // 就是这样的。。\u0026是& + }, + }), + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth 401 with signin should 302 without rd", + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start?xx=bb", + }, + AuthExit: 401, + AppExit: 200, + ExpectExit: 302, + ExpectResponseHeader: map[string][]string{ + "Location": {"http://auth-host/auth/start?xx=bb\u0026rd=http://auth-host%2Fabc"}, // 就是这样的。。\u0026是& + }, + }), + + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth 302", + Annotations: map[string]string{}, + AuthExit: 302, + AuthResponseHeader: map[string]string{ + "location": "http://a.com", + }, + AppExit: 200, + ExpectExit: 500, + }), + + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth 403 should 403", + Annotations: map[string]string{}, + AuthExit: 403, + AppExit: 200, + ExpectExit: 403, + }), + + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "normal ok", + Annotations: map[string]string{}, + ReqHeader: map[string]string{ + "authentication": "xxx", + }, + ExpectAuthReqHeader: map[string]string{ + "authentication": "xxx", + "x-original-method": "GET", + "x-original-url": "http://auth-host/abc", + "x-auth-request-redirect": "/abc", + }, + AuthExit: 200, + AuthResponseHeader: map[string]string{}, + AuthResponseBody: "ok", + AppExit: 200, + ExpectAppReqHeader: map[string]string{ + "host": "auth-host", + "authentication": "xxx", + "x-forwarded-host": "auth-host", + }, + ExpectExit: 200, + alb_external_check: func(g Gomega, state map[string]interface{}) { + header := state["/auth"].((map[string]interface{})) + g.Expect(header["x-sent-from"].(string) == "alb") + }, + ingng_external_check: func(g Gomega, state map[string]interface{}) { + header := state["/auth"].((map[string]interface{})) + g.Expect(header["x-sent-from"]).ShouldNot(BeNil()) + g.Expect(header["x-sent-from"].(string) == "nginx-ingress-controller") + }, + }), + } + do_auth_test := func(ing_name string, testcase AuthTestCase, ip_port string, extra_check func(Gomega, map[string]interface{})) { + for ak, av := range testcase.Annotations { + k.AssertKubectl("annotate", "ingresses", ing_name, "--overwrite", ak+"="+av) + } + if testcase.Cm != nil { + for name, cm_map := range testcase.Cm { + ns, name := strings.Split(name, "/")[0], strings.Split(name, "/")[1] + k.Kubectl("delete cm -n " + ns + " " + name) + cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Data: cm_map, + } + _, err := kc.GetK8sClient().CoreV1().ConfigMaps(ns).Create(ctx, &cm, metav1.CreateOptions{}) + GinkgoNoErr(err) + } + } + // 嗯。。。。 + time.Sleep(time.Second * 6) + real_test := func(g Gomega) { + id := fmt.Sprintf("%v", time.Now().UnixNano()) + // tell auth server what should do + expect_auth_behavior := map[string]interface{}{} + auth_exit := 200 + if testcase.AuthExit != 0 { + auth_exit = testcase.AuthExit + } + app_exit := 200 + if testcase.AppExit != 0 
{ + app_exit = testcase.AppExit + } + auth_response_header := make(map[string]string) + if testcase.AuthResponseHeader != nil { + auth_response_header = testcase.AuthResponseHeader + } + auth_response_body := "ok" + if testcase.AuthResponseBody != "" { + auth_response_body = testcase.AuthResponseBody + } + app_response_header := make(map[string]string) + if testcase.AppResponseHeader != nil { + app_response_header = testcase.AppResponseHeader + } + app_response_body := "ok" + if testcase.AppResponseBody != "" { + app_response_body = testcase.AppResponseBody + } + + expect_auth_behavior["auth_exit"] = auth_exit + expect_auth_behavior["auth_response_header"] = auth_response_header + expect_auth_behavior["auth_response_body"] = auth_response_body + expect_auth_behavior["app_exit"] = app_exit + expect_auth_behavior["app_response_header"] = app_response_header + expect_auth_behavior["app_response_body"] = app_response_body + _, err := gr.New().R(). + SetHeader("id", id). + SetBody(expect_auth_behavior). + Put("http://" + echo_public_ip + ":60080" + "/state") + g.Expect(err).ShouldNot(HaveOccurred()) + + // client send request + cli := gr.New() + cli.SetRedirectPolicy(gr.NoRedirectPolicy()) + r := cli.R() + r.SetHeader("Accept-Encoding", "*") + r.SetHeader("host", ing_name) + r.SetHeader("id", id) + for k, v := range testcase.ReqHeader { + r.SetHeader(k, v) + } + res, _ := r.Get("http://" + ip_port + "/abc") + l.Info("ret", "code", res.StatusCode(), "header", res.Header(), "body", res.String()) + if testcase.ExpectExit != 0 { + g.Expect(res.StatusCode()).Should(Equal(testcase.ExpectExit)) + } + if testcase.ExpectResponseHeader != nil { + same, patch, err := JsonBelongsTO(res.Header(), testcase.ExpectResponseHeader) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(same).Should(BeTrue(), "auth req header not match\n"+PrettyJson(patch)) + } + + // so what auth server received? + res, err = gr.New().R(). + SetHeader("id", id). + ForceContentType("application/json"). 
+ Get("http://" + echo_public_ip + ":60080" + "/state") + _ = err + l.Info("ret", "code", res.StatusCode(), "body", res.String()) + + data := map[string]interface{}{} + err = json.Unmarshal([]byte(res.String()), &data) + g.Expect(err).ShouldNot(HaveOccurred()) + l.Info("auth all", "data", PrettyJson(data)) + if testcase.ExpectAuthReqHeader != nil { + l.Info("auth_req", "auth_req_header", PrettyJson(data["/auth"]), "expect_auth_req_header", PrettyJson(testcase.ExpectAuthReqHeader)) + same, patch, err := JsonBelongsTO(data["/auth"], testcase.ExpectAuthReqHeader) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(same).Should(BeTrue(), "auth req header not match\n"+PrettyJson(patch)) + } + + if data["/"] != nil && len(testcase.ExpectAppReqHeader) > 0 { + l.Info("app_req", "app_req_header", PrettyJson(data["/auth"]), "expect_app_req_header", PrettyJson(testcase.ExpectAppReqHeader)) + same, patch, err := JsonBelongsTO(data["/"], testcase.ExpectAppReqHeader) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(same).Should(BeTrue(), "app req header not match\n"+PrettyJson(patch)) + } + if testcase.extra_check != nil { + testcase.extra_check(g, data) + } + if extra_check != nil { + extra_check(g, data) + } + } + real_test(NewGomegaWithT(GinkgoT())) + } + DescribeTableSubtree("auth signin", func(testcase AuthTestCase) { + ingress := "" + BeforeAll(func() { + auth_resty, err := NewAuthResty(l, cfg) + GinkgoNoErr(err) + echo_ip, echo_public_ip, err = auth_resty.GetIpAndHostIp() + GinkgoNoErr(err) + ingress = init_auth_ingres("auth-host") + }) + AfterAll(func() { + k.Kubectl("delete ingress auth-host") + }) + BeforeEach(func() { + k.Kubectl("delete ingress auth-host") + }) + It("ingng should ok", Label("ingng", "auth-signin"), func() { + ingress, err := YqDo(ingress, `yq ".spec.ingressClassName=\"nginx\""`) + GinkgoNoErr(err) + l.Info("update ingress", "ingress", ingress, "ip", echo_ip) + k.KubectlApply(ingress) + l.Info("ingng", "host", ingng_ip_port) + do_auth_test("auth-host", testcase, ingng_ip_port, testcase.ingng_external_check) + }) + + It("alb should ok", Label("alb", "auth-signin"), func() { + ingress, err := YqDo(ingress, `yq ".spec.ingressClassName=\"auth\""`) + GinkgoNoErr(err) + k.Kubectl("delete ingress auth-signin") + l.Info("update ingress", "ingress", ingress, "ip", echo_ip) + k.KubectlApply(ingress) + do_auth_test("auth-host", testcase, alb_ip_port, testcase.alb_external_check) + }) + }, signin_entries) + + // nginx.ingress.kubernetes.io/auth-response-headers: to specify headers to pass to backend once authentication request completes. + // nginx.ingress.kubernetes.io/auth-proxy-set-headers: the name of a ConfigMap that specifies headers to pass to the authentication service + // nginx.ingress.kubernetes.io/auth-request-redirect: to specify the X-Auth-Request-Redirect header value. 
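+	//
+	// Roughly what the entries below exercise (a summary of the cases, not extra behavior):
+	//   - auth-response-headers "X-Test-Auth": a header returned by the auth service
+	//     (x-test-auth: xxx) is expected to show up on the request that reaches the app backend;
+	//   - auth-proxy-set-headers pointing at a missing ConfigMap is expected to fail with 503,
+	//     while a valid ConfigMap rewrites the headers of the request sent to the auth service
+	//     itself, nginx variables such as $http_host and ${msec} included;
+	//   - auth-request-redirect "https://a.b.c": the auth request should carry
+	//     x-auth-request-redirect: https://a.b.c.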
+ extra_headers_entries := []TableEntry{ + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth-response-headers", + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-response-headers": "X-Auth-Request-Redirect,X-Test-Auth", + }, + ReqHeader: map[string]string{ + "a": "b", + }, + AuthResponseHeader: map[string]string{ + "x-test-auth": "xxx", + }, + ExpectAppReqHeader: map[string]string{ + "x-test-auth": "xxx", + "a": "b", + }, + }), + + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "invalid auth-proxy-set-headers", + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-proxy-set-headers": "default/xx-not-exist", + }, + ExpectExit: 503, + }), + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth-proxy-set-headers", + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-proxy-set-headers": "default/custom-header", + }, + Cm: map[string]map[string]string{ + "default/custom-header": { + "X-Different-Name": "true", + "X-Request-Start": "t=${msec}", + "xx-host-x": "$http_host", + "Complex": "http://${http_host} x _ :;.,\"?!(){}[]@<>=-+*#&|~^% ", + "X-Using-Nginx-Controller": "true", + }, + }, + ExpectAuthReqHeader: map[string]string{ + "x-different-name": "true", + "xx-host-x": "auth-extra", + "complex": "http://auth-extra x _ :;.,\"?!(){}[]@\u003c\u003e=-+*#\u0026|~^%", + "x-using-nginx-controller": "true", + }, + extra_check: func(g Gomega, data map[string]interface{}) { + start := data["/auth"].(map[string]interface{})["x-request-start"].(string) + l.Info("start", "start", start) + g.Expect(strings.HasPrefix(start, "t=173")).Should(BeTrue()) + }, + }), + Entry(func(c AuthTestCase) string { return c.Title }, AuthTestCase{ + Title: "auth-request-redirect", + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/auth-request-redirect": "https://a.b.c", + }, + ExpectAuthReqHeader: map[string]string{ + "x-auth-request-redirect": "https://a.b.c", + }, + }), + } + + DescribeTableSubtree("auth extra", func(testcase AuthTestCase) { + ingress := "" + BeforeAll(func() { + auth_resty, err := NewAuthResty(l, cfg) + GinkgoNoErr(err) + echo_ip, echo_public_ip, err = auth_resty.GetIpAndHostIp() + GinkgoNoErr(err) + ingress = init_auth_ingres("auth-extra") + }) + + AfterAll(func() { + k.Kubectl("delete ingress auth-extra") + }) + BeforeEach(func() { + k.Kubectl("delete ingress auth-extra") + }) + + It("ingng should ok", Label("ingng", "auth-extra"), func() { + ingress, err := YqDo(ingress, `yq ".spec.ingressClassName=\"nginx\""`) + GinkgoNoErr(err) + l.Info("update ingress", "ingress", ingress, "ip", echo_ip) + k.KubectlApply(ingress) + l.Info("ingng", "host", ingng_ip_port) + do_auth_test("auth-extra", testcase, ingng_ip_port, testcase.ingng_external_check) + }) + + It("alb should ok", Label("alb", "auth-extra"), func() { + ingress, err := YqDo(ingress, `yq ".spec.ingressClassName=\"auth\""`) + GinkgoNoErr(err) + k.Kubectl("delete ingress auth-extra") + l.Info("update ingress", "ingress", ingress, "ip", echo_ip) + k.KubectlApply(ingress) + do_auth_test("auth-extra", testcase, alb_ip_port, testcase.alb_external_check) + }) + }, extra_headers_entries) + + type IngCfg struct { + title string + annotation map[string]string + secret *corev1.Secret + } + + type AuthBasicTestCase struct { + IngCfg + ReqHeader map[string]string + ExpectResHeader map[string]string + ExpectCode int + } + auth_basic_entries := []TableEntry{ + Entry(func(c AuthBasicTestCase) string { return 
c.title }, AuthBasicTestCase{ + IngCfg: IngCfg{ + title: "apr1 file type should ok 401", + annotation: map[string]string{ + "nginx.ingress.kubernetes.io/auth-realm": "default", + "nginx.ingress.kubernetes.io/auth-secret": "default/auth-secret", + "nginx.ingress.kubernetes.io/auth-secret-type": "auth-file", + "nginx.ingress.kubernetes.io/auth-type": "basic", + }, + secret: &corev1.Secret{ + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + // foo : bar + "auth": []byte("foo:$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1"), // cspell:disable-line + }, + }, + }, + ExpectCode: 401, + ExpectResHeader: map[string]string{ + "Www-Authenticate": "Basic realm=\"default\"", + }, + }), + Entry(func(c AuthBasicTestCase) string { return c.title }, AuthBasicTestCase{ + IngCfg: IngCfg{ + title: "apr1 file type should ok 200", + annotation: map[string]string{ + "nginx.ingress.kubernetes.io/auth-realm": "default", + "nginx.ingress.kubernetes.io/auth-secret": "default/auth-secret", + "nginx.ingress.kubernetes.io/auth-secret-type": "auth-file", + "nginx.ingress.kubernetes.io/auth-type": "basic", + }, + secret: &corev1.Secret{ + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + // foo : bar + "auth": []byte("foo:$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1"), // cspell:disable-line + }, + }, + }, + ExpectCode: 200, + ReqHeader: map[string]string{ + "Authorization": "Basic Zm9vOmJhcg==", // cspell:disable-line + }, + ExpectResHeader: map[string]string{}, + }), + Entry(func(c AuthBasicTestCase) string { return c.title }, AuthBasicTestCase{ + IngCfg: IngCfg{ + title: "apr1 map type should ok 200", + annotation: map[string]string{ + "nginx.ingress.kubernetes.io/auth-realm": "default", + "nginx.ingress.kubernetes.io/auth-secret": "default/auth-secret", + "nginx.ingress.kubernetes.io/auth-secret-type": "auth-map", + "nginx.ingress.kubernetes.io/auth-type": "basic", + }, + secret: &corev1.Secret{ + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "foo": []byte("$apr1$qICNZ61Q$2iooiJVUAMmprq258/ChP1"), // cspell:disable-line + }, + }, + }, + ExpectCode: 200, + ReqHeader: map[string]string{ + "Authorization": "Basic Zm9vOmJhcg==", // cspell:disable-line + }, + ExpectResHeader: map[string]string{}, + }), + } + DescribeTableSubtree("auth basic", func(testcase AuthBasicTestCase) { + init_auth_basic_ingres := func(class string, kt *Kubectl) { + cfg := testcase.IngCfg + annotations := cfg.annotation + p_type := nv1.PathTypeExact + ing := nv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-basic", + Namespace: "default", + Annotations: annotations, + }, + Spec: nv1.IngressSpec{ + IngressClassName: pointer.String(class), + Rules: []nv1.IngressRule{ + { + Host: "auth-basic", + IngressRuleValue: nv1.IngressRuleValue{ + HTTP: &nv1.HTTPIngressRuleValue{ + Paths: []nv1.HTTPIngressPath{ + { + Path: "/ok", + PathType: &p_type, + Backend: nv1.IngressBackend{ + Service: &nv1.IngressServiceBackend{ + Name: "auth-server", + Port: nv1.ServiceBackendPort{ + Number: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + kt.Kubectl("delete ingress -n default auth-basic") + kt.Kubectl("delete secret -n default auth-secret") + time.Sleep(3 * time.Second) + if testcase.secret != nil { + cfg.secret.ObjectMeta = metav1.ObjectMeta{ + Name: "auth-secret", + Namespace: "default", + } + _, err := kc.GetK8sClient().CoreV1().Secrets("default").Create(ctx, testcase.secret, metav1.CreateOptions{}) + GinkgoNoErr(err) + } + _, err := kc.GetK8sClient().NetworkingV1().Ingresses("default").Create(ctx, &ing, metav1.CreateOptions{}) 
+ GinkgoNoErr(err) + time.Sleep(5 * time.Second) + } + + BeforeAll(func() { + auth_resty, err := NewAuthResty(l, cfg) + GinkgoNoErr(err) + echo_ip, echo_public_ip, err = auth_resty.GetIpAndHostIp() + GinkgoNoErr(err) + }) + + AfterAll(func() { + k.Kubectl("delete ingress auth-basic") + }) + BeforeEach(func() { + k.Kubectl("delete ingress auth-basic") + }) + do_auth_basic_test := func(base_url string) { + url := "http://" + base_url + "/ok" + l.Info("curl", "url", url) + r := gr.New().R(). + SetHeader("HOST", "auth-basic"). + SetHeaders(testcase.ReqHeader) + res, err := r.Get(url) + GinkgoNoErr(err) + l.Info("res", "body", res.Body(), "status", res.Status(), "header", res.Header()) + GinkgoAssertTrue(res.StatusCode() == testcase.ExpectCode, "") + for k, v := range testcase.ExpectResHeader { + GinkgoAssertStringEq(strings.Join(res.Header()[k], ","), v, "") + } + } + It("ingng should ok", Label("ingng", "auth-basic"), func() { + init_auth_basic_ingres("nginx", k) + do_auth_basic_test(ingng_ip_port) + }) + + It("alb should ok", Label("alb", "auth-basic"), func() { + init_auth_basic_ingres("auth", k) + do_auth_basic_test(alb_ip_port) + }) + }, auth_basic_entries) +}) diff --git a/test/conformance/ingress-nginx/auth_kind.sh b/test/conformance/ingress-nginx/auth_kind.sh new file mode 100644 index 00000000..75be66e7 --- /dev/null +++ b/test/conformance/ingress-nginx/auth_kind.sh @@ -0,0 +1,295 @@ +#!/bin/bash + +function auth-note() { + # https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md#authentication + source ./scripts/alb-dev-actions.sh + export LUACOV=true + rm -rf ./luacov* + alb-nginx-test $PWD/template/t/e2e/auth_test/auth_test.t + alb-nginx-luacov-summary | grep auth + + helm upgrade --install ingress-nginx ingress-nginx --set controller.image.digest= --set controller.admissionWebhooks.enabled=false --set controller.image.pullPolicy=Never --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace + return +} + +function auth-kind() ( + local chart="$1" + + unset KUBECONFIG + kind-create-1.28.0 auth + kind get kubeconfig --name=auth >~/.kube/auth + cp ~/.kube/auth ~/.kube/config + + local alb_image=$(auth-get-alb-image $chart) + echo "alb image |$alb_image|" + + auth-init-alb-operator $chart + auth-init-ingress-nginx auth + auth-init-echo-resty $alb_image + local IP=$(kubectl get po -n cpaas-system -l service_name=alb2-auth -o wide --no-headers | awk '{print $6}') + auth-init-alb-ingress + auth-init-ingress-nginx-ingress + # ingress-nginx + curl http://$IP/echo + # alb + curl http://$IP:11180/echo +) + +function auth-get-alb-image() ( + local chart=$1 + if [[ ! -d "./alauda-alb2" ]]; then + helm-pull $chart + fi + local tag=$(cat ./alauda-alb2/values.yaml | yq .global.images.alb2.tag) + local alb_image=registry.alauda.cn:60080/acp/alb2:$tag + echo "$alb_image" +) + +function auth-init-alb-operator() ( + local chart="$1" + if [[ ! 
-d "./alauda-alb2" ]]; then + echo "pull chart $chart" + helm-pull $chart + fi + + local alb_image=$(_load_alb_image_in_cur_kind ./alauda-alb2 auth) + # local alb_image="registry.alauda.cn:60080/acp/alb2:v3.19.0-fix.590.31.gf3ed81db-feat-acp-38937" + echo "alb image $alb_image" + helm upgrade --install --namespace cpaas-system --create-namespace --debug alauda-alb --set operator.albImagePullPolicy=IfNotPresent --set defaultAlb=false -f ./alauda-alb2/values.yaml ./alauda-alb2 + local alb=$( + cat </dev/null + echo registry.alauda.cn:60080/acp/alb2:$tag +) + +function loop() ( + alb-static-build + md5sum $PWD/bin/alb + local alb_pod=$(kubectl get po -n cpaas-system --no-headers | grep auth | awk '{print $1}') + kubectl cp $PWD/bin/alb cpaas-system/$alb_pod:/alb/ctl/alb -c alb2 +# kubectl cp $PWD/template/nginx/lua cpaas-system/$alb_pod:/alb/nginx/luax -c nginx +# ./bin/tools/dirhash ./template/nginx/lua +# kubectl exec -n cpaas-system $alb_pod -c nginx -- sh -c 'rm -rf /alb/nginx/lua && mv /alb/nginx/luax /alb/nginx/lua && /alb/tools/dirhash /alb/nginx/lua/' +) diff --git a/test/conformance/ingress-nginx/auth_resty.go b/test/conformance/ingress-nginx/auth_resty.go new file mode 100644 index 00000000..2a1de9c5 --- /dev/null +++ b/test/conformance/ingress-nginx/auth_resty.go @@ -0,0 +1,106 @@ +package ingressnginx + +import ( + "embed" + "os" + + . "alauda.io/alb2/test/kind/pkg/helper" + . "alauda.io/alb2/utils/test_utils" + "github.com/go-logr/logr" + "github.com/pborman/indent" + "k8s.io/client-go/rest" +) + +//go:embed lua_snip/* +var EMBED_LUA_SNIP embed.FS + +type AuthResty struct { + l logr.Logger + cfg *rest.Config + e *Echo +} + +func NewAuthResty(l logr.Logger, cfg *rest.Config) (*AuthResty, error) { + l.Info("init echo resty here") + state_lua, err := EMBED_LUA_SNIP.ReadFile("lua_snip/state.lua") + if err != nil { + return nil, err + } + auth_lua, err := EMBED_LUA_SNIP.ReadFile("lua_snip/auth.lua") + if err != nil { + return nil, err + } + app_lua, err := EMBED_LUA_SNIP.ReadFile("lua_snip/app.lua") + if err != nil { + return nil, err + } + pad := " " + auth_and_upstream_raw := Template(` + access_log /dev/stdout ; + error_log /dev/stdout info; + location /state { + content_by_lua_block { + {{.state_lua}} + } + } + location /auth { + content_by_lua_block { + {{.auth_lua}} + } + } + location / { + content_by_lua_block { + {{.app_lua}} + } + } + `, map[string]interface{}{ + "state_lua": indent.String(pad, string(state_lua)), + "auth_lua": indent.String(pad, string(auth_lua)), + "app_lua": indent.String(pad, string(app_lua)), + }) + + echo, err := NewEchoResty("", cfg, l).Deploy(EchoCfg{Name: "auth-server", Image: os.Getenv("ALB_IMAGE"), Ip: "v4", Raw: auth_and_upstream_raw, PodPort: "80", PodHostPort: "60080"}) + if err != nil { + return nil, err + } + echo_ip, err := echo.GetIp() + if err != nil { + return nil, err + } + l.Info("echo", "echo ip", echo_ip) + + echo_host_ip, err := echo.GetHostIp() + if err != nil { + return nil, err + } + l.Info("echo", "echo host ip", echo_host_ip) + return &AuthResty{ + l: l, + cfg: cfg, + e: echo, + }, nil +} + +func (a *AuthResty) Drop() error { + return a.e.Drop() +} + +func (a *AuthResty) GetIp() (string, error) { + return a.e.GetIp() +} + +func (a *AuthResty) GetHostIp() (string, error) { + return a.e.GetHostIp() +} + +func (a *AuthResty) GetIpAndHostIp() (string, string, error) { + ip, err := a.GetIp() + if err != nil { + return "", "", err + } + host_ip, err := a.GetHostIp() + if err != nil { + return "", "", err + } + return ip, host_ip, nil 
+} diff --git a/test/conformance/ingress-nginx/lua_snip/app.lua b/test/conformance/ingress-nginx/lua_snip/app.lua new file mode 100644 index 00000000..4844b4c2 --- /dev/null +++ b/test/conformance/ingress-nginx/lua_snip/app.lua @@ -0,0 +1,30 @@ +local c = require("utils.common") +if ngx.var.uri == "/ok" then + ngx.say("ok") + return +end +local id = ngx.var.http_id +ngx.log(ngx.INFO, "im app " .. id) +local h, err = ngx.req.get_headers() +if err ~= nil then + ngx.log(ngx.ERR, "err: " .. tostring(err)) +end +for k, v in pairs(h) do + ngx.log(ngx.ERR, "app " .. tostring(k) .. " : " .. tostring(v)) +end +if ngx.shared.state:get(id) == nil then + ngx.shared.state:set(id, c.json_encode({})) +end + +local data = c.json_decode(ngx.shared.state:get(id)) +data["/"] = h +ngx.shared.state:set(id, c.json_encode(data)) + +local data = c.json_decode(ngx.shared.state:get(id .. "-cfg")) +for k, v in pairs(data.app_response_header) do + ngx.header[k] = v +end +ngx.status = data.app_exit +ngx.say(data.app_response_body) +ngx.exit(data.app_exit) +ngx.say("OK") diff --git a/test/conformance/ingress-nginx/lua_snip/auth.lua b/test/conformance/ingress-nginx/lua_snip/auth.lua new file mode 100644 index 00000000..04db7d0d --- /dev/null +++ b/test/conformance/ingress-nginx/lua_snip/auth.lua @@ -0,0 +1,32 @@ +local c = require("utils.common") +ngx.log(ngx.INFO, "im auth") +local id = ngx.var.http_id +local h, err = ngx.req.get_headers() +if err ~= nil then + ngx.log(ngx.ERR, "err: " .. tostring(err)) +end + +if ngx.shared.state:get(id) == nil then + local data = c.json_encode({}, true) + ngx.log(ngx.ERR, "init state ", data, id) + ngx.shared.state:set(id, data) +end + +ngx.log(ngx.ERR, "state is " .. id .. " " .. tostring(ngx.shared.state:get(id))) +local data = c.json_decode(ngx.shared.state:get(id)) +data["/auth"] = h +ngx.shared.state:set(id, c.json_encode(data)) + +for k, v in pairs(h) do + ngx.log(ngx.ERR, "auth " .. tostring(k) .. " : " .. tostring(v)) +end + +local cfg = c.json_decode(ngx.shared.state:get(id .. "-cfg")) +for k, v in pairs(cfg.auth_response_header) do + ngx.header[k] = v +end + +ngx.log(ngx.ERR, "auth exit with " .. tostring(cfg.auth_exit)) +ngx.status = cfg.auth_exit +ngx.exit(cfg.auth_exit) +ngx.say(cfg.auth_response_body) diff --git a/test/conformance/ingress-nginx/lua_snip/state.lua b/test/conformance/ingress-nginx/lua_snip/state.lua new file mode 100644 index 00000000..92a57bdf --- /dev/null +++ b/test/conformance/ingress-nginx/lua_snip/state.lua @@ -0,0 +1,21 @@ +ngx.req.read_body() +ngx.log(ngx.INFO, + "im state " .. ngx.var.http_id .. " " .. tostring(ngx.var.request_method) .. " " .. tostring(ngx.req.get_body_data())) +local id = ngx.var.http_id +local c = require("utils.common") + +if ngx.shared.state:get(id) == nil then + ngx.shared.state:set(id, c.json_encode({}, true)) +end + +if ngx.var.request_method == "PUT" then + ngx.shared.state:set(id .. "-cfg", ngx.req.get_body_data()) + ngx.say("OK") + return +end +if ngx.var.request_method == "GET" then + local out = ngx.shared.state:get(id) or "{}" + ngx.log(ngx.INFO, "state is " .. id .. " " .. tostring(out)) + ngx.header["Content-Type"] = "application/json" + ngx.say(out) +end diff --git a/test/conformance/ingress-nginx/suite_test.go b/test/conformance/ingress-nginx/suite_test.go new file mode 100644 index 00000000..de0a5813 --- /dev/null +++ b/test/conformance/ingress-nginx/suite_test.go @@ -0,0 +1,16 @@ +package ingressnginx + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// 在兼容ingress-nginx的情况下,用户常问的问题是,在xxx配置下,ingress-nginx 会是什么行为 +// conformance test 应该能够给出一个快速的有力的回应。 +// 同时,这下面的case 也是我们的e2e测试用例 +func TestIngressNginx(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "ingress nginx related e2e") +} diff --git a/test/e2e/framework/ingress_assert_ext.go b/test/e2e/framework/ingress_assert_ext.go index 769bfa7e..24cc774c 100644 --- a/test/e2e/framework/ingress_assert_ext.go +++ b/test/e2e/framework/ingress_assert_ext.go @@ -145,7 +145,6 @@ func (i *IngressExt) InitIngressCase(ingressCase IngressCase) { assert.Nil(ginkgo.GinkgoT(), err, "") } -// TODO: use f.AssertKubectlApply func (i *IngressExt) CreateIngress(ns, name string, path string, svc string, port int) { f := i.kc _, err := f.GetK8sClient().NetworkingV1().Ingresses(ns).Create(context.Background(), &networkingv1.Ingress{ diff --git a/test/e2e/gateway/policyattachment/timeout_policy.go b/test/e2e/gateway/policyattachment/timeout_policy.go index 512c7a21..77f29848 100644 --- a/test/e2e/gateway/policyattachment/timeout_policy.go +++ b/test/e2e/gateway/policyattachment/timeout_policy.go @@ -314,7 +314,7 @@ spec: }) // h2_90 attach to g2, no config h2_0_90_ok := TestEq(func() bool { - return h2_0_90.Config == nil + return h2_0_90.Config.Timeout == nil }) return h1_0_ok && h1_1_ok && h2_0_ok && h2_0_90_ok, nil }) @@ -353,7 +353,7 @@ spec: return ret }) && TestEq(func() bool { - ret := h2_0.Config == nil + ret := h2_0.Config.Timeout == nil Logf("h20 ok") return ret }), nil diff --git a/test/e2e/ingress/gg_test.go b/test/e2e/ingress/gg_test.go deleted file mode 100644 index 9f57e2ec..00000000 --- a/test/e2e/ingress/gg_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package ingress - -import "testing" - -func TestGinkgo(t *testing.T) { - t.Logf("test") - a := 1 - fi1 := func() { - a = 2 - t.Logf("test %v", a) - } - fi2 := func() { - a = 3 - t.Logf("test %v", a) - } - fd1 := func() { - t.Logf("fd1 %v", a) - } - fd2 := func() { - t.Logf("fd2 %v", a) - } - - _ = fi2 - fi1() - fd1() - fi2() - fd2() -} diff --git a/test/e2e/ingress/ingress.go b/test/e2e/ingress/ingress.go index 7b5b2891..941045c2 100644 --- a/test/e2e/ingress/ingress.go +++ b/test/e2e/ingress/ingress.go @@ -148,11 +148,10 @@ var _ = ginkgo.Describe("Ingress", func() { assert.Equal(ginkgo.GinkgoT(), rule.Spec.ServiceGroup.Services[0].Port, 8080) - f.WaitPolicy(func(policyRaw string) bool { - fmt.Printf("policyRaw %s", policyRaw) - hasRule := PolicyHasRule(policyRaw, 80, ruleName) - hasPod := PolicyHasBackEnds(policyRaw, ruleName, `[]`) - return hasRule && hasPod + f.WaitNgxPolicy(func(p NgxPolicy) (bool, error) { + rp, _, _ := p.FindHttpPolicy(ruleName) + fmt.Printf("policy %#v", rp) + return rp != nil && rp.RedirectCode == 302, nil }) }) @@ -578,7 +577,6 @@ spec: rule := f.WaitIngressRule(ingname, ns, 1)[0] GinkgoAssertStringEq(ruleName, rule.Name, "") } - }) ginkgo.It("should create rule with project label", func() { f.AssertKubectlApply(` diff --git a/test/e2e/operator/simple/chart.go b/test/e2e/operator/simple/chart.go index ebb73da7..bb1711ee 100644 --- a/test/e2e/operator/simple/chart.go +++ b/test/e2e/operator/simple/chart.go @@ -78,11 +78,6 @@ var _ = Describe("chart", func() { GinkgoNoErr(err) assert.Equal(GinkgoT(), "ares-alb2", *alb.Spec.Config.LoadbalancerName) - csv, err := kt.Kubectl("get csv -A") - GinkgoNoErr(err) - l.Info("csv", "csv", csv) - assert.Equal(GinkgoT(), strings.Contains(csv, "No resources found"), true) - l.Info("alb", "annotation", alb.Annotations["alb.cpaas.io/migrate-backup"]) _, err 
= kc.GetK8sClient().RbacV1().ClusterRoleBindings().Get(ctx, "alb-operator", metav1.GetOptions{}) GinkgoNoErr(err) diff --git a/test/e2e/perf/perf.go b/test/e2e/perf/perf.go new file mode 100644 index 00000000..0338b6d3 --- /dev/null +++ b/test/e2e/perf/perf.go @@ -0,0 +1,211 @@ +package perf + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + + pprof "runtime/pprof" + + "alauda.io/alb2/config" + "alauda.io/alb2/driver" + albv1 "alauda.io/alb2/pkg/apis/alauda/v1" + at "alauda.io/alb2/pkg/controller/ext/auth/types" + pm "alauda.io/alb2/pkg/utils/metrics" + ptu "alauda.io/alb2/pkg/utils/test_utils" + "alauda.io/alb2/utils" + "alauda.io/alb2/utils/log" + . "alauda.io/alb2/utils/test_utils" + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/rest" +) + +// alb 处理规则的速度 5k条规则 87ms alb-perf-go-policy-gen + +var _ = Describe("rule perf", func() { + base := InitBase() + var env *EnvtestExt + var kt *Kubectl + var kc *K8sClient + var ctx context.Context + var l logr.Logger + var ctx_cancel context.CancelFunc + BeforeEach(func() { + l = log.L() + env = NewEnvtestExt(InitBase(), l) + env.AssertStart() + kt = env.Kubectl() + ctx, ctx_cancel = context.WithCancel(context.Background()) + kc = NewK8sClient(ctx, env.GetRestCfg()) + _ = base + _ = l + _ = kt + _ = kc + }) + + AfterEach(func() { + ctx_cancel() + env.Stop() + }) + + It("should ok when has 5k rule", func() { + if os.Getenv("RULE_PERF") == "" { + return + } + init_k8s(env.GetRestCfg()) + mock := config.DefaultMock() + drv, err := driver.NewDriver(driver.DrvOpt{Ctx: ctx, Cf: env.GetRestCfg(), Opt: driver.Cfg2opt(mock)}) + GinkgoNoErr(err) + s := time.Now() + ctx := ptu.PolicyGetCtx{ + Ctx: ctx, Name: "alb-dev", Ns: "cpaas-system", Drv: drv, L: l, + Cfg: mock, + } + cli := ptu.NewXCli(ctx) + + f, err := os.Create("rule-perf-cpu") + if err != nil { + GinkgoNoErr(err) + } + if err := pprof.StartCPUProfile(f); err != nil { + GinkgoNoErr(err) + } + defer pprof.StopCPUProfile() + count := 100 + for i := 1; i <= count; i++ { + l.Info("perf policy", "i", i, "a", count) + _, _, err := cli.GetPolicyAndNgx(ctx) + GinkgoNoErr(err) + } + e := time.Now() + // l.Info("xx", "p", utils.PrettyJson(policy.SharedConfig)) + l.Info("xx", "t", pm.Read()) + l.Info("xx", "all", e.UnixMilli()-s.UnixMilli()) + }) +}) + +func init_svc_and_ep(ns string, name string, port int, ip string, kt *K8sClient) { + svc := k8sv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: k8sv1.ServiceSpec{ + Type: k8sv1.ServiceTypeClusterIP, + Ports: []k8sv1.ServicePort{ + {Port: int32(port), TargetPort: intstr.FromInt(port), Protocol: k8sv1.ProtocolTCP}, + }, + }, + } + ep := k8sv1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Subsets: []k8sv1.EndpointSubset{ + { + Ports: []k8sv1.EndpointPort{ + { + Port: int32(port), + Protocol: k8sv1.ProtocolTCP, + }, + }, + Addresses: []k8sv1.EndpointAddress{ + { + IP: ip, + Hostname: "s-1-ep-2", + }, + }, + }, + }, + } + ctx := context.Background() + kt.GetK8sClient().CoreV1().Services(ns).Create(ctx, &svc, metav1.CreateOptions{}) + kt.GetK8sClient().CoreV1().Endpoints(ns).Create(ctx, &ep, metav1.CreateOptions{}) +} + +func gen_rule(alb string, ft string, count int, kc *K8sClient) error { + init_svc_and_ep("cpaas-system", "demo", 80, "192.168.0.1", kc) + ctx := context.Background() + for i := 0; i < count; i++ { + r := albv1.Rule{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: fmt.Sprintf("rule-%v", i), + Namespace: "cpaas-system", + Labels: map[string]string{ + "alb2.cpaas.io/name": alb, + "alb2.cpaas.io/frontend": ft, + }, + }, + Spec: albv1.RuleSpec{ + Priority: 4, + DSLX: albv1.DSLX{ + { + Values: [][]string{{utils.OP_EQ, fmt.Sprintf("/rule-%v", i)}}, + Type: utils.KEY_URL, + }, + }, + Config: &albv1.RuleConfigInCr{ + Auth: &at.AuthCr{ + Forward: &at.ForwardAuthInCr{ + Url: "http://a.com", + }, + }, + }, + + ServiceGroup: &albv1.ServiceGroup{ + Services: []albv1.Service{ + { + Name: "demo", + Namespace: "cpaas-system", + Port: 80, + Weight: 100, + }, + }, + }, + }, + } + _, err := kc.GetAlbClient().CrdV1().Rules("cpaas-system").Create(ctx, &r, metav1.CreateOptions{}) + if err != nil { + return err + } + } + return nil +} + +func init_k8s(cfg *rest.Config) { + kt := NewKubectl("", cfg, log.L()) + kc := NewK8sClient(context.Background(), cfg) + kt.AssertKubectlApply(` +apiVersion: crd.alauda.io/v2beta1 +kind: ALB2 +metadata: + name: alb-dev + namespace: cpaas-system +spec: + address: "127.0.0.1" + type: "nginx" + config: + replicas: 1 +--- +apiVersion: crd.alauda.io/v1 +kind: Frontend +metadata: + labels: + alb2.cpaas.io/name: alb-dev + name: alb-dev-00080 + namespace: cpaas-system +spec: + backendProtocol: "" + certificate_name: "" + port: 80 + protocol: http +`) + gen_rule("alb-dev", "alb-dev-00080", 5000, kc) +} diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index b80a04da..a16df300 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -12,6 +12,7 @@ import ( _ "alauda.io/alb2/test/e2e/operator/public-cloud" _ "alauda.io/alb2/test/e2e/operator/rawk8s" _ "alauda.io/alb2/test/e2e/operator/simple" + _ "alauda.io/alb2/test/e2e/perf" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/test/kind/pkg/helper/alb-ctx.go b/test/kind/pkg/helper/alb-ctx.go index 102bb43a..2d54daa2 100644 --- a/test/kind/pkg/helper/alb-ctx.go +++ b/test/kind/pkg/helper/alb-ctx.go @@ -211,7 +211,8 @@ func (a *AlbK8sCtx) DeployEchoResty() error { return err } a.Log.Info("nginx", "image", nginx) - return e.Deploy(EchoCfg{Name: "echo-resty", Image: nginx, Ip: "v4"}) + _, err = e.Deploy(EchoCfg{Name: "echo-resty", Image: nginx, Ip: "v4"}) + return err } func (a *AlbK8sCtx) Destroy() error { diff --git a/test/kind/pkg/helper/echo-resty-ext.go b/test/kind/pkg/helper/echo-resty-ext.go index 7f523d33..a43ca220 100644 --- a/test/kind/pkg/helper/echo-resty-ext.go +++ b/test/kind/pkg/helper/echo-resty-ext.go @@ -1,7 +1,18 @@ package helper import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + "time" + "github.com/go-logr/logr" + "github.com/samber/lo" + "github.com/xorcare/pointer" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" _ "embed" @@ -16,6 +27,8 @@ type Echo struct { log logr.Logger base string k *Kubectl + kc *K8sClient + cfg EchoCfg } func NewEchoResty(base string, cfg *rest.Config, log logr.Logger) *Echo { @@ -23,31 +36,146 @@ func NewEchoResty(base string, cfg *rest.Config, log logr.Logger) *Echo { log: log, base: base, k: NewKubectl(base, cfg, log), + kc: NewK8sClient(context.Background(), cfg), } } type EchoCfg struct { - Image string - Name string - Ip string - Lb string + Image string + Ns string + Name string + Ip string + Lb string + PodPort string + PodHostPort string + Raw string + DefaultIngress *bool } -func (e *Echo) Deploy(cfg EchoCfg) error { +func (e *Echo) Deploy(cfg EchoCfg) (*Echo, error) { + if cfg.Ip == "" { + cfg.Ip = "v4" + } 
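+	// Fill in best-effort defaults so callers only need to set what they care about:
+	// DefaultIngress defaults to true, the pod port to 11180, the namespace to "default",
+	// and when no image is given the image of the alb-operator-ctl deployment is reused.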
+ if cfg.DefaultIngress == nil { + cfg.DefaultIngress = pointer.Bool(true) + } + if cfg.PodPort == "" { + cfg.PodPort = "11180" + } + if cfg.Ns == "" { + cfg.Ns = "default" + } + if cfg.Image == "" { + out, err := e.k.Kubectl("get deployments.apps -n cpaas-system alb-operator-ctl -o jsonpath='{.spec.template.spec.containers[*].image}'") + if err != nil { + return nil, err + } + cfg.Image = strings.TrimSpace(out) + } + cfg.Raw = strings.TrimSpace(cfg.Raw) + cfg.Raw = strings.ReplaceAll(cfg.Raw, " ", " ") + e.cfg = cfg + hash_bytes := sha256.Sum256([]byte(cfg.Raw)) + hash_key := hex.EncodeToString(hash_bytes[:]) // k := e.k echo := Template(EchoRestyTemplate, map[string]interface{}{ "Values": map[string]interface{}{ - "image": cfg.Image, - "name": cfg.Name, - "ip": cfg.Ip, - "replicas": 1, + "image": cfg.Image, + "name": cfg.Name, + "ip": cfg.Ip, + "replicas": 1, + "port": cfg.PodPort, + "hostport": cfg.PodHostPort, + "raw": cfg.Raw, + "hash": hash_key, + "defaultIngress": cfg.DefaultIngress, }, }) - e.log.Info("echo", "yaml", echo) + e.log.Info("yaml", "yaml", echo, "port", cfg.PodPort) out, err := e.k.KubectlApply(echo) if err != nil { - return err + return nil, err } e.log.Info(out) + // wait and reload nginx. to make sure volume work.. + ctx, _ := context.WithTimeout(context.Background(), time.Second*30) + + err = wait.PollUntilContextTimeout(ctx, time.Second*1, time.Second*3, true, + func(ctx context.Context) (done bool, err error) { + pods, err := e.GetRunningPods() + if err != nil { + return false, err + } + if len(pods) > 0 { + return true, nil + } + return false, nil + }, + ) + if err != nil { + return nil, err + } + pods, err := e.GetRunningPods() + if err != nil { + return nil, err + } + pod := pods[0] + + err = wait.PollUntilContextTimeout(ctx, time.Second*1, time.Second*20, true, + func(ctx context.Context) (done bool, err error) { + // speed up https://ahmet.im/blog/kubernetes-secret-volumes-delay/ + e.k.Kubectl("annotate", "pod", "--overwrite", "-n", cfg.Ns, pod.GetName(), fmt.Sprintf("update=%d", time.Now().Unix())) + out, err = e.k.Kubectl("exec", "-n", cfg.Ns, pod.GetName(), "--", "cat", "/etc/nginx/nginx.conf") + if err != nil { + e.log.Error(err, "get nginx conf fail") + return false, nil + } + if strings.Contains(out, hash_key) { + return true, nil + } + return false, nil + }, + ) + if err != nil { + return nil, err + } + _, err = e.k.Kubectl("exec", "-n", cfg.Ns, pod.GetName(), "--", "bash", "-c", "cat /alb/nginx.pid | xargs -I{} kill -HUP {}") + if err != nil { + return nil, err + } + return e, nil +} + +func (e *Echo) GetRunningPods() ([]corev1.Pod, error) { + pods, err := e.kc.GetPods(e.cfg.Ns, "k8s-app="+e.cfg.Name) + if err != nil { + return nil, err + } + pods = lo.Filter(pods, func(p corev1.Pod, _ int) bool { + return p.Status.Phase == "Running" + }) + return pods, nil +} + +func (e *Echo) GetIp() (string, error) { + ips, err := e.kc.GetPodIp(e.cfg.Ns, "k8s-app="+e.cfg.Name) + if err != nil { + return "", err + } + ip := ips[0] + return ip, nil +} + +func (e *Echo) GetHostIp() (string, error) { + pods, err := e.GetRunningPods() + if err != nil { + return "", err + } + pod := pods[0] + ip := pod.Status.HostIP + return ip, nil +} + +func (e *Echo) Drop() error { return nil } diff --git a/test/kind/pkg/helper/echo-resty.yaml b/test/kind/pkg/helper/echo-resty.yaml index 48bc094a..c0d74da7 100644 --- a/test/kind/pkg/helper/echo-resty.yaml +++ b/test/kind/pkg/helper/echo-resty.yaml @@ -13,75 +13,22 @@ metadata: name: {{.Values.name}}-config data: nginx-config: | - 
worker_processes 4; - + worker_processes 1; + error_log stderr notice; + pid /alb/nginx.pid; events { worker_connections 1024; } - stream { - log_format stream '[$time_local] $remote_addr $protocol $server_port $status $bytes_received $bytes_sent $session_time'; - access_log /dev/stdout stream; - error_log stderr info; - server { - listen 53 udp; - listen [::]:53 udp; - content_by_lua_block { - ngx.log(ngx.INFO,"udp socket connect") - local sock,err = ngx.req.socket() - local data, err = sock:receive() - if err ~= nil then - sock:send("err "..tostring(err)) - end - sock:send(data) - } - } - } - + # Hash: {{.Values.hash}} http { + lua_shared_dict state 10m; + lua_package_path '/usr/local/lib/lua/?.lua;/usr/local/openresty/lualib/?.lua;/usr/local/openresty/site/lualib/?.lua;/alb/nginx/lua/?.lua;;'; + lua_package_cpath '/usr/local/lib/lua/?.so;;'; server { - listen 80; - listen [::]:80; - location / { - content_by_lua_block { - local h, err = ngx.req.get_headers() - if err ~=nil then - ngx.say("err: "..tostring(err)) - end - for k, v in pairs(h) do - ngx.say("header "..tostring(k).." : "..tostring(v)) - end - ngx.say("url "..ngx.var.request_uri) - ngx.say("http client-ip "..ngx.var.remote_addr.." client-port "..ngx.var.remote_port.." server-ip "..ngx.var.server_addr.." server-port "..ngx.var.server_port) - } - } - } - - server { - listen 443 ssl; - listen [::]:443 ssl; - server_name _; - - ssl_certificate /cert/tls.crt; - ssl_certificate_key /cert/tls.key; - - ssl_session_timeout 5m; - ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_prefer_server_ciphers on; - - location / { - content_by_lua_block { - local h, err = ngx.req.get_headers() - if err ~=nil then - ngx.say("err: "..tostring(err)) - end - for k, v in pairs(h) do - ngx.say(tostring(k).." : "+tostring(v)) - end - ngx.say("https client-ip "..ngx.var.remote_addr.." client-port "..ngx.var.remote_port.." server-ip "..ngx.var.server_addr.." 
server-port "..ngx.var.server_port) - } - } + listen {{.Values.port}}; + listen [::]:{{.Values.port}}; + {{.Values.raw}} } } --- @@ -123,7 +70,8 @@ spec: - -c - 'mkdir -p /alb/logs && nginx -g "daemon off;" -p /alb -c /etc/nginx/nginx.conf && tail -f /dev/null' ports: - - containerPort: 80 + - containerPort: {{.Values.port}} + hostPort: {{.Values.hostport}} volumes: - name: config-volume configMap: @@ -145,16 +93,9 @@ metadata: spec: type: LoadBalancer ports: - - name: http - port: 80 - targetPort: 80 - - name: https - port: 443 - targetPort: 443 - - name: udp - protocol: UDP - port: 53 - targetPort: 53 + - name: {{.Values.name}} + port: {{.Values.port}} + targetPort: {{.Values.port}} selector: k8s-app: {{.Values.name}} {{- end }} @@ -194,6 +135,8 @@ spec: selector: k8s-app: {{.Values.name}} --- + +{{- if .Values.defualtIngress }} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: @@ -208,4 +151,5 @@ spec: service: name: {{.Values.name}} port: - number: 80 \ No newline at end of file + number: {{.Values.ingressPort}} +{{- end }} \ No newline at end of file diff --git a/test/kind/pkg/helper/echo-resty_test.go b/test/kind/pkg/helper/echo-resty_test.go index 3128902c..1681b892 100644 --- a/test/kind/pkg/helper/echo-resty_test.go +++ b/test/kind/pkg/helper/echo-resty_test.go @@ -1,6 +1,7 @@ package helper import ( + "os" "testing" "alauda.io/alb2/utils/log" @@ -9,13 +10,43 @@ import ( ) func TestEchoResty(t *testing.T) { + t.SkipNow() print(EchoRestyTemplate) base := InitBase() - kd := AdoptKind(base, "alb-dual", log.L()) - cfg, err := kd.GetConfig() + cfg, err := RESTFromKubeConfigFile(os.Getenv("KUBECONFIG")) assert.NoError(t, err) - image := "registry.alauda.cn:60080/acp/alb-nginx:v3.12.2" - e := NewEchoResty(base, cfg, log.L()) - err = e.Deploy(EchoCfg{Name: "echo-resty", Image: image, Ip: "v4"}) + image := os.Getenv("ALB_IMAGE") + raw := ` + access_log /dev/stdout ; + error_log /dev/stdout info; + location / { + content_by_lua_block { + ngx.say("1") + } + } + ` + e, err := NewEchoResty(base, cfg, log.L()).Deploy(EchoCfg{Name: "echo-resty", Image: image, Raw: raw, PodPort: "80"}) assert.NoError(t, err) + print(e.GetIp()) + assert.NoError(t, err) + k := NewKubectl(base, cfg, log.L()) + pods, err := e.GetRunningPods() + assert.NoError(t, err) + pod := pods[0] + out := k.AssertKubectl("exec", pod.Name, "--", "curl", "-s", "http://127.0.0.1:80") + assert.Equal(t, "1", out) + + raw = ` + access_log /dev/stdout ; + error_log /dev/stdout info; + location / { + content_by_lua_block { + ngx.say("2") + } + } + ` + e, err = NewEchoResty(base, cfg, log.L()).Deploy(EchoCfg{Name: "echo-resty", Image: image, Raw: raw, PodPort: "80"}) + assert.NoError(t, err) + out = k.AssertKubectl("exec", pod.Name, "--", "curl", "-s", "http://127.0.0.1:80") + assert.Equal(t, "2", out) } diff --git a/utils/test_utils/command.go b/utils/test_utils/command.go index 4bc79472..f0e3b7ac 100644 --- a/utils/test_utils/command.go +++ b/utils/test_utils/command.go @@ -2,6 +2,7 @@ package test_utils import ( "fmt" + "os" "os/exec" "strings" ) @@ -14,7 +15,8 @@ type Cmd struct { } func NewCmd() *Cmd { - return &Cmd{logcmd: true, logout: true, cwd: ""} + log := os.Getenv("ALB_LOG_CMD") == "true" + return &Cmd{logcmd: log, logout: log, cwd: ""} } func (c *Cmd) Logout(logout bool) *Cmd { diff --git a/utils/test_utils/fake_alb_env.go b/utils/test_utils/fake_alb_env.go index 4f24403b..c3d28414 100644 --- a/utils/test_utils/fake_alb_env.go +++ b/utils/test_utils/fake_alb_env.go @@ -3,23 +3,13 @@ package test_utils import ( 
"context" "fmt" - "strings" - "time" "alauda.io/alb2/utils/log" "github.com/go-logr/logr" "k8s.io/client-go/rest" - . "github.com/onsi/gomega" - - . "github.com/onsi/ginkgo/v2" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" ) // FakeAlbEnv is a env which is could apply FakeResource @@ -61,121 +51,10 @@ func (a *FakeAlbEnv) ApplyFakes(fake FakeResource) error { return nil } -func (a *FakeAlbEnv) ClearFakes(fake FakeResource) error { - crs := fake.ListCr() - a.deleteAll(crs...) - - // make sure ns been deleted. - // https://book.kubebuilder.io/reference/envtest#namespace-usage-limitation - for _, ns := range fake.K8s.Namespaces { - for { - ns, err := a.kc.k8sClient.CoreV1().Namespaces().Get(a.ctx, ns.GetName(), metav1.GetOptions{}) - fmt.Printf("clearfakes ns %v err %v\n", ns, err) - if apierrors.IsNotFound(err) { - break - } - time.Sleep(1 * time.Second) - } - } - return nil -} - func (a *FakeAlbEnv) Stop() { a.e.Stop() } -func (a *FakeAlbEnv) deleteAll(objs ...client.Object) { - ctx := a.ctx - cfg := a.e.cfg - k8sClient := a.kc.GetClient() - timeout := 10 * time.Second - interval := 1 * time.Second - - RegisterFailHandler(Fail) - // copy from https://book.kubebuilder.io/reference/envtest#namespace-usage-limitation - clientGo, err := kubernetes.NewForConfig(cfg) - Expect(err).ShouldNot(HaveOccurred()) - for _, obj := range objs { - Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, obj))).Should(Succeed()) - - if ns, ok := obj.(*corev1.Namespace); ok { - // Normally the kube-controller-manager would handle finalization - // and garbage collection of namespaces, but with envtest, we aren't - // running a kube-controller-manager. Instead we're gonna approximate - // (poorly) the kube-controller-manager by explicitly deleting some - // resources within the namespace and then removing the `kubernetes` - // finalizer from the namespace resource so it can finish deleting. - // Note that any resources within the namespace that we don't - // successfully delete could reappear if the namespace is ever - // recreated with the same name. 
- - // Look up all namespaced resources under the discovery API - _, apiResources, err := clientGo.Discovery().ServerGroupsAndResources() - Expect(err).ShouldNot(HaveOccurred()) - namespacedGVKs := make(map[string]schema.GroupVersionKind) - for _, apiResourceList := range apiResources { - defaultGV, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) - Expect(err).ShouldNot(HaveOccurred()) - for _, r := range apiResourceList.APIResources { - if !r.Namespaced || strings.Contains(r.Name, "/") { - // skip non-namespaced and subresources - continue - } - gvk := schema.GroupVersionKind{ - Group: defaultGV.Group, - Version: defaultGV.Version, - Kind: r.Kind, - } - if r.Group != "" { - gvk.Group = r.Group - } - if r.Version != "" { - gvk.Version = r.Version - } - namespacedGVKs[gvk.String()] = gvk - } - } - - // Delete all namespaced resources in this namespace - for _, gvk := range namespacedGVKs { - var u unstructured.Unstructured - u.SetGroupVersionKind(gvk) - err := k8sClient.DeleteAllOf(ctx, &u, client.InNamespace(ns.Name)) - Expect(client.IgnoreNotFound(ignoreMethodNotAllowed(err))).ShouldNot(HaveOccurred()) - } - - Eventually(func() error { - key := client.ObjectKeyFromObject(ns) - if err := k8sClient.Get(ctx, key, ns); err != nil { - return client.IgnoreNotFound(err) - } - // remove `kubernetes` finalizer - const kubernetes = "kubernetes" - finalizers := []corev1.FinalizerName{} - for _, f := range ns.Spec.Finalizers { - if f != kubernetes { - finalizers = append(finalizers, f) - } - } - ns.Spec.Finalizers = finalizers - - // We have to use the k8s.io/client-go library here to expose - // ability to patch the /finalize subresource on the namespace - _, err = clientGo.CoreV1().Namespaces().Finalize(ctx, ns, metav1.UpdateOptions{}) - return err - }, timeout, interval).Should(Succeed()) - } - - Eventually(func() metav1.StatusReason { - key := client.ObjectKeyFromObject(obj) - if err := k8sClient.Get(ctx, key, obj); err != nil { - return apierrors.ReasonForError(err) - } - return "" - }, timeout, interval).Should(Equal(metav1.StatusReasonNotFound)) - } -} - func ignoreMethodNotAllowed(err error) error { if err != nil { if apierrors.ReasonForError(err) == metav1.StatusReasonMethodNotAllowed { diff --git a/utils/test_utils/fake_alb_env_test.go b/utils/test_utils/fake_alb_env_test.go index ccdab2f9..81d213c3 100644 --- a/utils/test_utils/fake_alb_env_test.go +++ b/utils/test_utils/fake_alb_env_test.go @@ -4,6 +4,7 @@ import ( "testing" albv2 "alauda.io/alb2/pkg/apis/alauda/v2beta1" + "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" k8sv1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" @@ -53,11 +54,10 @@ func TestFakeEnv(t *testing.T) { Namespaces: defaultNamespaces, }, } + defer ginkgo.GinkgoRecover() env := NewFakeEnv() env.AssertStart() err := env.ApplyFakes(res) assert.NoError(t, err) - err = env.ClearFakes(res) - assert.NoError(t, err) env.Stop() } diff --git a/utils/test_utils/ginkgo_logger.go b/utils/test_utils/ginkgo_logger.go index 5e68b17a..b581bb45 100644 --- a/utils/test_utils/ginkgo_logger.go +++ b/utils/test_utils/ginkgo_logger.go @@ -11,6 +11,8 @@ type GinkgoLogSink struct { prefix string } +// Deprecated +// 字符串中含有%时, %v的处理是错的.. 
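+// (translation: when the logged string contains a '%', the %v formatting is handled incorrectly)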
func GinkgoLog() logr.Logger { return logr.New(GinkgoLogSink{}) } diff --git a/utils/test_utils/initer.go b/utils/test_utils/initer.go index e0de262d..778d9666 100644 --- a/utils/test_utils/initer.go +++ b/utils/test_utils/initer.go @@ -41,7 +41,9 @@ func InitCrds(base string, cfg *rest.Config, crds []string) error { // create cpaas-system ns func InitAlbCr(base string, cfg *rest.Config) error { InitAlbNs(base, cfg) - InitCrds(base, cfg, AlbCrds()) + crds := AlbCrds() + fmt.Println("int crds", crds) + InitCrds(base, cfg, crds) return nil } diff --git a/utils/test_utils/json.go b/utils/test_utils/json.go new file mode 100644 index 00000000..7008e64d --- /dev/null +++ b/utils/test_utils/json.go @@ -0,0 +1,16 @@ +package test_utils + +import "github.com/wI2L/jsondiff" + +func JsonBelongsTO(left, right interface{}) (bool, interface{}, error) { + patch, err := jsondiff.Compare(left, right) + if err != nil { + return false, nil, err + } + for _, v := range patch { + if v.Type != "remove" { + return false, patch, nil + } + } + return true, patch, nil +} diff --git a/utils/test_utils/k8s_client_ext.go b/utils/test_utils/k8s_client_ext.go index 84442f7b..56974c94 100644 --- a/utils/test_utils/k8s_client_ext.go +++ b/utils/test_utils/k8s_client_ext.go @@ -2,6 +2,7 @@ package test_utils import ( "context" + "fmt" "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" @@ -165,3 +166,28 @@ func (f *K8sClient) CreateNsIfNotExist(name string) error { } return nil } + +func (f *K8sClient) GetPods(ns string, labelSel string) ([]corev1.Pod, error) { + pods, err := f.k8sClient.CoreV1().Pods(ns).List(f.ctx, metav1.ListOptions{LabelSelector: labelSel}) + if err != nil { + return nil, err + } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("should at least get one") + } + return pods.Items, nil +} + +func (f *K8sClient) GetPodIp(ns string, labelSel string) ([]string, error) { + ips := []string{} + pods, err := f.GetPods(ns, labelSel) + if err != nil { + return nil, err + } + for _, p := range pods { + if p.Status.Phase == "Running" { + ips = append(ips, p.Status.PodIP) + } + } + return ips, nil +} diff --git a/utils/test_utils/kubectl_ext.go b/utils/test_utils/kubectl_ext.go index 5184d35c..5f249046 100644 --- a/utils/test_utils/kubectl_ext.go +++ b/utils/test_utils/kubectl_ext.go @@ -4,10 +4,10 @@ import ( "fmt" "math/rand" "os" - "os/exec" "path" "strings" + "alauda.io/alb2/utils/log" "github.com/go-logr/logr" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -21,6 +21,14 @@ type Kubectl struct { log logr.Logger } +func KubectlViaEnv() (*Kubectl, error) { + cfg, err := RESTFromKubeConfigFile(os.Getenv("KUBECONFIG")) + if err != nil { + return nil, err + } + return NewKubectl("", cfg, log.L()), nil +} + // if base =="" it will create /tmp/kubectl-xx else create base/kubectl-xx func NewKubectl(base string, kubeCfg *rest.Config, log logr.Logger) *Kubectl { cfg := fmt.Sprintf("kubectl-%d", rand.Int()) @@ -81,20 +89,17 @@ func (k *Kubectl) Kubectl(cmds ...string) (string, error) { if len(cmds) == 1 { cmds = strings.Split(cmds[0], " ") } - cmds = append(cmds, "--kubeconfig", k.kubeCfgPath) - k.log.Info("cmd", "cmds", cmds) - cmd := exec.Command("kubectl", cmds...) - stdout, err := cmd.CombinedOutput() + out, err := NewCmd().Env(map[string]string{"KUBECONFIG": k.kubeCfgPath}).Call("kubectl", cmds...) 
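+	// kubectl now runs through Cmd with KUBECONFIG pointing at this instance's kubeconfig,
+	// rather than shelling out via exec.Command with an explicit --kubeconfig flag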
if err != nil { - return "", fmt.Errorf("eval %s %s err: %v", cmd, stdout, err) + return "", fmt.Errorf("eval |%s| %s err: %v", cmds, out, err) } - return string(stdout), nil + return string(out), nil } func (k *Kubectl) AssertKubectl(cmds ...string) string { ret, err := k.Kubectl(cmds...) assert.Nil(ginkgo.GinkgoT(), err, "") - return ret + return strings.TrimSpace(ret) } func (k *Kubectl) AssertKubectlOmgea(o gomega.Gomega, cmds ...string) string { diff --git a/utils/test_utils/ngxconf_parser_ext.go b/utils/test_utils/ngxconf_parser_ext.go deleted file mode 100644 index ee1f595e..00000000 --- a/utils/test_utils/ngxconf_parser_ext.go +++ /dev/null @@ -1,35 +0,0 @@ -package test_utils - -import ( - "fmt" - "io/ioutil" - "os/exec" - "strings" -) - -func PickStreamServerListen(cfgRaw string) ([]string, error) { - return jqNgxConf(cfgRaw, `jq -r ".config[0].parsed|.[]|select(.directive==\"stream\").block|.[]|select(.directive==\"server\").block|.[]|select(.directive==\"listen\").args| join(\" \")"`) -} - -func PickHttpServerListen(cfgRaw string) ([]string, error) { - return jqNgxConf(cfgRaw, `jq -r ".config[0].parsed|.[]|select(.directive==\"http\").block|.[]|select(.directive==\"server\").block|.[]|select(.directive==\"listen\").args| join(\" \")"`) -} - -// jqNgxConf use crossplane convert nginx.conf to json and use jq to query it. -// you need to install crossplane,jq,bash first. -// TODO a better way to parse nginx.conf. -func jqNgxConf(cfgRaw string, jq string) ([]string, error) { - f, err := ioutil.TempFile("", "ngx-conf.") - if err != nil { - return nil, err - } - defer f.Close() - f.WriteString(cfgRaw) - p := f.Name() - shell := fmt.Sprintf(`crossplane parse %s | %s`, p, jq) - out, err := exec.Command("bash", "-c", shell).CombinedOutput() - if err != nil { - return nil, err - } - return strings.Split(strings.TrimSpace(string(out)), "\n"), nil -} diff --git a/utils/test_utils/util.go b/utils/test_utils/util.go index 73e058f8..a3a69403 100644 --- a/utils/test_utils/util.go +++ b/utils/test_utils/util.go @@ -52,6 +52,14 @@ func GenCert(domain string) (key, cert string, err error) { return string(keyByte), string(certByte), nil } +func RESTFromKubeConfigFile(p string) (*rest.Config, error) { + cfg_bytes, err := os.ReadFile(p) + if err != nil { + return nil, err + } + return clientcmd.RESTConfigFromKubeConfig(cfg_bytes) +} + func RESTFromKubeConfig(raw string) (*rest.Config, error) { return clientcmd.RESTConfigFromKubeConfig([]byte(raw)) } diff --git a/utils/test_utils/yq.go b/utils/test_utils/yq.go new file mode 100644 index 00000000..0123ec78 --- /dev/null +++ b/utils/test_utils/yq.go @@ -0,0 +1,36 @@ +package test_utils + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +type Yq struct { + Base string +} + +func YqDo(raw string, cmd string) (string, error) { + // only use for test + raw = strings.TrimSpace(raw) + base, err := os.MkdirTemp("", "yq") + if err != nil { + return "", err + } + p := base + "/" + "x.yaml" + err = os.WriteFile(p, []byte(raw), 0o666) + if err != nil { + return "", err + } + sh := fmt.Sprintf(`#!/bin/bash +cat %s | %s +`, p, cmd) + os.WriteFile(base+"/x.sh", []byte(sh), 0o666) + sh_p := base + "/x.sh" + out, err := exec.Command("bash", sh_p).CombinedOutput() + if err != nil { + return "", fmt.Errorf("eval %s fail %v", sh_p, err) + } + return strings.TrimSpace(string(out)), nil +} diff --git a/utils/test_utils/yq_test.go b/utils/test_utils/yq_test.go new file mode 100644 index 00000000..559def51 --- /dev/null +++ b/utils/test_utils/yq_test.go @@ -0,0 +1,54 @@ 
+package test_utils + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestYq(t *testing.T) { + raw := ` +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: auth-check-cookies + namespace: default +spec: + rules: + - host: "auth-check-cookies" + http: + paths: + - backend: + service: + name: auth-server + port: + number: 80 + path: / + pathType: Prefix` + expect := strings.TrimSpace(` +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: auth-check-cookies + namespace: default +spec: + rules: + - host: "auth-check-cookies" + http: + paths: + - backend: + service: + name: auth-server + port: + number: 80 + path: / + pathType: Prefix + ingressClassName: nginx`) + out, err := YqDo(raw, `yq ".spec.ingressClassName=\"nginx\""`) + t.Logf("%v | %v", out, err) + assert.NoError(t, err) + _ = expect + _ = out + assert.Equal(t, expect, out) +}
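+	// note: yq appends the new ingressClassName key after the existing keys in the mapping,
+	// which is why the expected document lists it last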