From d9225a7310441508c5c11045403ec8f7ef348dd9 Mon Sep 17 00:00:00 2001 From: Hein Date: Fri, 19 Dec 2025 22:27:20 +0200 Subject: [PATCH] Vendor packages update --- go.mod | 9 +- go.sum | 14 + vendor/github.com/jinzhu/inflection/LICENSE | 21 + vendor/github.com/jinzhu/inflection/README.md | 55 + .../jinzhu/inflection/inflections.go | 273 +++ .../github.com/jinzhu/inflection/wercker.yml | 23 + .../github.com/puzpuzpuz/xsync/v3/.gitignore | 15 + .../puzpuzpuz/xsync/v3/BENCHMARKS.md | 133 ++ vendor/github.com/puzpuzpuz/xsync/v3/LICENSE | 201 +++ .../github.com/puzpuzpuz/xsync/v3/README.md | 195 ++ .../github.com/puzpuzpuz/xsync/v3/counter.go | 99 ++ vendor/github.com/puzpuzpuz/xsync/v3/map.go | 917 ++++++++++ vendor/github.com/puzpuzpuz/xsync/v3/mapof.go | 738 ++++++++ .../puzpuzpuz/xsync/v3/mpmcqueue.go | 125 ++ .../puzpuzpuz/xsync/v3/mpmcqueueof.go | 138 ++ .../github.com/puzpuzpuz/xsync/v3/rbmutex.go | 188 ++ .../puzpuzpuz/xsync/v3/spscqueue.go | 92 + .../puzpuzpuz/xsync/v3/spscqueueof.go | 96 + vendor/github.com/puzpuzpuz/xsync/v3/util.go | 66 + .../puzpuzpuz/xsync/v3/util_hash.go | 77 + vendor/github.com/tmthrgd/go-hex/.travis.yml | 11 + vendor/github.com/tmthrgd/go-hex/LICENSE | 82 + vendor/github.com/tmthrgd/go-hex/README.md | 108 ++ vendor/github.com/tmthrgd/go-hex/hex.go | 137 ++ vendor/github.com/tmthrgd/go-hex/hex_amd64.go | 94 + .../tmthrgd/go-hex/hex_decode_amd64.s | 303 ++++ .../tmthrgd/go-hex/hex_encode_amd64.s | 227 +++ vendor/github.com/tmthrgd/go-hex/hex_other.go | 36 + vendor/github.com/uptrace/bun/.gitignore | 4 + vendor/github.com/uptrace/bun/.prettierrc.yml | 6 + vendor/github.com/uptrace/bun/CHANGELOG.md | 1089 ++++++++++++ vendor/github.com/uptrace/bun/CONTRIBUTING.md | 34 + vendor/github.com/uptrace/bun/LICENSE | 24 + vendor/github.com/uptrace/bun/Makefile | 30 + vendor/github.com/uptrace/bun/README.md | 286 +++ vendor/github.com/uptrace/bun/bun.go | 98 + .../uptrace/bun/commitlint.config.js | 1 + vendor/github.com/uptrace/bun/db.go | 778 ++++++++ .../github.com/uptrace/bun/dialect/append.go | 105 ++ .../github.com/uptrace/bun/dialect/dialect.go | 31 + .../uptrace/bun/dialect/feature/feature.go | 98 + .../uptrace/bun/dialect/sqltype/sqltype.go | 16 + .../uptrace/bun/extra/bunjson/json.go | 26 + .../uptrace/bun/extra/bunjson/provider.go | 43 + vendor/github.com/uptrace/bun/hook.go | 112 ++ .../github.com/uptrace/bun/internal/flag.go | 16 + vendor/github.com/uptrace/bun/internal/hex.go | 43 + .../github.com/uptrace/bun/internal/logger.go | 54 + .../uptrace/bun/internal/map_key.go | 67 + .../uptrace/bun/internal/parser/parser.go | 169 ++ .../github.com/uptrace/bun/internal/safe.go | 11 + .../uptrace/bun/internal/tagparser/parser.go | 184 ++ .../github.com/uptrace/bun/internal/time.go | 61 + .../uptrace/bun/internal/underscore.go | 67 + .../github.com/uptrace/bun/internal/unsafe.go | 22 + .../github.com/uptrace/bun/internal/util.go | 87 + vendor/github.com/uptrace/bun/model.go | 208 +++ vendor/github.com/uptrace/bun/model_map.go | 186 ++ .../github.com/uptrace/bun/model_map_slice.go | 153 ++ vendor/github.com/uptrace/bun/model_scan.go | 56 + vendor/github.com/uptrace/bun/model_slice.go | 82 + .../uptrace/bun/model_table_has_many.go | 185 ++ .../github.com/uptrace/bun/model_table_m2m.go | 142 ++ .../uptrace/bun/model_table_slice.go | 136 ++ .../uptrace/bun/model_table_struct.go | 373 ++++ vendor/github.com/uptrace/bun/package.json | 8 + vendor/github.com/uptrace/bun/query_base.go | 1582 +++++++++++++++++ .../uptrace/bun/query_column_add.go | 150 ++ 
.../uptrace/bun/query_column_drop.go | 148 ++ vendor/github.com/uptrace/bun/query_delete.go | 452 +++++ .../uptrace/bun/query_index_create.go | 267 +++ .../uptrace/bun/query_index_drop.go | 134 ++ vendor/github.com/uptrace/bun/query_insert.go | 706 ++++++++ vendor/github.com/uptrace/bun/query_merge.go | 351 ++++ vendor/github.com/uptrace/bun/query_raw.go | 107 ++ vendor/github.com/uptrace/bun/query_select.go | 1376 ++++++++++++++ .../uptrace/bun/query_table_create.go | 427 +++++ .../uptrace/bun/query_table_drop.go | 176 ++ .../uptrace/bun/query_table_truncate.go | 155 ++ vendor/github.com/uptrace/bun/query_update.go | 677 +++++++ vendor/github.com/uptrace/bun/query_values.go | 250 +++ .../github.com/uptrace/bun/relation_join.go | 453 +++++ .../github.com/uptrace/bun/schema/append.go | 81 + .../uptrace/bun/schema/append_value.go | 316 ++++ .../github.com/uptrace/bun/schema/dialect.go | 194 ++ vendor/github.com/uptrace/bun/schema/field.go | 137 ++ vendor/github.com/uptrace/bun/schema/hook.go | 43 + .../github.com/uptrace/bun/schema/querygen.go | 291 +++ .../github.com/uptrace/bun/schema/reflect.go | 51 + .../github.com/uptrace/bun/schema/relation.go | 84 + vendor/github.com/uptrace/bun/schema/scan.go | 566 ++++++ .../github.com/uptrace/bun/schema/sqlfmt.go | 133 ++ .../github.com/uptrace/bun/schema/sqltype.go | 141 ++ vendor/github.com/uptrace/bun/schema/table.go | 1130 ++++++++++++ .../github.com/uptrace/bun/schema/tables.go | 114 ++ .../uptrace/bun/schema/zerochecker.go | 161 ++ vendor/github.com/uptrace/bun/util.go | 112 ++ vendor/github.com/uptrace/bun/version.go | 6 + .../vmihailenco/msgpack/v5/.prettierrc | 4 + .../vmihailenco/msgpack/v5/.travis.yml | 20 + .../vmihailenco/msgpack/v5/CHANGELOG.md | 75 + .../github.com/vmihailenco/msgpack/v5/LICENSE | 25 + .../vmihailenco/msgpack/v5/Makefile | 6 + .../vmihailenco/msgpack/v5/README.md | 100 ++ .../msgpack/v5/commitlint.config.js | 1 + .../vmihailenco/msgpack/v5/decode.go | 708 ++++++++ .../vmihailenco/msgpack/v5/decode_map.go | 356 ++++ .../vmihailenco/msgpack/v5/decode_number.go | 295 +++ .../vmihailenco/msgpack/v5/decode_query.go | 157 ++ .../vmihailenco/msgpack/v5/decode_slice.go | 198 +++ .../vmihailenco/msgpack/v5/decode_string.go | 192 ++ .../vmihailenco/msgpack/v5/decode_typgen.go | 46 + .../vmihailenco/msgpack/v5/decode_value.go | 251 +++ .../vmihailenco/msgpack/v5/encode.go | 270 +++ .../vmihailenco/msgpack/v5/encode_map.go | 225 +++ .../vmihailenco/msgpack/v5/encode_number.go | 252 +++ .../vmihailenco/msgpack/v5/encode_slice.go | 139 ++ .../vmihailenco/msgpack/v5/encode_value.go | 254 +++ .../github.com/vmihailenco/msgpack/v5/ext.go | 303 ++++ .../vmihailenco/msgpack/v5/intern.go | 236 +++ .../vmihailenco/msgpack/v5/msgpack.go | 52 + .../msgpack/v5/msgpcode/msgpcode.go | 88 + .../vmihailenco/msgpack/v5/package.json | 4 + .../github.com/vmihailenco/msgpack/v5/safe.go | 13 + .../github.com/vmihailenco/msgpack/v5/time.go | 151 ++ .../vmihailenco/msgpack/v5/types.go | 413 +++++ .../vmihailenco/msgpack/v5/unsafe.go | 22 + .../vmihailenco/msgpack/v5/version.go | 6 + .../vmihailenco/tagparser/v2/.travis.yml | 19 + .../vmihailenco/tagparser/v2/LICENSE | 25 + .../vmihailenco/tagparser/v2/Makefile | 9 + .../vmihailenco/tagparser/v2/README.md | 24 + .../tagparser/v2/internal/parser/parser.go | 82 + .../vmihailenco/tagparser/v2/internal/safe.go | 11 + .../tagparser/v2/internal/unsafe.go | 22 + .../vmihailenco/tagparser/v2/tagparser.go | 166 ++ vendor/golang.org/x/sys/LICENSE | 27 + vendor/golang.org/x/sys/PATENTS | 22 + 
vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 + .../golang.org/x/sys/cpu/asm_darwin_x86_gc.s | 17 + vendor/golang.org/x/sys/cpu/byteorder.go | 66 + vendor/golang.org/x/sys/cpu/cpu.go | 341 ++++ vendor/golang.org/x/sys/cpu/cpu_aix.go | 33 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 210 +++ vendor/golang.org/x/sys/cpu/cpu_arm64.s | 42 + vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go | 61 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 13 + vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 15 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.s | 26 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 12 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 37 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 25 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 120 ++ .../golang.org/x/sys/cpu/cpu_linux_loong64.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 30 + .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 160 ++ .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_loong64.go | 50 + vendor/golang.org/x/sys/cpu/cpu_loong64.s | 13 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 15 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 ++ .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 9 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 11 + .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 + .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_other_x86.go | 11 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 32 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 ++ vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 162 ++ vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/endian_big.go | 10 + vendor/golang.org/x/sys/cpu/endian_little.go | 10 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 71 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 53 + vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 + .../x/sys/cpu/runtime_auxv_go121.go | 18 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 26 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 35 + .../x/sys/cpu/syscall_darwin_x86_gc.go | 98 + vendor/modules.txt | 32 + 196 files changed, 28595 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/jinzhu/inflection/LICENSE create mode 100644 vendor/github.com/jinzhu/inflection/README.md create mode 100644 vendor/github.com/jinzhu/inflection/inflections.go create mode 100644 vendor/github.com/jinzhu/inflection/wercker.yml create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/.gitignore create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/LICENSE create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/README.md create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/counter.go create mode 100644 
vendor/github.com/puzpuzpuz/xsync/v3/map.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/mapof.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/util.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go create mode 100644 vendor/github.com/tmthrgd/go-hex/.travis.yml create mode 100644 vendor/github.com/tmthrgd/go-hex/LICENSE create mode 100644 vendor/github.com/tmthrgd/go-hex/README.md create mode 100644 vendor/github.com/tmthrgd/go-hex/hex.go create mode 100644 vendor/github.com/tmthrgd/go-hex/hex_amd64.go create mode 100644 vendor/github.com/tmthrgd/go-hex/hex_decode_amd64.s create mode 100644 vendor/github.com/tmthrgd/go-hex/hex_encode_amd64.s create mode 100644 vendor/github.com/tmthrgd/go-hex/hex_other.go create mode 100644 vendor/github.com/uptrace/bun/.gitignore create mode 100644 vendor/github.com/uptrace/bun/.prettierrc.yml create mode 100644 vendor/github.com/uptrace/bun/CHANGELOG.md create mode 100644 vendor/github.com/uptrace/bun/CONTRIBUTING.md create mode 100644 vendor/github.com/uptrace/bun/LICENSE create mode 100644 vendor/github.com/uptrace/bun/Makefile create mode 100644 vendor/github.com/uptrace/bun/README.md create mode 100644 vendor/github.com/uptrace/bun/bun.go create mode 100644 vendor/github.com/uptrace/bun/commitlint.config.js create mode 100644 vendor/github.com/uptrace/bun/db.go create mode 100644 vendor/github.com/uptrace/bun/dialect/append.go create mode 100644 vendor/github.com/uptrace/bun/dialect/dialect.go create mode 100644 vendor/github.com/uptrace/bun/dialect/feature/feature.go create mode 100644 vendor/github.com/uptrace/bun/dialect/sqltype/sqltype.go create mode 100644 vendor/github.com/uptrace/bun/extra/bunjson/json.go create mode 100644 vendor/github.com/uptrace/bun/extra/bunjson/provider.go create mode 100644 vendor/github.com/uptrace/bun/hook.go create mode 100644 vendor/github.com/uptrace/bun/internal/flag.go create mode 100644 vendor/github.com/uptrace/bun/internal/hex.go create mode 100644 vendor/github.com/uptrace/bun/internal/logger.go create mode 100644 vendor/github.com/uptrace/bun/internal/map_key.go create mode 100644 vendor/github.com/uptrace/bun/internal/parser/parser.go create mode 100644 vendor/github.com/uptrace/bun/internal/safe.go create mode 100644 vendor/github.com/uptrace/bun/internal/tagparser/parser.go create mode 100644 vendor/github.com/uptrace/bun/internal/time.go create mode 100644 vendor/github.com/uptrace/bun/internal/underscore.go create mode 100644 vendor/github.com/uptrace/bun/internal/unsafe.go create mode 100644 vendor/github.com/uptrace/bun/internal/util.go create mode 100644 vendor/github.com/uptrace/bun/model.go create mode 100644 vendor/github.com/uptrace/bun/model_map.go create mode 100644 vendor/github.com/uptrace/bun/model_map_slice.go create mode 100644 vendor/github.com/uptrace/bun/model_scan.go create mode 100644 vendor/github.com/uptrace/bun/model_slice.go create mode 100644 vendor/github.com/uptrace/bun/model_table_has_many.go create mode 100644 vendor/github.com/uptrace/bun/model_table_m2m.go create mode 100644 vendor/github.com/uptrace/bun/model_table_slice.go create mode 100644 
vendor/github.com/uptrace/bun/model_table_struct.go create mode 100644 vendor/github.com/uptrace/bun/package.json create mode 100644 vendor/github.com/uptrace/bun/query_base.go create mode 100644 vendor/github.com/uptrace/bun/query_column_add.go create mode 100644 vendor/github.com/uptrace/bun/query_column_drop.go create mode 100644 vendor/github.com/uptrace/bun/query_delete.go create mode 100644 vendor/github.com/uptrace/bun/query_index_create.go create mode 100644 vendor/github.com/uptrace/bun/query_index_drop.go create mode 100644 vendor/github.com/uptrace/bun/query_insert.go create mode 100644 vendor/github.com/uptrace/bun/query_merge.go create mode 100644 vendor/github.com/uptrace/bun/query_raw.go create mode 100644 vendor/github.com/uptrace/bun/query_select.go create mode 100644 vendor/github.com/uptrace/bun/query_table_create.go create mode 100644 vendor/github.com/uptrace/bun/query_table_drop.go create mode 100644 vendor/github.com/uptrace/bun/query_table_truncate.go create mode 100644 vendor/github.com/uptrace/bun/query_update.go create mode 100644 vendor/github.com/uptrace/bun/query_values.go create mode 100644 vendor/github.com/uptrace/bun/relation_join.go create mode 100644 vendor/github.com/uptrace/bun/schema/append.go create mode 100644 vendor/github.com/uptrace/bun/schema/append_value.go create mode 100644 vendor/github.com/uptrace/bun/schema/dialect.go create mode 100644 vendor/github.com/uptrace/bun/schema/field.go create mode 100644 vendor/github.com/uptrace/bun/schema/hook.go create mode 100644 vendor/github.com/uptrace/bun/schema/querygen.go create mode 100644 vendor/github.com/uptrace/bun/schema/reflect.go create mode 100644 vendor/github.com/uptrace/bun/schema/relation.go create mode 100644 vendor/github.com/uptrace/bun/schema/scan.go create mode 100644 vendor/github.com/uptrace/bun/schema/sqlfmt.go create mode 100644 vendor/github.com/uptrace/bun/schema/sqltype.go create mode 100644 vendor/github.com/uptrace/bun/schema/table.go create mode 100644 vendor/github.com/uptrace/bun/schema/tables.go create mode 100644 vendor/github.com/uptrace/bun/schema/zerochecker.go create mode 100644 vendor/github.com/uptrace/bun/util.go create mode 100644 vendor/github.com/uptrace/bun/version.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/.prettierrc create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/.travis.yml create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/LICENSE create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/Makefile create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/README.md create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode_map.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode_number.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode_query.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode_string.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/decode_value.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/encode.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/encode_map.go create mode 100644 
vendor/github.com/vmihailenco/msgpack/v5/encode_number.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/encode_value.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/ext.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/intern.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/msgpack.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/package.json create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/safe.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/time.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/types.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/unsafe.go create mode 100644 vendor/github.com/vmihailenco/msgpack/v5/version.go create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/.travis.yml create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/LICENSE create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/Makefile create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/README.md create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go create mode 100644 vendor/github.com/vmihailenco/tagparser/v2/tagparser.go create mode 100644 vendor/golang.org/x/sys/LICENSE create mode 100644 vendor/golang.org/x/sys/PATENTS create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 
vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go diff --git a/go.mod b/go.mod index d4fac52..c936f27 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,13 @@ module git.warky.dev/wdevs/relspecgo -go 1.24 +go 1.24.0 require ( github.com/google/uuid v1.6.0 github.com/jackc/pgx/v5 v5.7.6 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 + github.com/uptrace/bun v1.2.16 gopkg.in/yaml.v3 v3.0.1 ) @@ -15,10 +16,16 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/spf13/pflag v1.0.10 // indirect + github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect + github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect golang.org/x/crypto v0.41.0 // indirect + golang.org/x/sys v0.38.0 // indirect golang.org/x/text v0.28.0 // indirect ) diff --git a/go.sum b/go.sum index 7160511..18dba6b 100644 --- a/go.sum +++ b/go.sum @@ -15,6 +15,8 @@ github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -22,6 +24,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= @@ -36,11 +40,21 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= +github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= +github.com/uptrace/bun v1.2.16 h1:QlObi6ZIK5Ao7kAALnh91HWYNZUBbVwye52fmlQM9kc= +github.com/uptrace/bun v1.2.16/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE new file mode 100644 index 0000000..a1ca9a0 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 - Jinzhu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md new file mode 100644 index 0000000..a3de336 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/README.md @@ -0,0 +1,55 @@ +# Inflection + +Inflection pluralizes and singularizes English nouns + +[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930) + +## Basic Usage + +```go +inflection.Plural("person") => "people" +inflection.Plural("Person") => "People" +inflection.Plural("PERSON") => "PEOPLE" +inflection.Plural("bus") => "buses" +inflection.Plural("BUS") => "BUSES" +inflection.Plural("Bus") => "Buses" + +inflection.Singular("people") => "person" +inflection.Singular("People") => "Person" +inflection.Singular("PEOPLE") => "PERSON" +inflection.Singular("buses") => "bus" +inflection.Singular("BUSES") => "BUS" +inflection.Singular("Buses") => "Bus" + +inflection.Plural("FancyPerson") => "FancyPeople" +inflection.Singular("FancyPeople") => "FancyPerson" +``` + +## Register Rules + +Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) + +If you want to register more rules, follow: + +``` +inflection.AddUncountable("fish") +inflection.AddIrregular("person", "people") +inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" +inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" +``` + +## Contributing + +You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do. + +## Author + +**jinzhu** + +* +* +* + +## License + +Released under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go new file mode 100644 index 0000000..606263b --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/inflections.go @@ -0,0 +1,273 @@ +/* +Package inflection pluralizes and singularizes English nouns. 
+ + inflection.Plural("person") => "people" + inflection.Plural("Person") => "People" + inflection.Plural("PERSON") => "PEOPLE" + + inflection.Singular("people") => "person" + inflection.Singular("People") => "Person" + inflection.Singular("PEOPLE") => "PERSON" + + inflection.Plural("FancyPerson") => "FancydPeople" + inflection.Singular("FancyPeople") => "FancydPerson" + +Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) + +If you want to register more rules, follow: + + inflection.AddUncountable("fish") + inflection.AddIrregular("person", "people") + inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" + inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" +*/ +package inflection + +import ( + "regexp" + "strings" +) + +type inflection struct { + regexp *regexp.Regexp + replace string +} + +// Regular is a regexp find replace inflection +type Regular struct { + find string + replace string +} + +// Irregular is a hard replace inflection, +// containing both singular and plural forms +type Irregular struct { + singular string + plural string +} + +// RegularSlice is a slice of Regular inflections +type RegularSlice []Regular + +// IrregularSlice is a slice of Irregular inflections +type IrregularSlice []Irregular + +var pluralInflections = RegularSlice{ + {"([a-z])$", "${1}s"}, + {"s$", "s"}, + {"^(ax|test)is$", "${1}es"}, + {"(octop|vir)us$", "${1}i"}, + {"(octop|vir)i$", "${1}i"}, + {"(alias|status)$", "${1}es"}, + {"(bu)s$", "${1}ses"}, + {"(buffal|tomat)o$", "${1}oes"}, + {"([ti])um$", "${1}a"}, + {"([ti])a$", "${1}a"}, + {"sis$", "ses"}, + {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"}, + {"(hive)$", "${1}s"}, + {"([^aeiouy]|qu)y$", "${1}ies"}, + {"(x|ch|ss|sh)$", "${1}es"}, + {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"}, + {"^(m|l)ouse$", "${1}ice"}, + {"^(m|l)ice$", "${1}ice"}, + {"^(ox)$", "${1}en"}, + {"^(oxen)$", "${1}"}, + {"(quiz)$", "${1}zes"}, +} + +var singularInflections = RegularSlice{ + {"s$", ""}, + {"(ss)$", "${1}"}, + {"(n)ews$", "${1}ews"}, + {"([ti])a$", "${1}um"}, + {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"}, + {"(^analy)(sis|ses)$", "${1}sis"}, + {"([^f])ves$", "${1}fe"}, + {"(hive)s$", "${1}"}, + {"(tive)s$", "${1}"}, + {"([lr])ves$", "${1}f"}, + {"([^aeiouy]|qu)ies$", "${1}y"}, + {"(s)eries$", "${1}eries"}, + {"(m)ovies$", "${1}ovie"}, + {"(c)ookies$", "${1}ookie"}, + {"(x|ch|ss|sh)es$", "${1}"}, + {"^(m|l)ice$", "${1}ouse"}, + {"(bus)(es)?$", "${1}"}, + {"(o)es$", "${1}"}, + {"(shoe)s$", "${1}"}, + {"(cris|test)(is|es)$", "${1}is"}, + {"^(a)x[ie]s$", "${1}xis"}, + {"(octop|vir)(us|i)$", "${1}us"}, + {"(alias|status)(es)?$", "${1}"}, + {"^(ox)en", "${1}"}, + {"(vert|ind)ices$", "${1}ex"}, + {"(matr)ices$", "${1}ix"}, + {"(quiz)zes$", "${1}"}, + {"(database)s$", "${1}"}, +} + +var irregularInflections = IrregularSlice{ + {"person", "people"}, + {"man", "men"}, + {"child", "children"}, + {"sex", "sexes"}, + {"move", "moves"}, + {"mombie", "mombies"}, +} + +var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"} + +var compiledPluralMaps []inflection +var compiledSingularMaps []inflection + +func compile() { + compiledPluralMaps = []inflection{} + compiledSingularMaps = []inflection{} + for _, uncountable := range uncountableInflections { + inf := inflection{ + regexp: 
regexp.MustCompile("^(?i)(" + uncountable + ")$"), + replace: "${1}", + } + compiledPluralMaps = append(compiledPluralMaps, inf) + compiledSingularMaps = append(compiledSingularMaps, inf) + } + + for _, value := range irregularInflections { + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)}, + inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)}, + inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural}, + } + compiledPluralMaps = append(compiledPluralMaps, infs...) + } + + for _, value := range irregularInflections { + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)}, + inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)}, + inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular}, + } + compiledSingularMaps = append(compiledSingularMaps, infs...) + } + + for i := len(pluralInflections) - 1; i >= 0; i-- { + value := pluralInflections[i] + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, + inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, + inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, + } + compiledPluralMaps = append(compiledPluralMaps, infs...) + } + + for i := len(singularInflections) - 1; i >= 0; i-- { + value := singularInflections[i] + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, + inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, + inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, + } + compiledSingularMaps = append(compiledSingularMaps, infs...) + } +} + +func init() { + compile() +} + +// AddPlural adds a plural inflection +func AddPlural(find, replace string) { + pluralInflections = append(pluralInflections, Regular{find, replace}) + compile() +} + +// AddSingular adds a singular inflection +func AddSingular(find, replace string) { + singularInflections = append(singularInflections, Regular{find, replace}) + compile() +} + +// AddIrregular adds an irregular inflection +func AddIrregular(singular, plural string) { + irregularInflections = append(irregularInflections, Irregular{singular, plural}) + compile() +} + +// AddUncountable adds an uncountable inflection +func AddUncountable(values ...string) { + uncountableInflections = append(uncountableInflections, values...) 
+ compile() +} + +// GetPlural retrieves the plural inflection values +func GetPlural() RegularSlice { + plurals := make(RegularSlice, len(pluralInflections)) + copy(plurals, pluralInflections) + return plurals +} + +// GetSingular retrieves the singular inflection values +func GetSingular() RegularSlice { + singulars := make(RegularSlice, len(singularInflections)) + copy(singulars, singularInflections) + return singulars +} + +// GetIrregular retrieves the irregular inflection values +func GetIrregular() IrregularSlice { + irregular := make(IrregularSlice, len(irregularInflections)) + copy(irregular, irregularInflections) + return irregular +} + +// GetUncountable retrieves the uncountable inflection values +func GetUncountable() []string { + uncountables := make([]string, len(uncountableInflections)) + copy(uncountables, uncountableInflections) + return uncountables +} + +// SetPlural sets the plural inflections slice +func SetPlural(inflections RegularSlice) { + pluralInflections = inflections + compile() +} + +// SetSingular sets the singular inflections slice +func SetSingular(inflections RegularSlice) { + singularInflections = inflections + compile() +} + +// SetIrregular sets the irregular inflections slice +func SetIrregular(inflections IrregularSlice) { + irregularInflections = inflections + compile() +} + +// SetUncountable sets the uncountable inflections slice +func SetUncountable(inflections []string) { + uncountableInflections = inflections + compile() +} + +// Plural converts a word to its plural form +func Plural(str string) string { + for _, inflection := range compiledPluralMaps { + if inflection.regexp.MatchString(str) { + return inflection.regexp.ReplaceAllString(str, inflection.replace) + } + } + return str +} + +// Singular converts a word to its singular form +func Singular(str string) string { + for _, inflection := range compiledSingularMaps { + if inflection.regexp.MatchString(str) { + return inflection.regexp.ReplaceAllString(str, inflection.replace) + } + } + return str +} diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml new file mode 100644 index 0000000..5e6ce98 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/wercker.yml @@ -0,0 +1,23 @@ +box: golang + +build: + steps: + - setup-go-workspace + + # Gets the dependencies + - script: + name: go get + code: | + go get + + # Build the project + - script: + name: go build + code: | + go build ./... + + # Test the project + - script: + name: go test + code: | + go test ./... 
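For reference, here is a minimal sketch of how the vendored inflection package above is used. The API calls are taken from the package's README and doc comment; the `octopus`/`octopodes` registration is an illustrative assumption, not something this patch configures:

```go
package main

import (
	"fmt"

	"github.com/jinzhu/inflection"
)

func main() {
	// Standard Rails-derived rules work out of the box.
	fmt.Println(inflection.Plural("person"))  // people
	fmt.Println(inflection.Singular("buses")) // bus

	// Registering an extra rule recompiles the internal rule set;
	// irregular forms take precedence over the regular regexp rules.
	inflection.AddIrregular("octopus", "octopodes") // illustrative assumption
	fmt.Println(inflection.Plural("octopus"))       // octopodes
}
```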
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/.gitignore b/vendor/github.com/puzpuzpuz/xsync/v3/.gitignore new file mode 100644 index 0000000..66fd13c --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md b/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md new file mode 100644 index 0000000..aaa72fa --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md @@ -0,0 +1,133 @@ +# xsync benchmarks + +If you're interested in `MapOf` comparison with some of the popular concurrent hash maps written in Go, check [this](https://github.com/cornelk/hashmap/pull/70) and [this](https://github.com/alphadose/haxmap/pull/22) PRs. + +The below results were obtained for xsync v2.3.1 on a c6g.metal EC2 instance (64 CPU, 128GB RAM) running Linux and Go 1.19.3. I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks. + +The following commands were used to run the benchmarks: +```bash +$ go test -run='^$' -cpu=1,2,4,8,16,32,64 -bench . -count=30 -timeout=0 | tee bench.txt +$ benchstat bench.txt | tee benchstat.txt +``` + +The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output. + +Please note that `MapOf` got a number of optimizations since v2.3.1, so the current result is likely to be different. + +### Counter vs. atomic int64 + +``` +name time/op +Counter 27.3ns ± 1% +Counter-2 27.2ns ±11% +Counter-4 15.3ns ± 8% +Counter-8 7.43ns ± 7% +Counter-16 3.70ns ±10% +Counter-32 1.77ns ± 3% +Counter-64 0.96ns ±10% +AtomicInt64 7.60ns ± 0% +AtomicInt64-2 12.6ns ±13% +AtomicInt64-4 13.5ns ±14% +AtomicInt64-8 12.7ns ± 9% +AtomicInt64-16 12.8ns ± 8% +AtomicInt64-32 13.0ns ± 6% +AtomicInt64-64 12.9ns ± 7% +``` + +Here `time/op` stands for average time spent on operation. If you divide `10^9` by the result in nanoseconds per operation, you'd get the throughput in operations per second. Thus, the ideal theoretical scalability of a concurrent data structure implies that the reported `time/op` decreases proportionally with the increased number of CPU cores. On the contrary, if the measured time per operation increases when run on more cores, it means performance degradation. + +### MapOf vs. 
sync.Map + +1,000 `[int, int]` entries with a warm-up, 100% Loads: +``` +IntegerMapOf_WarmUp/reads=100% 24.0ns ± 0% +IntegerMapOf_WarmUp/reads=100%-2 12.0ns ± 0% +IntegerMapOf_WarmUp/reads=100%-4 6.02ns ± 0% +IntegerMapOf_WarmUp/reads=100%-8 3.01ns ± 0% +IntegerMapOf_WarmUp/reads=100%-16 1.50ns ± 0% +IntegerMapOf_WarmUp/reads=100%-32 0.75ns ± 0% +IntegerMapOf_WarmUp/reads=100%-64 0.38ns ± 0% +IntegerMapStandard_WarmUp/reads=100% 55.3ns ± 0% +IntegerMapStandard_WarmUp/reads=100%-2 27.6ns ± 0% +IntegerMapStandard_WarmUp/reads=100%-4 16.1ns ± 3% +IntegerMapStandard_WarmUp/reads=100%-8 8.35ns ± 7% +IntegerMapStandard_WarmUp/reads=100%-16 4.24ns ± 7% +IntegerMapStandard_WarmUp/reads=100%-32 2.18ns ± 6% +IntegerMapStandard_WarmUp/reads=100%-64 1.11ns ± 3% +``` + +1,000 `[int, int]` entries with a warm-up, 99% Loads, 0.5% Stores, 0.5% Deletes: +``` +IntegerMapOf_WarmUp/reads=99% 31.0ns ± 0% +IntegerMapOf_WarmUp/reads=99%-2 16.4ns ± 1% +IntegerMapOf_WarmUp/reads=99%-4 8.42ns ± 0% +IntegerMapOf_WarmUp/reads=99%-8 4.41ns ± 0% +IntegerMapOf_WarmUp/reads=99%-16 2.38ns ± 2% +IntegerMapOf_WarmUp/reads=99%-32 1.37ns ± 4% +IntegerMapOf_WarmUp/reads=99%-64 0.85ns ± 2% +IntegerMapStandard_WarmUp/reads=99% 121ns ± 1% +IntegerMapStandard_WarmUp/reads=99%-2 109ns ± 3% +IntegerMapStandard_WarmUp/reads=99%-4 115ns ± 4% +IntegerMapStandard_WarmUp/reads=99%-8 114ns ± 2% +IntegerMapStandard_WarmUp/reads=99%-16 105ns ± 2% +IntegerMapStandard_WarmUp/reads=99%-32 97.0ns ± 3% +IntegerMapStandard_WarmUp/reads=99%-64 98.0ns ± 2% +``` + +1,000 `[int, int]` entries with a warm-up, 75% Loads, 12.5% Stores, 12.5% Deletes: +``` +IntegerMapOf_WarmUp/reads=75%-reads 46.2ns ± 1% +IntegerMapOf_WarmUp/reads=75%-reads-2 36.7ns ± 2% +IntegerMapOf_WarmUp/reads=75%-reads-4 22.0ns ± 1% +IntegerMapOf_WarmUp/reads=75%-reads-8 12.8ns ± 2% +IntegerMapOf_WarmUp/reads=75%-reads-16 7.69ns ± 1% +IntegerMapOf_WarmUp/reads=75%-reads-32 5.16ns ± 1% +IntegerMapOf_WarmUp/reads=75%-reads-64 4.91ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads 156ns ± 0% +IntegerMapStandard_WarmUp/reads=75%-reads-2 177ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-4 197ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-8 221ns ± 2% +IntegerMapStandard_WarmUp/reads=75%-reads-16 242ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-32 258ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-64 264ns ± 1% +``` + +### MPMCQueue vs. Go channels + +Concurrent producers and consumers (1:1), queue/channel size 1,000, some work done by both producers and consumers: +``` +QueueProdConsWork100 252ns ± 0% +QueueProdConsWork100-2 206ns ± 5% +QueueProdConsWork100-4 136ns ±12% +QueueProdConsWork100-8 110ns ± 6% +QueueProdConsWork100-16 108ns ± 2% +QueueProdConsWork100-32 102ns ± 2% +QueueProdConsWork100-64 101ns ± 0% +ChanProdConsWork100 283ns ± 0% +ChanProdConsWork100-2 406ns ±21% +ChanProdConsWork100-4 549ns ± 7% +ChanProdConsWork100-8 754ns ± 7% +ChanProdConsWork100-16 828ns ± 7% +ChanProdConsWork100-32 810ns ± 8% +ChanProdConsWork100-64 832ns ± 4% +``` + +### RBMutex vs. 
sync.RWMutex + +The writer locks on each 100,000 iteration with some work in the critical section for both readers and the writer: +``` +RBMutexWorkWrite100000 146ns ± 0% +RBMutexWorkWrite100000-2 73.3ns ± 0% +RBMutexWorkWrite100000-4 36.7ns ± 0% +RBMutexWorkWrite100000-8 18.6ns ± 0% +RBMutexWorkWrite100000-16 9.83ns ± 3% +RBMutexWorkWrite100000-32 5.53ns ± 0% +RBMutexWorkWrite100000-64 4.04ns ± 3% +RWMutexWorkWrite100000 121ns ± 0% +RWMutexWorkWrite100000-2 128ns ± 1% +RWMutexWorkWrite100000-4 124ns ± 2% +RWMutexWorkWrite100000-8 101ns ± 1% +RWMutexWorkWrite100000-16 92.9ns ± 1% +RWMutexWorkWrite100000-32 89.9ns ± 1% +RWMutexWorkWrite100000-64 88.4ns ± 1% +``` diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE b/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/README.md b/vendor/github.com/puzpuzpuz/xsync/v3/README.md new file mode 100644 index 0000000..3971553 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/README.md @@ -0,0 +1,195 @@ +[![GoDoc reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v3) +[![GoReport](https://goreportcard.com/badge/github.com/puzpuzpuz/xsync/v3)](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v3) +[![codecov](https://codecov.io/gh/puzpuzpuz/xsync/branch/main/graph/badge.svg)](https://codecov.io/gh/puzpuzpuz/xsync) + +# xsync + +Concurrent data structures for Go. Aims to provide more scalable alternatives for some of the data structures from the standard `sync` package, but not only. + +Covered with tests following the approach described [here](https://puzpuzpuz.dev/testing-concurrent-code-for-fun-and-profit). + +## Benchmarks + +Benchmark results may be found [here](BENCHMARKS.md). I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks on a beefy multicore machine. + +Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashMap](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ConcurrentHashMap.html) and `xsync.MapOf` is available [here](https://puzpuzpuz.dev/concurrent-map-in-go-vs-java-yet-another-meaningless-benchmark). + +## Usage + +The latest xsync major version is v3, so `/v3` suffix should be used when importing the library: + +```go +import ( + "github.com/puzpuzpuz/xsync/v3" +) +``` + +*Note for pre-v3 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial. + +### Counter + +A `Counter` is a striped `int64` counter inspired by the `j.u.c.a.LongAdder` class from the Java standard library. + +```go +c := xsync.NewCounter() +// increment and decrement the counter +c.Inc() +c.Dec() +// read the current value +v := c.Value() +``` + +Works better in comparison with a single atomically updated `int64` counter in high contention scenarios. + +### Map + +A `Map` is like a concurrent hash table-based map. It follows the interface of `sync.Map` with a number of valuable extensions like `Compute` or `Size`. 
+
+```go
+m := xsync.NewMap()
+m.Store("foo", "bar")
+v, ok := m.Load("foo")
+s := m.Size()
+```
+
+`Map` uses a modified version of the Cache-Line Hash Table (CLHT) data structure: https://github.com/LPD-EPFL/CLHT
+
+CLHT is built around the idea of organizing the hash table in cache-line-sized buckets, so that on all modern CPUs update operations complete with minimal cache-line transfer. Also, `Get` operations are obstruction-free and involve no writes to shared memory, hence no mutexes or any other sort of locks. Due to this design, in all considered scenarios `Map` outperforms `sync.Map`.
+
+One important difference from `sync.Map` is that only string keys are supported. That's because the Go standard library does not expose the built-in hash functions for `interface{}` values.
+
+`MapOf[K, V]` is an implementation with parametrized key and value types. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, it puts less pressure on the GC and performs fewer atomic operations on reads.
+
+```go
+m := xsync.NewMapOf[string, string]()
+m.Store("foo", "bar")
+v, ok := m.Load("foo")
+```
+
+Apart from CLHT, `MapOf` borrows ideas from Java's `j.u.c.ConcurrentHashMap` (immutable K/V pair structs instead of atomic snapshots) and C++'s `absl::flat_hash_map` (meta memory and SWAR-based lookups). It also has a denser memory layout compared with `Map`. Long story short, `MapOf` should be preferred over `Map` when possible.
+
+An important difference from `Map` is that `MapOf` supports arbitrary `comparable` key types:
+
+```go
+type Point struct {
+	x int32
+	y int32
+}
+m := xsync.NewMapOf[Point, int]()
+m.Store(Point{42, 42}, 42)
+v, ok := m.Load(Point{42, 42})
+```
+
+Apart from the `Range` method available for map iteration, there are also `ToPlainMap`/`ToPlainMapOf` utility functions to convert a `Map`/`MapOf` to a built-in Go `map`:
+```go
+m := xsync.NewMapOf[int, int]()
+m.Store(42, 42)
+pm := xsync.ToPlainMapOf(m)
+```
+
+Both `Map` and `MapOf` use the built-in Go hash function, which has DoS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DoS protection, you may provide a custom hash function when creating a `MapOf`. For instance, the Murmur3 finalizer does a decent job when it comes to integers:
+
+```go
+m := xsync.NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
+	h := uint64(i)
+	h = (h ^ (h >> 33)) * 0xff51afd7ed558ccd
+	h = (h ^ (h >> 33)) * 0xc4ceb9fe1a85ec53
+	return h ^ (h >> 33)
+})
+```
+
+When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into consideration.
+
+### SPSCQueue
+
+An `SPSCQueue` is a bounded single-producer single-consumer concurrent queue: at most one goroutine may publish items to the queue, and at most one goroutine may consume those items.
+
+```go
+q := xsync.NewSPSCQueue(1024)
+// producer inserts an item into the queue
+// optimistic insertion attempt; doesn't block
+inserted := q.TryEnqueue("bar")
+// consumer obtains an item from the queue
+// optimistic obtain attempt; doesn't block
+item, ok := q.TryDequeue() // interface{} pointing to a string
+```
+
+`SPSCQueueOf[I]` is an implementation with a parametrized item type. It is available for Go 1.19 or later.
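+Compared to `SPSCQueue`, the generic variant stores items of the concrete type `I`, so enqueue and dequeue avoid `interface{}` boxing: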
+ +```go +q := xsync.NewSPSCQueueOf[string](1024) +inserted := q.TryEnqueue("foo") +item, ok := q.TryDequeue() // string +``` + +The queue is based on the data structure from this [article](https://rigtorp.se/ringbuffer). The idea is to reduce the CPU cache coherency traffic by keeping cached copies of read and write indexes used by producer and consumer respectively. + +### MPMCQueue + +A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue. + +```go +q := xsync.NewMPMCQueue(1024) +// producer optimistically inserts an item into the queue +// optimistic insertion attempt; doesn't block +inserted := q.TryEnqueue("bar") +// consumer obtains an item from the queue +// optimistic obtain attempt; doesn't block +item, ok := q.TryDequeue() // interface{} pointing to a string +``` + +`MPMCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later. + +```go +q := xsync.NewMPMCQueueOf[string](1024) +inserted := q.TryEnqueue("foo") +item, ok := q.TryDequeue() // string +``` + +The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in its turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, has blocking producers and consumers. + +The idea of the algorithm is to allow parallelism for concurrent producers and consumers by introducing the notion of tickets, i.e. values of two counters, one per producers/consumers. An atomic increment of one of those counters is the only noticeable contention point in queue operations. The rest of the operation avoids contention on writes thanks to the turn-based read/write access for each of the queue items. + +In essence, `MPMCQueue` is a specialized queue for scenarios where there are multiple concurrent producers and consumers of a single queue running on a large multicore machine. + +To get the optimal performance, you may want to set the queue size to be large enough, say, an order of magnitude greater than the number of producers/consumers, to allow producers and consumers to progress with their queue operations in parallel most of the time. + +### RBMutex + +A `RBMutex` is a reader-biased reader/writer mutual exclusion lock. The lock can be held by many readers or a single writer. + +```go +mu := xsync.NewRBMutex() +// reader lock calls return a token +t := mu.RLock() +// the token must be later used to unlock the mutex +mu.RUnlock(t) +// writer locks are the same as in sync.RWMutex +mu.Lock() +mu.Unlock() +``` + +`RBMutex` is based on a modified version of BRAVO (Biased Locking for Reader-Writer Locks) algorithm: https://arxiv.org/pdf/1810.01553.pdf + +The idea of the algorithm is to build on top of an existing reader-writer mutex and introduce a fast path for readers. On the fast path, reader lock attempts are sharded over an internal array based on the reader identity (a token in the case of Golang). This means that readers do not contend over a single atomic counter like it's done in, say, `sync.RWMutex` allowing for better scalability in terms of cores. + +Hence, by the design `RBMutex` is a specialized mutex for scenarios, such as caches, where the vast majority of locks are acquired by readers and write lock acquire attempts are infrequent. 
In such scenarios, `RBMutex` should perform better than `sync.RWMutex` on large multicore machines.
+
+`RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods.
+
+Apart from blocking methods, `RBMutex` also has methods for optimistic locking:
+```go
+mu := xsync.NewRBMutex()
+if locked, t := mu.TryRLock(); locked {
+	// critical reader section...
+	mu.RUnlock(t)
+}
+if mu.TryLock() {
+	// critical writer section...
+	mu.Unlock()
+}
+```
+
+## License
+
+Licensed under MIT.
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/counter.go b/vendor/github.com/puzpuzpuz/xsync/v3/counter.go
new file mode 100644
index 0000000..4d4dc87
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/counter.go
@@ -0,0 +1,99 @@
+package xsync
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// pool for P tokens
+var ptokenPool sync.Pool
+
+// a P token is used to point at the current OS thread (P)
+// on which the goroutine is run; the exact identity of the thread,
+// as well as P migration tolerance, is not important since
+// it's used as a best-effort mechanism for assigning
+// concurrent operations (goroutines) to different stripes of
+// the counter
+type ptoken struct {
+	idx uint32
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 4]byte
+}
+
+// A Counter is a striped int64 counter.
+//
+// Should be preferred over a single atomically updated int64
+// counter in high contention scenarios.
+//
+// A Counter must not be copied after first use.
+type Counter struct {
+	stripes []cstripe
+	mask    uint32
+}
+
+type cstripe struct {
+	c int64
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 8]byte
+}
+
+// NewCounter creates a new Counter instance.
+func NewCounter() *Counter {
+	nstripes := nextPowOf2(parallelism())
+	c := Counter{
+		stripes: make([]cstripe, nstripes),
+		mask:    nstripes - 1,
+	}
+	return &c
+}
+
+// Inc increments the counter by 1.
+func (c *Counter) Inc() {
+	c.Add(1)
+}
+
+// Dec decrements the counter by 1.
+func (c *Counter) Dec() {
+	c.Add(-1)
+}
+
+// Add adds the delta to the counter.
+func (c *Counter) Add(delta int64) {
+	t, ok := ptokenPool.Get().(*ptoken)
+	if !ok {
+		t = new(ptoken)
+		t.idx = runtime_fastrand()
+	}
+	for {
+		stripe := &c.stripes[t.idx&c.mask]
+		cnt := atomic.LoadInt64(&stripe.c)
+		if atomic.CompareAndSwapInt64(&stripe.c, cnt, cnt+delta) {
+			break
+		}
+		// Retry with another randomly selected stripe.
+		t.idx = runtime_fastrand()
+	}
+	ptokenPool.Put(t)
+}
+
+// Value returns the current counter value.
+// The returned value may not include all of the latest operations in
+// the presence of concurrent modifications of the counter.
+func (c *Counter) Value() int64 {
+	v := int64(0)
+	for i := 0; i < len(c.stripes); i++ {
+		stripe := &c.stripes[i]
+		v += atomic.LoadInt64(&stripe.c)
+	}
+	return v
+}
+
+// Reset resets the counter to zero.
+// This method should only be used when it is known that there are
+// no concurrent modifications of the counter.
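+// For example, it is safe to call Reset between benchmark iterations
+// once all goroutines that update the counter have finished.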
+func (c *Counter) Reset() {
+	for i := 0; i < len(c.stripes); i++ {
+		stripe := &c.stripes[i]
+		atomic.StoreInt64(&stripe.c, 0)
+	}
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/map.go b/vendor/github.com/puzpuzpuz/xsync/v3/map.go
new file mode 100644
index 0000000..c7837e9
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/map.go
@@ -0,0 +1,917 @@
+package xsync
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+type mapResizeHint int
+
+const (
+	mapGrowHint   mapResizeHint = 0
+	mapShrinkHint mapResizeHint = 1
+	mapClearHint  mapResizeHint = 2
+)
+
+const (
+	// number of Map entries per bucket; 3 entries lead to size of 64B
+	// (one cache line) on 64-bit machines
+	entriesPerMapBucket = 3
+	// threshold fraction of table occupation to start a table shrinking
+	// when deleting the last entry in a bucket chain
+	mapShrinkFraction = 128
+	// map load factor to trigger a table resize during insertion;
+	// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
+	// key-value pairs (this is a soft limit)
+	mapLoadFactor = 0.75
+	// minimal table size, i.e. number of buckets; thus, minimal map
+	// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
+	defaultMinMapTableLen = 32
+	// minimum counter stripes to use
+	minMapCounterLen = 8
+	// maximum counter stripes to use; stands for around 4KB of memory
+	maxMapCounterLen = 32
+)
+
+var (
+	topHashMask       = uint64((1<<20)-1) << 44
+	topHashEntryMasks = [3]uint64{
+		topHashMask,
+		topHashMask >> 20,
+		topHashMask >> 40,
+	}
+)
+
+// Map is like a Go map[string]interface{} but is safe for concurrent
+// use by multiple goroutines without additional locking or
+// coordination. It follows the interface of sync.Map with
+// a number of valuable extensions like Compute or Size.
+//
+// A Map must not be copied after first use.
+//
+// Map uses a modified version of Cache-Line Hash Table (CLHT)
+// data structure: https://github.com/LPD-EPFL/CLHT
+//
+// CLHT is built around the idea of organizing the hash table in
+// cache-line-sized buckets, so that on all modern CPUs update
+// operations complete with at most one cache-line transfer.
+// Also, Get operations involve no write to memory, as well as no
+// mutexes or any other sort of locks. Due to this design, in all
+// considered scenarios Map outperforms sync.Map.
+//
+// One important difference from sync.Map is that only string keys
+// are supported. That's because the Go standard library does not
+// expose the built-in hash functions for interface{} values.
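+//
+// For arbitrary comparable key types, see MapOf.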
+type Map struct { + totalGrowths int64 + totalShrinks int64 + resizing int64 // resize in progress flag; updated atomically + resizeMu sync.Mutex // only used along with resizeCond + resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) + table unsafe.Pointer // *mapTable + minTableLen int + growOnly bool +} + +type mapTable struct { + buckets []bucketPadded + // striped counter for number of table entries; + // used to determine if a table shrinking is needed + // occupies min(buckets_memory/1024, 64KB) of memory + size []counterStripe + seed uint64 +} + +type counterStripe struct { + c int64 + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - 8]byte +} + +type bucketPadded struct { + //lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs + pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte + bucket +} + +type bucket struct { + next unsafe.Pointer // *bucketPadded + keys [entriesPerMapBucket]unsafe.Pointer + values [entriesPerMapBucket]unsafe.Pointer + // topHashMutex is a 2-in-1 value. + // + // It contains packed top 20 bits (20 MSBs) of hash codes for keys + // stored in the bucket: + // | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex | + // | 20 bits | 20 bits | 20 bits | 3 bits | 1 bit | + // + // The least significant bit is used for the mutex (TTAS spinlock). + topHashMutex uint64 +} + +type rangeEntry struct { + key unsafe.Pointer + value unsafe.Pointer +} + +// MapConfig defines configurable Map/MapOf options. +type MapConfig struct { + sizeHint int + growOnly bool +} + +// WithPresize configures new Map/MapOf instance with capacity enough +// to hold sizeHint entries. The capacity is treated as the minimal +// capacity meaning that the underlying hash table will never shrink +// to a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +func WithPresize(sizeHint int) func(*MapConfig) { + return func(c *MapConfig) { + c.sizeHint = sizeHint + } +} + +// WithGrowOnly configures new Map/MapOf instance to be grow-only. +// This means that the underlying hash table grows in capacity when +// new keys are added, but does not shrink when keys are deleted. +// The only exception to this rule is the Clear method which +// shrinks the hash table back to the initial capacity. +func WithGrowOnly() func(*MapConfig) { + return func(c *MapConfig) { + c.growOnly = true + } +} + +// NewMap creates a new Map instance configured with the given +// options. +func NewMap(options ...func(*MapConfig)) *Map { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapBucket, + } + for _, o := range options { + o(c) + } + + m := &Map{} + m.resizeCond = *sync.NewCond(&m.resizeMu) + var table *mapTable + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { + table = newMapTable(defaultMinMapTableLen) + } else { + tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor)) + table = newMapTable(int(tableLen)) + } + m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly + atomic.StorePointer(&m.table, unsafe.Pointer(table)) + return m +} + +// NewMapPresized creates a new Map instance with capacity enough to hold +// sizeHint entries. The capacity is treated as the minimal capacity +// meaning that the underlying hash table will never shrink to +// a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +// +// Deprecated: use NewMap in combination with WithPresize. 
+func NewMapPresized(sizeHint int) *Map { + return NewMap(WithPresize(sizeHint)) +} + +func newMapTable(minTableLen int) *mapTable { + buckets := make([]bucketPadded, minTableLen) + counterLen := minTableLen >> 10 + if counterLen < minMapCounterLen { + counterLen = minMapCounterLen + } else if counterLen > maxMapCounterLen { + counterLen = maxMapCounterLen + } + counter := make([]counterStripe, counterLen) + t := &mapTable{ + buckets: buckets, + size: counter, + seed: makeSeed(), + } + return t +} + +// ToPlainMap returns a native map with a copy of xsync Map's +// contents. The copied xsync Map should not be modified while +// this call is made. If the copied Map is modified, the copying +// behavior is the same as in the Range method. +func ToPlainMap(m *Map) map[string]interface{} { + pm := make(map[string]interface{}) + if m != nil { + m.Range(func(key string, value interface{}) bool { + pm[key] = value + return true + }) + } + return pm +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map) Load(key string) (value interface{}, ok bool) { + table := (*mapTable)(atomic.LoadPointer(&m.table)) + hash := hashString(key, table.seed) + bidx := uint64(len(table.buckets)-1) & hash + b := &table.buckets[bidx] + for { + topHashes := atomic.LoadUint64(&b.topHashMutex) + for i := 0; i < entriesPerMapBucket; i++ { + if !topHashMatch(hash, topHashes, i) { + continue + } + atomic_snapshot: + // Start atomic snapshot. + vp := atomic.LoadPointer(&b.values[i]) + kp := atomic.LoadPointer(&b.keys[i]) + if kp != nil && vp != nil { + if key == derefKey(kp) { + if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) { + // Atomic snapshot succeeded. + return derefValue(vp), true + } + // Concurrent update/remove. Go for another spin. + goto atomic_snapshot + } + } + } + bptr := atomic.LoadPointer(&b.next) + if bptr == nil { + return + } + b = (*bucketPadded)(bptr) + } +} + +// Store sets the value for a key. +func (m *Map) Store(key string, value interface{}) { + m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return value, false + }, + true, + false, + ) +} + +// LoadAndStore returns the existing value for the key if present, +// while setting the new value for the key. +// It stores the new value and returns the existing one, if present. +// The loaded result is true if the existing value was loaded, +// false otherwise. +func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrCompute returns the existing value for the key if present. +// Otherwise, it computes the value using the provided function, and +// then stores and returns the computed value. The loaded result is +// true if the value was loaded, false if computed. +// +// This call locks a hash table bucket while the compute function +// is executed. 
It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return valueFn(), false + }, + true, + false, + ) +} + +// LoadOrTryCompute returns the existing value for the key if present. +// Otherwise, it tries to compute the value using the provided function +// and, if successful, stores and returns the computed value. The loaded +// result is true if the value was loaded, or false if computed (whether +// successfully or not). If the compute attempt was cancelled (due to an +// error, for example), a nil value will be returned. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *Map) LoadOrTryCompute( + key string, + valueFn func() (newValue interface{}, cancel bool), +) (value interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + nv, c := valueFn() + if !c { + return nv, false + } + return nil, true + }, + true, + false, + ) +} + +// Compute either sets the computed new value for the key or deletes +// the value for the key. When the delete result of the valueFn function +// is set to true, the value will be deleted, if it exists. When delete +// is set to false, the value is updated to the newValue. +// The ok result indicates whether value was computed and stored, thus, is +// present in the map. The actual result contains the new value in cases where +// the value was computed and stored. See the example for a few use cases. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *Map) Compute( + key string, + valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool), +) (actual interface{}, ok bool) { + return m.doCompute(key, valueFn, false, true) +} + +// LoadAndDelete deletes the value for a key, returning the previous +// value if any. The loaded result reports whether the key was +// present. +func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) { + return m.doCompute( + key, + func(value interface{}, loaded bool) (interface{}, bool) { + return value, true + }, + false, + false, + ) +} + +// Delete deletes the value for a key. +func (m *Map) Delete(key string) { + m.doCompute( + key, + func(value interface{}, loaded bool) (interface{}, bool) { + return value, true + }, + false, + false, + ) +} + +func (m *Map) doCompute( + key string, + valueFn func(oldValue interface{}, loaded bool) (interface{}, bool), + loadIfExists, computeOnly bool, +) (interface{}, bool) { + // Read-only path. + if loadIfExists { + if v, ok := m.Load(key); ok { + return v, !computeOnly + } + } + // Write path. 
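+	// The loop below locks the root bucket of the key's chain via the
+	// spinlock bit embedded in its topHashMutex word, re-checks the
+	// resize state, and then scans the chain for the key, remembering
+	// the first empty slot in case an insertion is needed.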
+ for { + compute_attempt: + var ( + emptyb *bucketPadded + emptyidx int + hintNonEmpty int + ) + table := (*mapTable)(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + hash := hashString(key, table.seed) + bidx := uint64(len(table.buckets)-1) & hash + rootb := &table.buckets[bidx] + lockBucket(&rootb.topHashMutex) + // The following two checks must go in reverse to what's + // in the resize method. + if m.resizeInProgress() { + // Resize is in progress. Wait, then go for another attempt. + unlockBucket(&rootb.topHashMutex) + m.waitForResize() + goto compute_attempt + } + if m.newerTableExists(table) { + // Someone resized the table. Go for another attempt. + unlockBucket(&rootb.topHashMutex) + goto compute_attempt + } + b := rootb + for { + topHashes := atomic.LoadUint64(&b.topHashMutex) + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] == nil { + if emptyb == nil { + emptyb = b + emptyidx = i + } + continue + } + if !topHashMatch(hash, topHashes, i) { + hintNonEmpty++ + continue + } + if key == derefKey(b.keys[i]) { + vp := b.values[i] + if loadIfExists { + unlockBucket(&rootb.topHashMutex) + return derefValue(vp), !computeOnly + } + // In-place update/delete. + // We get a copy of the value via an interface{} on each call, + // thus the live value pointers are unique. Otherwise atomic + // snapshot won't be correct in case of multiple Store calls + // using the same value. + oldValue := derefValue(vp) + newValue, del := valueFn(oldValue, true) + if del { + // Deletion. + // First we update the value, then the key. + // This is important for atomic snapshot states. + atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i)) + atomic.StorePointer(&b.values[i], nil) + atomic.StorePointer(&b.keys[i], nil) + leftEmpty := false + if hintNonEmpty == 0 { + leftEmpty = isEmptyBucket(b) + } + unlockBucket(&rootb.topHashMutex) + table.addSize(bidx, -1) + // Might need to shrink the table. + if leftEmpty { + m.resize(table, mapShrinkHint) + } + return oldValue, !computeOnly + } + nvp := unsafe.Pointer(&newValue) + if assertionsEnabled && vp == nvp { + panic("non-unique value pointer") + } + atomic.StorePointer(&b.values[i], nvp) + unlockBucket(&rootb.topHashMutex) + if computeOnly { + // Compute expects the new value to be returned. + return newValue, true + } + // LoadAndStore expects the old value to be returned. + return oldValue, true + } + hintNonEmpty++ + } + if b.next == nil { + if emptyb != nil { + // Insertion into an existing bucket. + var zeroV interface{} + newValue, del := valueFn(zeroV, false) + if del { + unlockBucket(&rootb.topHashMutex) + return zeroV, false + } + // First we update the value, then the key. + // This is important for atomic snapshot states. + topHashes = atomic.LoadUint64(&emptyb.topHashMutex) + atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx)) + atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue)) + atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key)) + unlockBucket(&rootb.topHashMutex) + table.addSize(bidx, 1) + return newValue, computeOnly + } + growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor + if table.sumSize() > int64(growThreshold) { + // Need to grow the table. Then go for another attempt. + unlockBucket(&rootb.topHashMutex) + m.resize(table, mapGrowHint) + goto compute_attempt + } + // Insertion into a new bucket. 
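+				// The new bucket is fully initialized (key, value, top hash)
+				// before being linked into the chain with an atomic store of
+				// the next pointer, so concurrent readers never observe a
+				// partially filled bucket.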
+ var zeroV interface{} + newValue, del := valueFn(zeroV, false) + if del { + unlockBucket(&rootb.topHashMutex) + return newValue, false + } + // Create and append a bucket. + newb := new(bucketPadded) + newb.keys[0] = unsafe.Pointer(&key) + newb.values[0] = unsafe.Pointer(&newValue) + newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0) + atomic.StorePointer(&b.next, unsafe.Pointer(newb)) + unlockBucket(&rootb.topHashMutex) + table.addSize(bidx, 1) + return newValue, computeOnly + } + b = (*bucketPadded)(b.next) + } + } +} + +func (m *Map) newerTableExists(table *mapTable) bool { + curTablePtr := atomic.LoadPointer(&m.table) + return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table)) +} + +func (m *Map) resizeInProgress() bool { + return atomic.LoadInt64(&m.resizing) == 1 +} + +func (m *Map) waitForResize() { + m.resizeMu.Lock() + for m.resizeInProgress() { + m.resizeCond.Wait() + } + m.resizeMu.Unlock() +} + +func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) { + knownTableLen := len(knownTable.buckets) + // Fast path for shrink attempts. + if hint == mapShrinkHint { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) { + return + } + } + // Slow path. + if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) { + // Someone else started resize. Wait for it to finish. + m.waitForResize() + return + } + var newTable *mapTable + table := (*mapTable)(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + switch hint { + case mapGrowHint: + // Grow the table with factor of 2. + atomic.AddInt64(&m.totalGrowths, 1) + newTable = newMapTable(tableLen << 1) + case mapShrinkHint: + shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction) + if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold { + // Shrink the table with factor of 2. + atomic.AddInt64(&m.totalShrinks, 1) + newTable = newMapTable(tableLen >> 1) + } else { + // No need to shrink. Wake up all waiters and give up. + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() + return + } + case mapClearHint: + newTable = newMapTable(m.minTableLen) + default: + panic(fmt.Sprintf("unexpected resize hint: %d", hint)) + } + // Copy the data only if we're not clearing the map. + if hint != mapClearHint { + for i := 0; i < tableLen; i++ { + copied := copyBucket(&table.buckets[i], newTable) + newTable.addSizePlain(uint64(i), copied) + } + } + // Publish the new table and wake up all waiters. 
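+	// The table pointer is published before the resizing flag is
+	// cleared, so writers that re-check newerTableExists are guaranteed
+	// to observe the new table.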
+ atomic.StorePointer(&m.table, unsafe.Pointer(newTable)) + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() +} + +func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) { + rootb := b + lockBucket(&rootb.topHashMutex) + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] != nil { + k := derefKey(b.keys[i]) + hash := hashString(k, destTable.seed) + bidx := uint64(len(destTable.buckets)-1) & hash + destb := &destTable.buckets[bidx] + appendToBucket(hash, b.keys[i], b.values[i], destb) + copied++ + } + } + if b.next == nil { + unlockBucket(&rootb.topHashMutex) + return + } + b = (*bucketPadded)(b.next) + } +} + +func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) { + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] == nil { + b.keys[i] = keyPtr + b.values[i] = valPtr + b.topHashMutex = storeTopHash(hash, b.topHashMutex, i) + return + } + } + if b.next == nil { + newb := new(bucketPadded) + newb.keys[0] = keyPtr + newb.values[0] = valPtr + newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0) + b.next = unsafe.Pointer(newb) + return + } + b = (*bucketPadded)(b.next) + } +} + +func isEmptyBucket(rootb *bucketPadded) bool { + b := rootb + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] != nil { + return false + } + } + if b.next == nil { + return true + } + b = (*bucketPadded)(b.next) + } +} + +// Range calls f sequentially for each key and value present in the +// map. If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot +// of the Map's contents: no key will be visited more than once, but +// if the value for any key is stored or deleted concurrently, Range +// may reflect any mapping for that key from any point during the +// Range call. +// +// It is safe to modify the map while iterating it, including entry +// creation, modification and deletion. However, the concurrent +// modification rule apply, i.e. the changes may be not reflected +// in the subsequently iterated entries. +func (m *Map) Range(f func(key string, value interface{}) bool) { + var zeroEntry rangeEntry + // Pre-allocate array big enough to fit entries for most hash tables. + bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket) + tablep := atomic.LoadPointer(&m.table) + table := *(*mapTable)(tablep) + for i := range table.buckets { + rootb := &table.buckets[i] + b := rootb + // Prevent concurrent modifications and copy all entries into + // the intermediate slice. + lockBucket(&rootb.topHashMutex) + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] != nil { + bentries = append(bentries, rangeEntry{ + key: b.keys[i], + value: b.values[i], + }) + } + } + if b.next == nil { + unlockBucket(&rootb.topHashMutex) + break + } + b = (*bucketPadded)(b.next) + } + // Call the function for all copied entries. + for j := range bentries { + k := derefKey(bentries[j].key) + v := derefValue(bentries[j].value) + if !f(k, v) { + return + } + // Remove the reference to avoid preventing the copied + // entries from being GCed until this method finishes. + bentries[j] = zeroEntry + } + bentries = bentries[:0] + } +} + +// Clear deletes all keys and values currently stored in the map. +func (m *Map) Clear() { + table := (*mapTable)(atomic.LoadPointer(&m.table)) + m.resize(table, mapClearHint) +} + +// Size returns current size of the map. 
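+// The size is derived from the table's striped counters, so the result
+// may not reflect concurrent updates that are still in flight.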
+func (m *Map) Size() int {
+	table := (*mapTable)(atomic.LoadPointer(&m.table))
+	return int(table.sumSize())
+}
+
+func derefKey(keyPtr unsafe.Pointer) string {
+	return *(*string)(keyPtr)
+}
+
+func derefValue(valuePtr unsafe.Pointer) interface{} {
+	return *(*interface{})(valuePtr)
+}
+
+func lockBucket(mu *uint64) {
+	for {
+		var v uint64
+		for {
+			v = atomic.LoadUint64(mu)
+			if v&1 != 1 {
+				break
+			}
+			runtime.Gosched()
+		}
+		if atomic.CompareAndSwapUint64(mu, v, v|1) {
+			return
+		}
+		runtime.Gosched()
+	}
+}
+
+func unlockBucket(mu *uint64) {
+	v := atomic.LoadUint64(mu)
+	atomic.StoreUint64(mu, v&^1)
+}
+
+func topHashMatch(hash, topHashes uint64, idx int) bool {
+	if topHashes&(1<<(idx+1)) == 0 {
+		// Entry is not present.
+		return false
+	}
+	hash = hash & topHashMask
+	topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
+	return hash == topHashes
+}
+
+func storeTopHash(hash, topHashes uint64, idx int) uint64 {
+	// Zero out top hash at idx.
+	topHashes = topHashes &^ topHashEntryMasks[idx]
+	// Chop top 20 MSBs of the given hash and position them at idx.
+	hash = (hash & topHashMask) >> (20 * idx)
+	// Store the MSBs.
+	topHashes = topHashes | hash
+	// Mark the entry as present.
+	return topHashes | (1 << (idx + 1))
+}
+
+func eraseTopHash(topHashes uint64, idx int) uint64 {
+	return topHashes &^ (1 << (idx + 1))
+}
+
+func (table *mapTable) addSize(bucketIdx uint64, delta int) {
+	cidx := uint64(len(table.size)-1) & bucketIdx
+	atomic.AddInt64(&table.size[cidx].c, int64(delta))
+}
+
+func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
+	cidx := uint64(len(table.size)-1) & bucketIdx
+	table.size[cidx].c += int64(delta)
+}
+
+func (table *mapTable) sumSize() int64 {
+	sum := int64(0)
+	for i := range table.size {
+		sum += atomic.LoadInt64(&table.size[i].c)
+	}
+	return sum
+}
+
+// MapStats is Map/MapOf statistics.
+//
+// Warning: map statistics are intended to be used for diagnostic
+// purposes, not for production code. This means that breaking changes
+// may be introduced into this struct even between minor releases.
+type MapStats struct {
+	// RootBuckets is the number of root buckets in the hash table.
+	// Each bucket holds a few entries.
+	RootBuckets int
+	// TotalBuckets is the total number of buckets in the hash table,
+	// including root and their chained buckets. Each bucket holds
+	// a few entries.
+	TotalBuckets int
+	// EmptyBuckets is the number of buckets that hold no entries.
+	EmptyBuckets int
+	// Capacity is the Map/MapOf capacity, i.e. the total number of
+	// entries that all buckets can physically hold. This number
+	// does not consider the load factor.
+	Capacity int
+	// Size is the exact number of entries stored in the map.
+	Size int
+	// Counter is the number of entries stored in the map according
+	// to the internal atomic counter. In case of concurrent map
+	// modifications this number may be different from Size.
+	Counter int
+	// CounterLen is the number of internal atomic counter stripes.
+	// This number may grow with the map capacity to improve
+	// multithreaded scalability.
+	CounterLen int
+	// MinEntries is the minimum number of entries per bucket chain,
+	// i.e. a root bucket and its chained buckets.
+	MinEntries int
+	// MaxEntries is the maximum number of entries per bucket chain,
+	// i.e. a root bucket and its chained buckets.
+	MaxEntries int
+	// TotalGrowths is the number of times the hash table grew.
+	TotalGrowths int64
+	// TotalShrinks is the number of times the hash table shrank.
+	TotalShrinks int64
+}
+
+// ToString returns a string representation of the map stats.
+func (s *MapStats) ToString() string {
+	var sb strings.Builder
+	sb.WriteString("MapStats{\n")
+	sb.WriteString(fmt.Sprintf("RootBuckets: %d\n", s.RootBuckets))
+	sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
+	sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
+	sb.WriteString(fmt.Sprintf("Capacity: %d\n", s.Capacity))
+	sb.WriteString(fmt.Sprintf("Size: %d\n", s.Size))
+	sb.WriteString(fmt.Sprintf("Counter: %d\n", s.Counter))
+	sb.WriteString(fmt.Sprintf("CounterLen: %d\n", s.CounterLen))
+	sb.WriteString(fmt.Sprintf("MinEntries: %d\n", s.MinEntries))
+	sb.WriteString(fmt.Sprintf("MaxEntries: %d\n", s.MaxEntries))
+	sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
+	sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
+	sb.WriteString("}\n")
+	return sb.String()
+}
+
+// Stats returns statistics for the Map. Just like other map
+// methods, this one is thread-safe. Yet it's an O(N) operation,
+// so it should be used only for diagnostics or debugging purposes.
+func (m *Map) Stats() MapStats {
+	stats := MapStats{
+		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
+		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
+		MinEntries:   math.MaxInt32,
+	}
+	table := (*mapTable)(atomic.LoadPointer(&m.table))
+	stats.RootBuckets = len(table.buckets)
+	stats.Counter = int(table.sumSize())
+	stats.CounterLen = len(table.size)
+	for i := range table.buckets {
+		nentries := 0
+		b := &table.buckets[i]
+		stats.TotalBuckets++
+		for {
+			nentriesLocal := 0
+			stats.Capacity += entriesPerMapBucket
+			for i := 0; i < entriesPerMapBucket; i++ {
+				if atomic.LoadPointer(&b.keys[i]) != nil {
+					stats.Size++
+					nentriesLocal++
+				}
+			}
+			nentries += nentriesLocal
+			if nentriesLocal == 0 {
+				stats.EmptyBuckets++
+			}
+			if b.next == nil {
+				break
+			}
+			b = (*bucketPadded)(atomic.LoadPointer(&b.next))
+			stats.TotalBuckets++
+		}
+		if nentries < stats.MinEntries {
+			stats.MinEntries = nentries
+		}
+		if nentries > stats.MaxEntries {
+			stats.MaxEntries = nentries
+		}
+	}
+	return stats
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
new file mode 100644
index 0000000..d1ce9b2
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
@@ -0,0 +1,738 @@
+package xsync
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+const (
+	// number of MapOf entries per bucket; 5 entries lead to size of 64B
+	// (one cache line) on 64-bit machines
+	entriesPerMapOfBucket        = 5
+	defaultMeta           uint64 = 0x8080808080808080
+	metaMask              uint64 = 0xffffffffff
+	defaultMetaMasked     uint64 = defaultMeta & metaMask
+	emptyMetaSlot         uint8  = 0x80
+)
+
+// MapOf is like a Go map[K]V but is safe for concurrent
+// use by multiple goroutines without additional locking or
+// coordination. It follows the interface of sync.Map with
+// a number of valuable extensions like Compute or Size.
+//
+// A MapOf must not be copied after first use.
+//
+// MapOf uses a modified version of Cache-Line Hash Table (CLHT)
+// data structure: https://github.com/LPD-EPFL/CLHT
+//
+// CLHT is built around the idea of organizing the hash table in
+// cache-line-sized buckets, so that on all modern CPUs update
+// operations complete with at most one cache-line transfer.
+// Also, Get operations involve no write to memory, as well as no +// mutexes or any other sort of locks. Due to this design, in all +// considered scenarios MapOf outperforms sync.Map. +// +// MapOf also borrows ideas from Java's j.u.c.ConcurrentHashMap +// (immutable K/V pair structs instead of atomic snapshots) +// and C++'s absl::flat_hash_map (meta memory and SWAR-based +// lookups). +type MapOf[K comparable, V any] struct { + totalGrowths int64 + totalShrinks int64 + resizing int64 // resize in progress flag; updated atomically + resizeMu sync.Mutex // only used along with resizeCond + resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) + table unsafe.Pointer // *mapOfTable + hasher func(K, uint64) uint64 + minTableLen int + growOnly bool +} + +type mapOfTable[K comparable, V any] struct { + buckets []bucketOfPadded + // striped counter for number of table entries; + // used to determine if a table shrinking is needed + // occupies min(buckets_memory/1024, 64KB) of memory + size []counterStripe + seed uint64 +} + +// bucketOfPadded is a CL-sized map bucket holding up to +// entriesPerMapOfBucket entries. +type bucketOfPadded struct { + //lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs + pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte + bucketOf +} + +type bucketOf struct { + meta uint64 + entries [entriesPerMapOfBucket]unsafe.Pointer // *entryOf + next unsafe.Pointer // *bucketOfPadded + mu sync.Mutex +} + +// entryOf is an immutable map entry. +type entryOf[K comparable, V any] struct { + key K + value V +} + +// NewMapOf creates a new MapOf instance configured with the given +// options. +func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] { + return NewMapOfWithHasher[K, V](defaultHasher[K](), options...) +} + +// NewMapOfWithHasher creates a new MapOf instance configured with +// the given hasher and options. The hash function is used instead +// of the built-in hash function configured when a map is created +// with the NewMapOf function. +func NewMapOfWithHasher[K comparable, V any]( + hasher func(K, uint64) uint64, + options ...func(*MapConfig), +) *MapOf[K, V] { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapOfBucket, + } + for _, o := range options { + o(c) + } + + m := &MapOf[K, V]{} + m.resizeCond = *sync.NewCond(&m.resizeMu) + m.hasher = hasher + var table *mapOfTable[K, V] + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapOfBucket { + table = newMapOfTable[K, V](defaultMinMapTableLen) + } else { + tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapOfBucket) / mapLoadFactor)) + table = newMapOfTable[K, V](int(tableLen)) + } + m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly + atomic.StorePointer(&m.table, unsafe.Pointer(table)) + return m +} + +// NewMapOfPresized creates a new MapOf instance with capacity enough +// to hold sizeHint entries. The capacity is treated as the minimal capacity +// meaning that the underlying hash table will never shrink to +// a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +// +// Deprecated: use NewMapOf in combination with WithPresize. 
+func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] { + return NewMapOf[K, V](WithPresize(sizeHint)) +} + +func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] { + buckets := make([]bucketOfPadded, minTableLen) + for i := range buckets { + buckets[i].meta = defaultMeta + } + counterLen := minTableLen >> 10 + if counterLen < minMapCounterLen { + counterLen = minMapCounterLen + } else if counterLen > maxMapCounterLen { + counterLen = maxMapCounterLen + } + counter := make([]counterStripe, counterLen) + t := &mapOfTable[K, V]{ + buckets: buckets, + size: counter, + seed: makeSeed(), + } + return t +} + +// ToPlainMapOf returns a native map with a copy of xsync Map's +// contents. The copied xsync Map should not be modified while +// this call is made. If the copied Map is modified, the copying +// behavior is the same as in the Range method. +func ToPlainMapOf[K comparable, V any](m *MapOf[K, V]) map[K]V { + pm := make(map[K]V) + if m != nil { + m.Range(func(key K, value V) bool { + pm[key] = value + return true + }) + } + return pm +} + +// Load returns the value stored in the map for a key, or zero value +// of type V if no value is present. +// The ok result indicates whether value was found in the map. +func (m *MapOf[K, V]) Load(key K) (value V, ok bool) { + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + hash := m.hasher(key, table.seed) + h1 := h1(hash) + h2w := broadcast(h2(hash)) + bidx := uint64(len(table.buckets)-1) & h1 + b := &table.buckets[bidx] + for { + metaw := atomic.LoadUint64(&b.meta) + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + eptr := atomic.LoadPointer(&b.entries[idx]) + if eptr != nil { + e := (*entryOf[K, V])(eptr) + if e.key == key { + return e.value, true + } + } + markedw &= markedw - 1 + } + bptr := atomic.LoadPointer(&b.next) + if bptr == nil { + return + } + b = (*bucketOfPadded)(bptr) + } +} + +// Store sets the value for a key. +func (m *MapOf[K, V]) Store(key K, value V) { + m.doCompute( + key, + func(V, bool) (V, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + return value, false + }, + true, + false, + ) +} + +// LoadAndStore returns the existing value for the key if present, +// while setting the new value for the key. +// It stores the new value and returns the existing one, if present. +// The loaded result is true if the existing value was loaded, +// false otherwise. +func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrCompute returns the existing value for the key if present. +// Otherwise, it computes the value using the provided function, and +// then stores and returns the computed value. The loaded result is +// true if the value was loaded, false if computed. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. 
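+//
+// For instance, LoadOrCompute suits lazy per-key initialization, since
+// valueFn is invoked only when the key is not yet present:
+//
+//	// newSession here is a placeholder for any func() V
+//	s, _ := sessions.LoadOrCompute(userID, newSession)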
+func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + return valueFn(), false + }, + true, + false, + ) +} + +// LoadOrTryCompute returns the existing value for the key if present. +// Otherwise, it tries to compute the value using the provided function +// and, if successful, stores and returns the computed value. The loaded +// result is true if the value was loaded, or false if computed (whether +// successfully or not). If the compute attempt was cancelled (due to an +// error, for example), a zero value of type V will be returned. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *MapOf[K, V]) LoadOrTryCompute( + key K, + valueFn func() (newValue V, cancel bool), +) (value V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + nv, c := valueFn() + if !c { + return nv, false + } + return nv, true // nv is ignored + }, + true, + false, + ) +} + +// Compute either sets the computed new value for the key or deletes +// the value for the key. When the delete result of the valueFn function +// is set to true, the value will be deleted, if it exists. When delete +// is set to false, the value is updated to the newValue. +// The ok result indicates whether value was computed and stored, thus, is +// present in the map. The actual result contains the new value in cases where +// the value was computed and stored. See the example for a few use cases. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *MapOf[K, V]) Compute( + key K, + valueFn func(oldValue V, loaded bool) (newValue V, delete bool), +) (actual V, ok bool) { + return m.doCompute(key, valueFn, false, true) +} + +// LoadAndDelete deletes the value for a key, returning the previous +// value if any. The loaded result reports whether the key was +// present. +func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + return m.doCompute( + key, + func(value V, loaded bool) (V, bool) { + return value, true + }, + false, + false, + ) +} + +// Delete deletes the value for a key. +func (m *MapOf[K, V]) Delete(key K) { + m.doCompute( + key, + func(value V, loaded bool) (V, bool) { + return value, true + }, + false, + false, + ) +} + +func (m *MapOf[K, V]) doCompute( + key K, + valueFn func(oldValue V, loaded bool) (V, bool), + loadIfExists, computeOnly bool, +) (V, bool) { + // Read-only path. + if loadIfExists { + if v, ok := m.Load(key); ok { + return v, !computeOnly + } + } + // Write path. + for { + compute_attempt: + var ( + emptyb *bucketOfPadded + emptyidx int + ) + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + hash := m.hasher(key, table.seed) + h1 := h1(hash) + h2 := h2(hash) + h2w := broadcast(h2) + bidx := uint64(len(table.buckets)-1) & h1 + rootb := &table.buckets[bidx] + rootb.mu.Lock() + // The following two checks must go in reverse to what's + // in the resize method. + if m.resizeInProgress() { + // Resize is in progress. Wait, then go for another attempt. 
+ rootb.mu.Unlock() + m.waitForResize() + goto compute_attempt + } + if m.newerTableExists(table) { + // Someone resized the table. Go for another attempt. + rootb.mu.Unlock() + goto compute_attempt + } + b := rootb + for { + metaw := b.meta + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + eptr := b.entries[idx] + if eptr != nil { + e := (*entryOf[K, V])(eptr) + if e.key == key { + if loadIfExists { + rootb.mu.Unlock() + return e.value, !computeOnly + } + // In-place update/delete. + // We get a copy of the value via an interface{} on each call, + // thus the live value pointers are unique. Otherwise atomic + // snapshot won't be correct in case of multiple Store calls + // using the same value. + oldv := e.value + newv, del := valueFn(oldv, true) + if del { + // Deletion. + // First we update the hash, then the entry. + newmetaw := setByte(metaw, emptyMetaSlot, idx) + atomic.StoreUint64(&b.meta, newmetaw) + atomic.StorePointer(&b.entries[idx], nil) + rootb.mu.Unlock() + table.addSize(bidx, -1) + // Might need to shrink the table if we left bucket empty. + if newmetaw == defaultMeta { + m.resize(table, mapShrinkHint) + } + return oldv, !computeOnly + } + newe := new(entryOf[K, V]) + newe.key = key + newe.value = newv + atomic.StorePointer(&b.entries[idx], unsafe.Pointer(newe)) + rootb.mu.Unlock() + if computeOnly { + // Compute expects the new value to be returned. + return newv, true + } + // LoadAndStore expects the old value to be returned. + return oldv, true + } + } + markedw &= markedw - 1 + } + if emptyb == nil { + // Search for empty entries (up to 5 per bucket). + emptyw := metaw & defaultMetaMasked + if emptyw != 0 { + idx := firstMarkedByteIndex(emptyw) + emptyb = b + emptyidx = idx + } + } + if b.next == nil { + if emptyb != nil { + // Insertion into an existing bucket. + var zeroV V + newValue, del := valueFn(zeroV, false) + if del { + rootb.mu.Unlock() + return zeroV, false + } + newe := new(entryOf[K, V]) + newe.key = key + newe.value = newValue + // First we update meta, then the entry. + atomic.StoreUint64(&emptyb.meta, setByte(emptyb.meta, h2, emptyidx)) + atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe)) + rootb.mu.Unlock() + table.addSize(bidx, 1) + return newValue, computeOnly + } + growThreshold := float64(tableLen) * entriesPerMapOfBucket * mapLoadFactor + if table.sumSize() > int64(growThreshold) { + // Need to grow the table. Then go for another attempt. + rootb.mu.Unlock() + m.resize(table, mapGrowHint) + goto compute_attempt + } + // Insertion into a new bucket. + var zeroV V + newValue, del := valueFn(zeroV, false) + if del { + rootb.mu.Unlock() + return newValue, false + } + // Create and append a bucket. 
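+				// As with in-place updates, the entry and the meta byte are
+				// written before the bucket is linked via an atomic store of
+				// the next pointer, keeping the chain consistent for readers.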
+ newb := new(bucketOfPadded) + newb.meta = setByte(defaultMeta, h2, 0) + newe := new(entryOf[K, V]) + newe.key = key + newe.value = newValue + newb.entries[0] = unsafe.Pointer(newe) + atomic.StorePointer(&b.next, unsafe.Pointer(newb)) + rootb.mu.Unlock() + table.addSize(bidx, 1) + return newValue, computeOnly + } + b = (*bucketOfPadded)(b.next) + } + } +} + +func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool { + curTablePtr := atomic.LoadPointer(&m.table) + return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table)) +} + +func (m *MapOf[K, V]) resizeInProgress() bool { + return atomic.LoadInt64(&m.resizing) == 1 +} + +func (m *MapOf[K, V]) waitForResize() { + m.resizeMu.Lock() + for m.resizeInProgress() { + m.resizeCond.Wait() + } + m.resizeMu.Unlock() +} + +func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) { + knownTableLen := len(knownTable.buckets) + // Fast path for shrink attempts. + if hint == mapShrinkHint { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapOfBucket)/mapShrinkFraction) { + return + } + } + // Slow path. + if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) { + // Someone else started resize. Wait for it to finish. + m.waitForResize() + return + } + var newTable *mapOfTable[K, V] + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + switch hint { + case mapGrowHint: + // Grow the table with factor of 2. + atomic.AddInt64(&m.totalGrowths, 1) + newTable = newMapOfTable[K, V](tableLen << 1) + case mapShrinkHint: + shrinkThreshold := int64((tableLen * entriesPerMapOfBucket) / mapShrinkFraction) + if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold { + // Shrink the table with factor of 2. + atomic.AddInt64(&m.totalShrinks, 1) + newTable = newMapOfTable[K, V](tableLen >> 1) + } else { + // No need to shrink. Wake up all waiters and give up. + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() + return + } + case mapClearHint: + newTable = newMapOfTable[K, V](m.minTableLen) + default: + panic(fmt.Sprintf("unexpected resize hint: %d", hint)) + } + // Copy the data only if we're not clearing the map. + if hint != mapClearHint { + for i := 0; i < tableLen; i++ { + copied := copyBucketOf(&table.buckets[i], newTable, m.hasher) + newTable.addSizePlain(uint64(i), copied) + } + } + // Publish the new table and wake up all waiters. + atomic.StorePointer(&m.table, unsafe.Pointer(newTable)) + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() +} + +func copyBucketOf[K comparable, V any]( + b *bucketOfPadded, + destTable *mapOfTable[K, V], + hasher func(K, uint64) uint64, +) (copied int) { + rootb := b + rootb.mu.Lock() + for { + for i := 0; i < entriesPerMapOfBucket; i++ { + if b.entries[i] != nil { + e := (*entryOf[K, V])(b.entries[i]) + hash := hasher(e.key, destTable.seed) + bidx := uint64(len(destTable.buckets)-1) & h1(hash) + destb := &destTable.buckets[bidx] + appendToBucketOf(h2(hash), b.entries[i], destb) + copied++ + } + } + if b.next == nil { + rootb.mu.Unlock() + return + } + b = (*bucketOfPadded)(b.next) + } +} + +// Range calls f sequentially for each key and value present in the +// map. If f returns false, range stops the iteration. 
+// +// Range does not necessarily correspond to any consistent snapshot +// of the Map's contents: no key will be visited more than once, but +// if the value for any key is stored or deleted concurrently, Range +// may reflect any mapping for that key from any point during the +// Range call. +// +// It is safe to modify the map while iterating it, including entry +// creation, modification and deletion. However, the concurrent +// modification rule apply, i.e. the changes may be not reflected +// in the subsequently iterated entries. +func (m *MapOf[K, V]) Range(f func(key K, value V) bool) { + var zeroPtr unsafe.Pointer + // Pre-allocate array big enough to fit entries for most hash tables. + bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapOfBucket) + tablep := atomic.LoadPointer(&m.table) + table := *(*mapOfTable[K, V])(tablep) + for i := range table.buckets { + rootb := &table.buckets[i] + b := rootb + // Prevent concurrent modifications and copy all entries into + // the intermediate slice. + rootb.mu.Lock() + for { + for i := 0; i < entriesPerMapOfBucket; i++ { + if b.entries[i] != nil { + bentries = append(bentries, b.entries[i]) + } + } + if b.next == nil { + rootb.mu.Unlock() + break + } + b = (*bucketOfPadded)(b.next) + } + // Call the function for all copied entries. + for j := range bentries { + entry := (*entryOf[K, V])(bentries[j]) + if !f(entry.key, entry.value) { + return + } + // Remove the reference to avoid preventing the copied + // entries from being GCed until this method finishes. + bentries[j] = zeroPtr + } + bentries = bentries[:0] + } +} + +// Clear deletes all keys and values currently stored in the map. +func (m *MapOf[K, V]) Clear() { + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + m.resize(table, mapClearHint) +} + +// Size returns current size of the map. +func (m *MapOf[K, V]) Size() int { + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + return int(table.sumSize()) +} + +func appendToBucketOf(h2 uint8, entryPtr unsafe.Pointer, b *bucketOfPadded) { + for { + for i := 0; i < entriesPerMapOfBucket; i++ { + if b.entries[i] == nil { + b.meta = setByte(b.meta, h2, i) + b.entries[i] = entryPtr + return + } + } + if b.next == nil { + newb := new(bucketOfPadded) + newb.meta = setByte(defaultMeta, h2, 0) + newb.entries[0] = entryPtr + b.next = unsafe.Pointer(newb) + return + } + b = (*bucketOfPadded)(b.next) + } +} + +func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) { + cidx := uint64(len(table.size)-1) & bucketIdx + atomic.AddInt64(&table.size[cidx].c, int64(delta)) +} + +func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) { + cidx := uint64(len(table.size)-1) & bucketIdx + table.size[cidx].c += int64(delta) +} + +func (table *mapOfTable[K, V]) sumSize() int64 { + sum := int64(0) + for i := range table.size { + sum += atomic.LoadInt64(&table.size[i].c) + } + return sum +} + +func h1(h uint64) uint64 { + return h >> 7 +} + +func h2(h uint64) uint8 { + return uint8(h & 0x7f) +} + +// Stats returns statistics for the MapOf. Just like other map +// methods, this one is thread-safe. Yet it's an O(N) operation, +// so it should be used only for diagnostics or debugging purposes. 
+func (m *MapOf[K, V]) Stats() MapStats {
+	stats := MapStats{
+		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
+		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
+		MinEntries:   math.MaxInt32,
+	}
+	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
+	stats.RootBuckets = len(table.buckets)
+	stats.Counter = int(table.sumSize())
+	stats.CounterLen = len(table.size)
+	for i := range table.buckets {
+		nentries := 0
+		b := &table.buckets[i]
+		stats.TotalBuckets++
+		for {
+			nentriesLocal := 0
+			stats.Capacity += entriesPerMapOfBucket
+			for i := 0; i < entriesPerMapOfBucket; i++ {
+				if atomic.LoadPointer(&b.entries[i]) != nil {
+					stats.Size++
+					nentriesLocal++
+				}
+			}
+			nentries += nentriesLocal
+			if nentriesLocal == 0 {
+				stats.EmptyBuckets++
+			}
+			if b.next == nil {
+				break
+			}
+			b = (*bucketOfPadded)(atomic.LoadPointer(&b.next))
+			stats.TotalBuckets++
+		}
+		if nentries < stats.MinEntries {
+			stats.MinEntries = nentries
+		}
+		if nentries > stats.MaxEntries {
+			stats.MaxEntries = nentries
+		}
+	}
+	return stats
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go
new file mode 100644
index 0000000..c5fd262
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go
@@ -0,0 +1,125 @@
+package xsync
+
+import (
+	"runtime"
+	"sync/atomic"
+	"unsafe"
+)
+
+// A MPMCQueue is a bounded multi-producer multi-consumer concurrent
+// queue.
+//
+// MPMCQueue instances must be created with NewMPMCQueue function.
+// A MPMCQueue must not be copied after first use.
+//
+// Based on the data structure from the following C++ library:
+// https://github.com/rigtorp/MPMCQueue
+type MPMCQueue struct {
+	cap  uint64
+	head uint64
+	//lint:ignore U1000 prevents false sharing
+	hpad [cacheLineSize - 8]byte
+	tail uint64
+	//lint:ignore U1000 prevents false sharing
+	tpad [cacheLineSize - 8]byte
+	slots []slotPadded
+}
+
+type slotPadded struct {
+	slot
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - unsafe.Sizeof(slot{})]byte
+}
+
+type slot struct {
+	turn uint64
+	item interface{}
+}
+
+// NewMPMCQueue creates a new MPMCQueue instance with the given
+// capacity.
+func NewMPMCQueue(capacity int) *MPMCQueue {
+	if capacity < 1 {
+		panic("capacity must be positive number")
+	}
+	return &MPMCQueue{
+		cap:   uint64(capacity),
+		slots: make([]slotPadded, capacity),
+	}
+}
+
+// Enqueue inserts the given item into the queue.
+// Blocks if the queue is full.
+//
+// Deprecated: use TryEnqueue in combination with runtime.Gosched().
+func (q *MPMCQueue) Enqueue(item interface{}) {
+	head := atomic.AddUint64(&q.head, 1) - 1
+	slot := &q.slots[q.idx(head)]
+	turn := q.turn(head) * 2
+	for atomic.LoadUint64(&slot.turn) != turn {
+		runtime.Gosched()
+	}
+	slot.item = item
+	atomic.StoreUint64(&slot.turn, turn+1)
+}
+
+// Dequeue retrieves and removes the item from the head of the queue.
+// Blocks if the queue is empty.
+//
+// Deprecated: use TryDequeue in combination with runtime.Gosched().
+func (q *MPMCQueue) Dequeue() interface{} {
+	tail := atomic.AddUint64(&q.tail, 1) - 1
+	slot := &q.slots[q.idx(tail)]
+	turn := q.turn(tail)*2 + 1
+	for atomic.LoadUint64(&slot.turn) != turn {
+		runtime.Gosched()
+	}
+	item := slot.item
+	slot.item = nil
+	atomic.StoreUint64(&slot.turn, turn+1)
+	return item
+}
+
+// TryEnqueue inserts the given item into the queue. Does not block
+// and returns immediately. The result indicates that the queue isn't
+// full and the item was inserted.
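+//
+// A typical non-blocking retry loop, echoing the deprecation notes
+// above, might look like this (illustrative sketch; q and item are
+// assumed to exist):
+//
+//	for !q.TryEnqueue(item) {
+//		runtime.Gosched()
+//	}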
+func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
+	head := atomic.LoadUint64(&q.head)
+	slot := &q.slots[q.idx(head)]
+	turn := q.turn(head) * 2
+	if atomic.LoadUint64(&slot.turn) == turn {
+		if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
+			slot.item = item
+			atomic.StoreUint64(&slot.turn, turn+1)
+			return true
+		}
+	}
+	return false
+}
+
+// TryDequeue retrieves and removes the item from the head of the
+// queue. Does not block and returns immediately. The ok result
+// indicates that the queue isn't empty and an item was retrieved.
+func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) {
+	tail := atomic.LoadUint64(&q.tail)
+	slot := &q.slots[q.idx(tail)]
+	turn := q.turn(tail)*2 + 1
+	if atomic.LoadUint64(&slot.turn) == turn {
+		if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
+			item = slot.item
+			ok = true
+			slot.item = nil
+			atomic.StoreUint64(&slot.turn, turn+1)
+			return
+		}
+	}
+	return
+}
+
+func (q *MPMCQueue) idx(i uint64) uint64 {
+	return i % q.cap
+}
+
+func (q *MPMCQueue) turn(i uint64) uint64 {
+	return i / q.cap
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go
new file mode 100644
index 0000000..3f7e4cc
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go
@@ -0,0 +1,138 @@
+//go:build go1.19
+// +build go1.19
+
+package xsync
+
+import (
+	"runtime"
+	"sync/atomic"
+	"unsafe"
+)
+
+// A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent
+// queue. It's a generic version of MPMCQueue.
+//
+// MPMCQueueOf instances must be created with NewMPMCQueueOf function.
+// A MPMCQueueOf must not be copied after first use.
+//
+// Based on the data structure from the following C++ library:
+// https://github.com/rigtorp/MPMCQueue
+type MPMCQueueOf[I any] struct {
+	cap  uint64
+	head uint64
+	//lint:ignore U1000 prevents false sharing
+	hpad [cacheLineSize - 8]byte
+	tail uint64
+	//lint:ignore U1000 prevents false sharing
+	tpad [cacheLineSize - 8]byte
+	slots []slotOfPadded[I]
+}
+
+type slotOfPadded[I any] struct {
+	slotOf[I]
+	// Unfortunately, proper padding like the below one:
+	//
+	// pad [cacheLineSize - (unsafe.Sizeof(slotOf[I]{}) % cacheLineSize)]byte
+	//
+	// won't compile, so here we add a best-effort padding for items
+	// up to 56 bytes in size.
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte
+}
+
+type slotOf[I any] struct {
+	// atomic.Uint64 is used here to get proper 8 byte alignment on
+	// 32-bit archs.
+	turn atomic.Uint64
+	item I
+}
+
+// NewMPMCQueueOf creates a new MPMCQueueOf instance with the given
+// capacity.
+func NewMPMCQueueOf[I any](capacity int) *MPMCQueueOf[I] {
+	if capacity < 1 {
+		panic("capacity must be positive number")
+	}
+	return &MPMCQueueOf[I]{
+		cap:   uint64(capacity),
+		slots: make([]slotOfPadded[I], capacity),
+	}
+}
+
+// Enqueue inserts the given item into the queue.
+// Blocks if the queue is full.
+//
+// Deprecated: use TryEnqueue in combination with runtime.Gosched().
+func (q *MPMCQueueOf[I]) Enqueue(item I) {
+	head := atomic.AddUint64(&q.head, 1) - 1
+	slot := &q.slots[q.idx(head)]
+	turn := q.turn(head) * 2
+	for slot.turn.Load() != turn {
+		runtime.Gosched()
+	}
+	slot.item = item
+	slot.turn.Store(turn + 1)
+}
+
+// Dequeue retrieves and removes the item from the head of the queue.
+// Blocks if the queue is empty.
+//
+// Deprecated: use TryDequeue in combination with runtime.Gosched().
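+//
+// For reference, the recommended non-blocking replacement looks
+// roughly like this (illustrative sketch; q is an assumed
+// *MPMCQueueOf[I] value):
+//
+//	item, ok := q.TryDequeue()
+//	for !ok {
+//		runtime.Gosched()
+//		item, ok = q.TryDequeue()
+//	}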
+func (q *MPMCQueueOf[I]) Dequeue() I {
+	var zeroI I
+	tail := atomic.AddUint64(&q.tail, 1) - 1
+	slot := &q.slots[q.idx(tail)]
+	turn := q.turn(tail)*2 + 1
+	for slot.turn.Load() != turn {
+		runtime.Gosched()
+	}
+	item := slot.item
+	slot.item = zeroI
+	slot.turn.Store(turn + 1)
+	return item
+}
+
+// TryEnqueue inserts the given item into the queue. Does not block
+// and returns immediately. The result indicates that the queue isn't
+// full and the item was inserted.
+func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool {
+	head := atomic.LoadUint64(&q.head)
+	slot := &q.slots[q.idx(head)]
+	turn := q.turn(head) * 2
+	if slot.turn.Load() == turn {
+		if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
+			slot.item = item
+			slot.turn.Store(turn + 1)
+			return true
+		}
+	}
+	return false
+}
+
+// TryDequeue retrieves and removes the item from the head of the
+// queue. Does not block and returns immediately. The ok result
+// indicates that the queue isn't empty and an item was retrieved.
+func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) {
+	tail := atomic.LoadUint64(&q.tail)
+	slot := &q.slots[q.idx(tail)]
+	turn := q.turn(tail)*2 + 1
+	if slot.turn.Load() == turn {
+		if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
+			var zeroI I
+			item = slot.item
+			ok = true
+			slot.item = zeroI
+			slot.turn.Store(turn + 1)
+			return
+		}
+	}
+	return
+}
+
+func (q *MPMCQueueOf[I]) idx(i uint64) uint64 {
+	return i % q.cap
+}
+
+func (q *MPMCQueueOf[I]) turn(i uint64) uint64 {
+	return i / q.cap
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go b/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go
new file mode 100644
index 0000000..4cbd9c4
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go
@@ -0,0 +1,188 @@
+package xsync
+
+import (
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// slow-down guard
+const nslowdown = 7
+
+// pool for reader tokens
+var rtokenPool sync.Pool
+
+// RToken is a reader lock token.
+type RToken struct {
+	slot uint32
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 4]byte
+}
+
+// A RBMutex is a reader biased reader/writer mutual exclusion lock.
+// The lock can be held by an arbitrary number of readers or a
+// single writer. The zero value for a RBMutex is an unlocked mutex.
+//
+// A RBMutex must not be copied after first use.
+//
+// RBMutex is based on a modified version of the BRAVO
+// (Biased Locking for Reader-Writer Locks) algorithm:
+// https://arxiv.org/pdf/1810.01553.pdf
+//
+// RBMutex is a specialized mutex for scenarios, such as caches,
+// where the vast majority of locks are acquired by readers and write
+// lock acquisitions are infrequent. In such scenarios, RBMutex
+// performs better than sync.RWMutex on large multicore machines.
+//
+// RBMutex extends sync.RWMutex internally and uses it as the "reader
+// bias disabled" fallback, so the same semantics apply. The only
+// noticeable difference is the reader token returned from RLock,
+// which must be passed to RUnlock.
+type RBMutex struct {
+	rslots       []rslot
+	rmask        uint32
+	rbias        int32
+	inhibitUntil time.Time
+	rw           sync.RWMutex
+}
+
+type rslot struct {
+	mu int32
+	//lint:ignore U1000 prevents false sharing
+	pad [cacheLineSize - 4]byte
+}
+
+// NewRBMutex creates a new RBMutex instance.
+func NewRBMutex() *RBMutex {
+	nslots := nextPowOf2(parallelism())
+	mu := RBMutex{
+		rslots: make([]rslot, nslots),
+		rmask:  nslots - 1,
+		rbias:  1,
+	}
+	return &mu
+}
+
+// TryRLock tries to lock m for reading without blocking.
+// When TryRLock succeeds, it returns true and a reader token.
+// On failure, it returns false and a nil token.
+func (mu *RBMutex) TryRLock() (bool, *RToken) {
+	if t := mu.fastRlock(); t != nil {
+		return true, t
+	}
+	// Optimistic slow path.
+	if mu.rw.TryRLock() {
+		if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
+			atomic.StoreInt32(&mu.rbias, 1)
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+// RLock locks m for reading and returns a reader token. The
+// token must be passed to the subsequent RUnlock call.
+//
+// Should not be used for recursive read locking; a blocked Lock
+// call excludes new readers from acquiring the lock.
+func (mu *RBMutex) RLock() *RToken {
+	if t := mu.fastRlock(); t != nil {
+		return t
+	}
+	// Slow path.
+	mu.rw.RLock()
+	if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
+		atomic.StoreInt32(&mu.rbias, 1)
+	}
+	return nil
+}
+
+func (mu *RBMutex) fastRlock() *RToken {
+	if atomic.LoadInt32(&mu.rbias) == 1 {
+		t, ok := rtokenPool.Get().(*RToken)
+		if !ok {
+			t = new(RToken)
+			t.slot = runtime_fastrand()
+		}
+		// Try all available slots to distribute reader threads to slots.
+		for i := 0; i < len(mu.rslots); i++ {
+			slot := t.slot + uint32(i)
+			rslot := &mu.rslots[slot&mu.rmask]
+			rslotmu := atomic.LoadInt32(&rslot.mu)
+			if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
+				if atomic.LoadInt32(&mu.rbias) == 1 {
+					// Hot path succeeded.
+					t.slot = slot
+					return t
+				}
+				// The mutex is no longer reader biased. Roll back.
+				atomic.AddInt32(&rslot.mu, -1)
+				rtokenPool.Put(t)
+				return nil
+			}
+			// Contention detected. Try the next slot.
+		}
+	}
+	return nil
+}
+
+// RUnlock undoes a single RLock call. A reader token obtained from
+// the RLock call must be provided. RUnlock does not affect other
+// simultaneous readers. A panic is raised if m is not locked for
+// reading on entry to RUnlock.
+func (mu *RBMutex) RUnlock(t *RToken) {
+	if t == nil {
+		mu.rw.RUnlock()
+		return
+	}
+	if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
+		panic("invalid reader state detected")
+	}
+	rtokenPool.Put(t)
+}
+
+// TryLock tries to lock m for writing without blocking.
+func (mu *RBMutex) TryLock() bool {
+	if mu.rw.TryLock() {
+		if atomic.LoadInt32(&mu.rbias) == 1 {
+			atomic.StoreInt32(&mu.rbias, 0)
+			for i := 0; i < len(mu.rslots); i++ {
+				if atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
+					// There is a reader. Roll back.
+					atomic.StoreInt32(&mu.rbias, 1)
+					mu.rw.Unlock()
+					return false
+				}
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// Lock locks m for writing. If the lock is already locked for
+// reading or writing, Lock blocks until the lock is available.
+func (mu *RBMutex) Lock() {
+	mu.rw.Lock()
+	if atomic.LoadInt32(&mu.rbias) == 1 {
+		atomic.StoreInt32(&mu.rbias, 0)
+		start := time.Now()
+		for i := 0; i < len(mu.rslots); i++ {
+			for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
+				runtime.Gosched()
+			}
+		}
+		mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
+	}
+}
+
+// Unlock unlocks m for writing. A panic is raised if m is not locked
+// for writing on entry to Unlock.
+//
+// As with RWMutex, a locked RBMutex is not associated with a
+// particular goroutine. One goroutine may RLock (Lock) a RBMutex and
+// then arrange for another goroutine to RUnlock (Unlock) it.
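+//
+// For reference, a typical read-side pairing with the reader token
+// looks like this (illustrative sketch; mu is an assumed *RBMutex
+// guarding some shared state):
+//
+//	t := mu.RLock()
+//	// ... read the shared state ...
+//	mu.RUnlock(t)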
+func (mu *RBMutex) Unlock() {
+	mu.rw.Unlock()
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go
new file mode 100644
index 0000000..6e4f84b
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go
@@ -0,0 +1,92 @@
+package xsync
+
+import (
+	"sync/atomic"
+)
+
+// A SPSCQueue is a bounded single-producer single-consumer concurrent
+// queue. This means that at most one goroutine may be publishing items
+// to the queue while at most one goroutine may be consuming those
+// items.
+//
+// SPSCQueue instances must be created with NewSPSCQueue function.
+// A SPSCQueue must not be copied after first use.
+//
+// Based on the data structure from the following article:
+// https://rigtorp.se/ringbuffer/
+type SPSCQueue struct {
+	cap  uint64
+	pidx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad0 [cacheLineSize - 8]byte
+	pcachedIdx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad1 [cacheLineSize - 8]byte
+	cidx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad2 [cacheLineSize - 8]byte
+	ccachedIdx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad3 [cacheLineSize - 8]byte
+	items []interface{}
+}
+
+// NewSPSCQueue creates a new SPSCQueue instance with the given
+// capacity.
+func NewSPSCQueue(capacity int) *SPSCQueue {
+	if capacity < 1 {
+		panic("capacity must be positive number")
+	}
+	return &SPSCQueue{
+		cap:   uint64(capacity + 1),
+		items: make([]interface{}, capacity+1),
+	}
+}
+
+// TryEnqueue inserts the given item into the queue. Does not block
+// and returns immediately. The result indicates that the queue isn't
+// full and the item was inserted.
+func (q *SPSCQueue) TryEnqueue(item interface{}) bool {
+	// relaxed memory order would be enough here
+	idx := atomic.LoadUint64(&q.pidx)
+	nextIdx := idx + 1
+	if nextIdx == q.cap {
+		nextIdx = 0
+	}
+	cachedIdx := q.ccachedIdx
+	if nextIdx == cachedIdx {
+		cachedIdx = atomic.LoadUint64(&q.cidx)
+		q.ccachedIdx = cachedIdx
+		if nextIdx == cachedIdx {
+			return false
+		}
+	}
+	q.items[idx] = item
+	atomic.StoreUint64(&q.pidx, nextIdx)
+	return true
+}
+
+// TryDequeue retrieves and removes the item from the head of the
+// queue. Does not block and returns immediately. The ok result
+// indicates that the queue isn't empty and an item was retrieved.
+func (q *SPSCQueue) TryDequeue() (item interface{}, ok bool) {
+	// relaxed memory order would be enough here
+	idx := atomic.LoadUint64(&q.cidx)
+	cachedIdx := q.pcachedIdx
+	if idx == cachedIdx {
+		cachedIdx = atomic.LoadUint64(&q.pidx)
+		q.pcachedIdx = cachedIdx
+		if idx == cachedIdx {
+			return
+		}
+	}
+	item = q.items[idx]
+	q.items[idx] = nil
+	ok = true
+	nextIdx := idx + 1
+	if nextIdx == q.cap {
+		nextIdx = 0
+	}
+	atomic.StoreUint64(&q.cidx, nextIdx)
+	return
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go
new file mode 100644
index 0000000..3ae132e
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go
@@ -0,0 +1,96 @@
+//go:build go1.19
+// +build go1.19
+
+package xsync
+
+import (
+	"sync/atomic"
+)
+
+// A SPSCQueueOf is a bounded single-producer single-consumer concurrent
+// queue. This means that at most one goroutine may be publishing items
+// to the queue while at most one goroutine may be consuming those
+// items.
+//
+// SPSCQueueOf instances must be created with NewSPSCQueueOf function.
+// A SPSCQueueOf must not be copied after first use.
+//
+// Based on the data structure from the following article:
+// https://rigtorp.se/ringbuffer/
+type SPSCQueueOf[I any] struct {
+	cap  uint64
+	pidx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad0 [cacheLineSize - 8]byte
+	pcachedIdx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad1 [cacheLineSize - 8]byte
+	cidx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad2 [cacheLineSize - 8]byte
+	ccachedIdx uint64
+	//lint:ignore U1000 prevents false sharing
+	pad3 [cacheLineSize - 8]byte
+	items []I
+}
+
+// NewSPSCQueueOf creates a new SPSCQueueOf instance with the given
+// capacity.
+func NewSPSCQueueOf[I any](capacity int) *SPSCQueueOf[I] {
+	if capacity < 1 {
+		panic("capacity must be positive number")
+	}
+	return &SPSCQueueOf[I]{
+		cap:   uint64(capacity + 1),
+		items: make([]I, capacity+1),
+	}
+}
+
+// TryEnqueue inserts the given item into the queue. Does not block
+// and returns immediately. The result indicates that the queue isn't
+// full and the item was inserted.
+func (q *SPSCQueueOf[I]) TryEnqueue(item I) bool {
+	// relaxed memory order would be enough here
+	idx := atomic.LoadUint64(&q.pidx)
+	nextIdx := idx + 1
+	if nextIdx == q.cap {
+		nextIdx = 0
+	}
+	cachedIdx := q.ccachedIdx
+	if nextIdx == cachedIdx {
+		cachedIdx = atomic.LoadUint64(&q.cidx)
+		q.ccachedIdx = cachedIdx
+		if nextIdx == cachedIdx {
+			return false
+		}
+	}
+	q.items[idx] = item
+	atomic.StoreUint64(&q.pidx, nextIdx)
+	return true
+}
+
+// TryDequeue retrieves and removes the item from the head of the
+// queue. Does not block and returns immediately. The ok result
+// indicates that the queue isn't empty and an item was retrieved.
+func (q *SPSCQueueOf[I]) TryDequeue() (item I, ok bool) {
+	// relaxed memory order would be enough here
+	idx := atomic.LoadUint64(&q.cidx)
+	cachedIdx := q.pcachedIdx
+	if idx == cachedIdx {
+		cachedIdx = atomic.LoadUint64(&q.pidx)
+		q.pcachedIdx = cachedIdx
+		if idx == cachedIdx {
+			return
+		}
+	}
+	var zeroI I
+	item = q.items[idx]
+	q.items[idx] = zeroI
+	ok = true
+	nextIdx := idx + 1
+	if nextIdx == q.cap {
+		nextIdx = 0
+	}
+	atomic.StoreUint64(&q.cidx, nextIdx)
+	return
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/util.go b/vendor/github.com/puzpuzpuz/xsync/v3/util.go
new file mode 100644
index 0000000..7692708
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/util.go
@@ -0,0 +1,66 @@
+package xsync
+
+import (
+	"math/bits"
+	"runtime"
+	_ "unsafe"
+)
+
+// test-only assert()-like flag
+var assertionsEnabled = false
+
+const (
+	// cacheLineSize is used in paddings to prevent false sharing;
+	// 64B are used instead of 128B as a compromise between
+	// memory footprint and performance; 128B usage may give ~30%
+	// improvement on NUMA machines.
+	cacheLineSize = 64
+)
+
+// nextPowOf2 computes the next highest power of 2 of 32-bit v.
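+// For example, nextPowOf2(5) == 8 and nextPowOf2(8) == 8.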
+// Source: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+func nextPowOf2(v uint32) uint32 {
+	if v == 0 {
+		return 1
+	}
+	v--
+	v |= v >> 1
+	v |= v >> 2
+	v |= v >> 4
+	v |= v >> 8
+	v |= v >> 16
+	v++
+	return v
+}
+
+func parallelism() uint32 {
+	maxProcs := uint32(runtime.GOMAXPROCS(0))
+	numCores := uint32(runtime.NumCPU())
+	if maxProcs < numCores {
+		return maxProcs
+	}
+	return numCores
+}
+
+//go:noescape
+//go:linkname runtime_fastrand runtime.fastrand
+func runtime_fastrand() uint32
+
+func broadcast(b uint8) uint64 {
+	return 0x101010101010101 * uint64(b)
+}
+
+func firstMarkedByteIndex(w uint64) int {
+	return bits.TrailingZeros64(w) >> 3
+}
+
+// SWAR byte search: may produce false positives, e.g. for 0x0100,
+// so make sure to double-check bytes found by this function.
+func markZeroBytes(w uint64) uint64 {
+	return ((w - 0x0101010101010101) & (^w) & 0x8080808080808080)
+}
+
+func setByte(w uint64, b uint8, idx int) uint64 {
+	shift := idx << 3
+	return (w &^ (0xff << shift)) | (uint64(b) << shift)
+}
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go b/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go
new file mode 100644
index 0000000..9aa6597
--- /dev/null
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go
@@ -0,0 +1,77 @@
+package xsync
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// makeSeed creates a random seed.
+func makeSeed() uint64 {
+	var s1 uint32
+	for {
+		s1 = runtime_fastrand()
+		// We use seed 0 to indicate an uninitialized seed/hash,
+		// so keep trying until we get a non-zero seed.
+		if s1 != 0 {
+			break
+		}
+	}
+	s2 := runtime_fastrand()
+	return uint64(s1)<<32 | uint64(s2)
+}
+
+// hashString calculates a hash of s with the given seed.
+func hashString(s string, seed uint64) uint64 {
+	if s == "" {
+		return seed
+	}
+	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return uint64(runtime_memhash(unsafe.Pointer(strh.Data), uintptr(seed), uintptr(strh.Len)))
+}
+
+//go:noescape
+//go:linkname runtime_memhash runtime.memhash
+func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr
+
+// defaultHasher creates a fast hash function, based on
+// runtime.typehash, for the given comparable type. The only
+// limitation is that the type should not contain interfaces inside.
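+//
+// Illustrative sketch of the intended use (hypothetical key and a
+// seed produced by makeSeed):
+//
+//	hash := defaultHasher[string]()
+//	h := hash("some-key", makeSeed())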
+func defaultHasher[T comparable]() func(T, uint64) uint64 { + var zero T + + if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface { + return func(value T, seed uint64) uint64 { + iValue := any(value) + i := (*iface)(unsafe.Pointer(&iValue)) + return runtime_typehash64(i.typ, i.word, seed) + } + } else { + var iZero any = zero + i := (*iface)(unsafe.Pointer(&iZero)) + return func(value T, seed uint64) uint64 { + return runtime_typehash64(i.typ, unsafe.Pointer(&value), seed) + } + } +} + +// how interface is represented in memory +type iface struct { + typ uintptr + word unsafe.Pointer +} + +// same as runtime_typehash, but always returns a uint64 +// see: maphash.rthash function for details +func runtime_typehash64(t uintptr, p unsafe.Pointer, seed uint64) uint64 { + if unsafe.Sizeof(uintptr(0)) == 8 { + return uint64(runtime_typehash(t, p, uintptr(seed))) + } + + lo := runtime_typehash(t, p, uintptr(seed)) + hi := runtime_typehash(t, p, uintptr(seed>>32)) + return uint64(hi)<<32 | uint64(lo) +} + +//go:noescape +//go:linkname runtime_typehash runtime.typehash +func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr diff --git a/vendor/github.com/tmthrgd/go-hex/.travis.yml b/vendor/github.com/tmthrgd/go-hex/.travis.yml new file mode 100644 index 0000000..b73e2f3 --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/.travis.yml @@ -0,0 +1,11 @@ +language: go +go: + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip diff --git a/vendor/github.com/tmthrgd/go-hex/LICENSE b/vendor/github.com/tmthrgd/go-hex/LICENSE new file mode 100644 index 0000000..1163cdf --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/LICENSE @@ -0,0 +1,82 @@ +Copyright (c) 2016, Tom Thorogood. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Tom Thorogood nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- Portions of the source code are also covered by the following license: ---- + +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- Portions of the source code are also covered by the following license: ---- + +Copyright (c) 2005-2016, Wojciech Muła +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/tmthrgd/go-hex/README.md b/vendor/github.com/tmthrgd/go-hex/README.md new file mode 100644 index 0000000..565411f --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/README.md @@ -0,0 +1,108 @@ +# go-hex + +[![GoDoc](https://godoc.org/github.com/tmthrgd/go-hex?status.svg)](https://godoc.org/github.com/tmthrgd/go-hex) +[![Build Status](https://travis-ci.org/tmthrgd/go-hex.svg?branch=master)](https://travis-ci.org/tmthrgd/go-hex) + +An efficient hexadecimal implementation for Golang. + +go-hex provides hex encoding and decoding using SSE/AVX instructions on x86-64. 
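+
+For illustration, a minimal round trip with this package might look like the
+following sketch (error handling reduced to a single check):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	hex "github.com/tmthrgd/go-hex"
+)
+
+func main() {
+	encoded := hex.EncodeToString([]byte("hello")) // "68656c6c6f"
+	decoded, err := hex.DecodeString(encoded)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(encoded, string(decoded))
+}
+```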
+ +## Download + +``` +go get github.com/tmthrgd/go-hex +``` + +## Benchmark + +go-hex: +``` +BenchmarkEncode/15-8 100000000 17.4 ns/op 863.43 MB/s +BenchmarkEncode/32-8 100000000 11.9 ns/op 2690.43 MB/s +BenchmarkEncode/128-8 100000000 21.4 ns/op 5982.92 MB/s +BenchmarkEncode/1k-8 20000000 88.5 ns/op 11572.80 MB/s +BenchmarkEncode/16k-8 1000000 1254 ns/op 13058.10 MB/s +BenchmarkEncode/128k-8 100000 12965 ns/op 10109.53 MB/s +BenchmarkEncode/1M-8 10000 119465 ns/op 8777.23 MB/s +BenchmarkEncode/16M-8 500 3530380 ns/op 4752.24 MB/s +BenchmarkEncode/128M-8 50 28001913 ns/op 4793.16 MB/s +BenchmarkDecode/14-8 100000000 12.6 ns/op 1110.01 MB/s +BenchmarkDecode/32-8 100000000 12.5 ns/op 2558.10 MB/s +BenchmarkDecode/128-8 50000000 27.2 ns/op 4697.66 MB/s +BenchmarkDecode/1k-8 10000000 168 ns/op 6093.43 MB/s +BenchmarkDecode/16k-8 500000 2543 ns/op 6442.09 MB/s +BenchmarkDecode/128k-8 100000 20339 ns/op 6444.24 MB/s +BenchmarkDecode/1M-8 10000 164313 ns/op 6381.57 MB/s +BenchmarkDecode/16M-8 500 3099822 ns/op 5412.31 MB/s +BenchmarkDecode/128M-8 50 24865822 ns/op 5397.68 MB/s +``` + +[encoding/hex](https://golang.org/pkg/encoding/hex/): +``` +BenchmarkRefEncode/15-8 50000000 36.1 ns/op 415.07 MB/s +BenchmarkRefEncode/32-8 20000000 72.9 ns/op 439.14 MB/s +BenchmarkRefEncode/128-8 5000000 289 ns/op 441.54 MB/s +BenchmarkRefEncode/1k-8 1000000 2268 ns/op 451.49 MB/s +BenchmarkRefEncode/16k-8 30000 39110 ns/op 418.91 MB/s +BenchmarkRefEncode/128k-8 5000 291260 ns/op 450.02 MB/s +BenchmarkRefEncode/1M-8 1000 2277578 ns/op 460.39 MB/s +BenchmarkRefEncode/16M-8 30 37087543 ns/op 452.37 MB/s +BenchmarkRefEncode/128M-8 5 293611713 ns/op 457.13 MB/s +BenchmarkRefDecode/14-8 30000000 53.7 ns/op 260.49 MB/s +BenchmarkRefDecode/32-8 10000000 128 ns/op 248.44 MB/s +BenchmarkRefDecode/128-8 3000000 481 ns/op 265.95 MB/s +BenchmarkRefDecode/1k-8 300000 4172 ns/op 245.43 MB/s +BenchmarkRefDecode/16k-8 10000 111989 ns/op 146.30 MB/s +BenchmarkRefDecode/128k-8 2000 909077 ns/op 144.18 MB/s +BenchmarkRefDecode/1M-8 200 7275779 ns/op 144.12 MB/s +BenchmarkRefDecode/16M-8 10 116574839 ns/op 143.92 MB/s +BenchmarkRefDecode/128M-8 2 933871637 ns/op 143.72 MB/s +``` + +[encoding/hex](https://golang.org/pkg/encoding/hex/) -> go-hex: +``` +benchmark old ns/op new ns/op delta +BenchmarkEncode/15-8 36.1 17.4 -51.80% +BenchmarkEncode/32-8 72.9 11.9 -83.68% +BenchmarkEncode/128-8 289 21.4 -92.60% +BenchmarkEncode/1k-8 2268 88.5 -96.10% +BenchmarkEncode/16k-8 39110 1254 -96.79% +BenchmarkEncode/128k-8 291260 12965 -95.55% +BenchmarkEncode/1M-8 2277578 119465 -94.75% +BenchmarkEncode/16M-8 37087543 3530380 -90.48% +BenchmarkEncode/128M-8 293611713 28001913 -90.46% +BenchmarkDecode/14-8 53.7 12.6 -76.54% +BenchmarkDecode/32-8 128 12.5 -90.23% +BenchmarkDecode/128-8 481 27.2 -94.35% +BenchmarkDecode/1k-8 4172 168 -95.97% +BenchmarkDecode/16k-8 111989 2543 -97.73% +BenchmarkDecode/128k-8 909077 20339 -97.76% +BenchmarkDecode/1M-8 7275779 164313 -97.74% +BenchmarkDecode/16M-8 116574839 3099822 -97.34% +BenchmarkDecode/128M-8 933871637 24865822 -97.34% + +benchmark old MB/s new MB/s speedup +BenchmarkEncode/15-8 415.07 863.43 2.08x +BenchmarkEncode/32-8 439.14 2690.43 6.13x +BenchmarkEncode/128-8 441.54 5982.92 13.55x +BenchmarkEncode/1k-8 451.49 11572.80 25.63x +BenchmarkEncode/16k-8 418.91 13058.10 31.17x +BenchmarkEncode/128k-8 450.02 10109.53 22.46x +BenchmarkEncode/1M-8 460.39 8777.23 19.06x +BenchmarkEncode/16M-8 452.37 4752.24 10.51x +BenchmarkEncode/128M-8 457.13 4793.16 10.49x +BenchmarkDecode/14-8 260.49 1110.01 4.26x 
+BenchmarkDecode/32-8 248.44 2558.10 10.30x +BenchmarkDecode/128-8 265.95 4697.66 17.66x +BenchmarkDecode/1k-8 245.43 6093.43 24.83x +BenchmarkDecode/16k-8 146.30 6442.09 44.03x +BenchmarkDecode/128k-8 144.18 6444.24 44.70x +BenchmarkDecode/1M-8 144.12 6381.57 44.28x +BenchmarkDecode/16M-8 143.92 5412.31 37.61x +BenchmarkDecode/128M-8 143.72 5397.68 37.56x +``` + +## License + +Unless otherwise noted, the go-hex source files are distributed under the Modified BSD License +found in the LICENSE file. diff --git a/vendor/github.com/tmthrgd/go-hex/hex.go b/vendor/github.com/tmthrgd/go-hex/hex.go new file mode 100644 index 0000000..f4eca0e --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/hex.go @@ -0,0 +1,137 @@ +// Copyright 2016 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a +// Modified BSD License license that can be found in +// the LICENSE file. +// +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hex is an efficient hexadecimal implementation for Golang. +package hex + +import ( + "errors" + "fmt" +) + +var errLength = errors.New("go-hex: odd length hex string") + +var ( + lower = []byte("0123456789abcdef") + upper = []byte("0123456789ABCDEF") +) + +// InvalidByteError values describe errors resulting from an invalid byte in a hex string. +type InvalidByteError byte + +func (e InvalidByteError) Error() string { + return fmt.Sprintf("go-hex: invalid byte: %#U", rune(e)) +} + +// EncodedLen returns the length of an encoding of n source bytes. +func EncodedLen(n int) int { + return n * 2 +} + +// DecodedLen returns the length of a decoding of n source bytes. +func DecodedLen(n int) int { + return n / 2 +} + +// Encode encodes src into EncodedLen(len(src)) +// bytes of dst. As a convenience, it returns the number +// of bytes written to dst, but this value is always EncodedLen(len(src)). +// Encode implements lowercase hexadecimal encoding. +func Encode(dst, src []byte) int { + return RawEncode(dst, src, lower) +} + +// EncodeUpper encodes src into EncodedLen(len(src)) +// bytes of dst. As a convenience, it returns the number +// of bytes written to dst, but this value is always EncodedLen(len(src)). +// EncodeUpper implements uppercase hexadecimal encoding. +func EncodeUpper(dst, src []byte) int { + return RawEncode(dst, src, upper) +} + +// EncodeToString returns the lowercase hexadecimal encoding of src. +func EncodeToString(src []byte) string { + return RawEncodeToString(src, lower) +} + +// EncodeUpperToString returns the uppercase hexadecimal encoding of src. +func EncodeUpperToString(src []byte) string { + return RawEncodeToString(src, upper) +} + +// RawEncodeToString returns the hexadecimal encoding of src for a given +// alphabet. +func RawEncodeToString(src, alpha []byte) string { + dst := make([]byte, EncodedLen(len(src))) + RawEncode(dst, src, alpha) + return string(dst) +} + +// DecodeString returns the bytes represented by the hexadecimal string s. +func DecodeString(s string) ([]byte, error) { + src := []byte(s) + dst := make([]byte, DecodedLen(len(src))) + + if _, err := Decode(dst, src); err != nil { + return nil, err + } + + return dst, nil +} + +// MustDecodeString is like DecodeString but panics if the string cannot be +// parsed. It simplifies safe initialization of global variables holding +// binary data. 
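+//
+// For example (illustrative; the hex string is arbitrary):
+//
+//	var key = MustDecodeString("6368616e676520746869732070617373")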
+func MustDecodeString(str string) []byte { + dst, err := DecodeString(str) + if err != nil { + panic(err) + } + + return dst +} + +func encodeGeneric(dst, src, alpha []byte) { + for i, v := range src { + dst[i*2] = alpha[v>>4] + dst[i*2+1] = alpha[v&0x0f] + } +} + +func decodeGeneric(dst, src []byte) (uint64, bool) { + for i := 0; i < len(src)/2; i++ { + a, ok := fromHexChar(src[i*2]) + if !ok { + return uint64(i * 2), false + } + + b, ok := fromHexChar(src[i*2+1]) + if !ok { + return uint64(i*2 + 1), false + } + + dst[i] = (a << 4) | b + } + + return 0, true +} + +// fromHexChar converts a hex character into its value and a success flag. +func fromHexChar(c byte) (byte, bool) { + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + + return 0, false +} diff --git a/vendor/github.com/tmthrgd/go-hex/hex_amd64.go b/vendor/github.com/tmthrgd/go-hex/hex_amd64.go new file mode 100644 index 0000000..0f9f9a5 --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/hex_amd64.go @@ -0,0 +1,94 @@ +// Copyright 2016 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a +// Modified BSD License license that can be found in +// the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package hex + +import "golang.org/x/sys/cpu" + +// RawEncode encodes src into EncodedLen(len(src)) +// bytes of dst. As a convenience, it returns the number +// of bytes written to dst, but this value is always EncodedLen(len(src)). +// RawEncode implements hexadecimal encoding for a given alphabet. +func RawEncode(dst, src, alpha []byte) int { + if len(alpha) != 16 { + panic("invalid alphabet") + } + + if len(dst) < len(src)*2 { + panic("dst buffer is too small") + } + + if len(src) == 0 { + return 0 + } + + switch { + case cpu.X86.HasAVX: + encodeAVX(&dst[0], &src[0], uint64(len(src)), &alpha[0]) + case cpu.X86.HasSSE41: + encodeSSE(&dst[0], &src[0], uint64(len(src)), &alpha[0]) + default: + encodeGeneric(dst, src, alpha) + } + + return len(src) * 2 +} + +// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual +// number of bytes written to dst. +// +// If Decode encounters invalid input, it returns an error describing the failure. 
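+//
+// Illustrative sketch (src is assumed to hold valid hex input):
+//
+//	dst := make([]byte, DecodedLen(len(src)))
+//	n, err := Decode(dst, src)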
+func Decode(dst, src []byte) (int, error) { + if len(src)%2 != 0 { + return 0, errLength + } + + if len(dst) < len(src)/2 { + panic("dst buffer is too small") + } + + if len(src) == 0 { + return 0, nil + } + + var ( + n uint64 + ok bool + ) + switch { + case cpu.X86.HasAVX: + n, ok = decodeAVX(&dst[0], &src[0], uint64(len(src))) + case cpu.X86.HasSSE41: + n, ok = decodeSSE(&dst[0], &src[0], uint64(len(src))) + default: + n, ok = decodeGeneric(dst, src) + } + + if !ok { + return 0, InvalidByteError(src[n]) + } + + return len(src) / 2, nil +} + +//go:generate go run asm_gen.go + +// This function is implemented in hex_encode_amd64.s +//go:noescape +func encodeAVX(dst *byte, src *byte, len uint64, alpha *byte) + +// This function is implemented in hex_encode_amd64.s +//go:noescape +func encodeSSE(dst *byte, src *byte, len uint64, alpha *byte) + +// This function is implemented in hex_decode_amd64.s +//go:noescape +func decodeAVX(dst *byte, src *byte, len uint64) (n uint64, ok bool) + +// This function is implemented in hex_decode_amd64.s +//go:noescape +func decodeSSE(dst *byte, src *byte, len uint64) (n uint64, ok bool) diff --git a/vendor/github.com/tmthrgd/go-hex/hex_decode_amd64.s b/vendor/github.com/tmthrgd/go-hex/hex_decode_amd64.s new file mode 100644 index 0000000..25d9cef --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/hex_decode_amd64.s @@ -0,0 +1,303 @@ +// Copyright 2016 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a +// Modified BSD License license that can be found in +// the LICENSE file. +// +// Copyright 2005-2016, Wojciech Muła. All rights reserved. +// Use of this source code is governed by a +// Simplified BSD License license that can be found in +// the LICENSE file. +// +// This file is auto-generated - do not modify + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA decodeBase<>+0x00(SB)/8, $0x3030303030303030 +DATA decodeBase<>+0x08(SB)/8, $0x3030303030303030 +DATA decodeBase<>+0x10(SB)/8, $0x2727272727272727 +DATA decodeBase<>+0x18(SB)/8, $0x2727272727272727 +GLOBL decodeBase<>(SB),RODATA,$32 + +DATA decodeToLower<>+0x00(SB)/8, $0x2020202020202020 +DATA decodeToLower<>+0x08(SB)/8, $0x2020202020202020 +GLOBL decodeToLower<>(SB),RODATA,$16 + +DATA decodeHigh<>+0x00(SB)/8, $0x0e0c0a0806040200 +DATA decodeHigh<>+0x08(SB)/8, $0xffffffffffffffff +GLOBL decodeHigh<>(SB),RODATA,$16 + +DATA decodeLow<>+0x00(SB)/8, $0x0f0d0b0907050301 +DATA decodeLow<>+0x08(SB)/8, $0xffffffffffffffff +GLOBL decodeLow<>(SB),RODATA,$16 + +DATA decodeValid<>+0x00(SB)/8, $0xb0b0b0b0b0b0b0b0 +DATA decodeValid<>+0x08(SB)/8, $0xb0b0b0b0b0b0b0b0 +DATA decodeValid<>+0x10(SB)/8, $0xb9b9b9b9b9b9b9b9 +DATA decodeValid<>+0x18(SB)/8, $0xb9b9b9b9b9b9b9b9 +DATA decodeValid<>+0x20(SB)/8, $0xe1e1e1e1e1e1e1e1 +DATA decodeValid<>+0x28(SB)/8, $0xe1e1e1e1e1e1e1e1 +DATA decodeValid<>+0x30(SB)/8, $0xe6e6e6e6e6e6e6e6 +DATA decodeValid<>+0x38(SB)/8, $0xe6e6e6e6e6e6e6e6 +GLOBL decodeValid<>(SB),RODATA,$64 + +DATA decodeToSigned<>+0x00(SB)/8, $0x8080808080808080 +DATA decodeToSigned<>+0x08(SB)/8, $0x8080808080808080 +GLOBL decodeToSigned<>(SB),RODATA,$16 + +TEXT ·decodeAVX(SB),NOSPLIT,$0 + MOVQ dst+0(FP), DI + MOVQ src+8(FP), SI + MOVQ len+16(FP), BX + MOVQ SI, R15 + MOVOU decodeValid<>(SB), X14 + MOVOU decodeValid<>+0x20(SB), X15 + MOVW $65535, DX + CMPQ BX, $16 + JB tail +bigloop: + MOVOU (SI), X0 + VPXOR decodeToSigned<>(SB), X0, X1 + POR decodeToLower<>(SB), X0 + VPXOR decodeToSigned<>(SB), X0, X2 + VPCMPGTB X1, X14, X3 + PCMPGTB decodeValid<>+0x10(SB), X1 + 
VPCMPGTB X2, X15, X4 + PCMPGTB decodeValid<>+0x30(SB), X2 + PAND X4, X1 + POR X2, X3 + POR X1, X3 + PMOVMSKB X3, AX + TESTW AX, DX + JNZ invalid + PSUBB decodeBase<>(SB), X0 + PANDN decodeBase<>+0x10(SB), X4 + PSUBB X4, X0 + VPSHUFB decodeLow<>(SB), X0, X3 + PSHUFB decodeHigh<>(SB), X0 + PSLLW $4, X0 + POR X3, X0 + MOVQ X0, (DI) + SUBQ $16, BX + JZ ret + ADDQ $16, SI + ADDQ $8, DI + CMPQ BX, $16 + JAE bigloop +tail: + MOVQ $16, CX + SUBQ BX, CX + SHRW CX, DX + CMPQ BX, $4 + JB tail_in_2 + JE tail_in_4 + CMPQ BX, $8 + JB tail_in_6 + JE tail_in_8 + CMPQ BX, $12 + JB tail_in_10 + JE tail_in_12 +tail_in_14: + PINSRW $6, 12(SI), X0 +tail_in_12: + PINSRW $5, 10(SI), X0 +tail_in_10: + PINSRW $4, 8(SI), X0 +tail_in_8: + PINSRQ $0, (SI), X0 + JMP tail_conv +tail_in_6: + PINSRW $2, 4(SI), X0 +tail_in_4: + PINSRW $1, 2(SI), X0 +tail_in_2: + PINSRW $0, (SI), X0 +tail_conv: + VPXOR decodeToSigned<>(SB), X0, X1 + POR decodeToLower<>(SB), X0 + VPXOR decodeToSigned<>(SB), X0, X2 + VPCMPGTB X1, X14, X3 + PCMPGTB decodeValid<>+0x10(SB), X1 + VPCMPGTB X2, X15, X4 + PCMPGTB decodeValid<>+0x30(SB), X2 + PAND X4, X1 + POR X2, X3 + POR X1, X3 + PMOVMSKB X3, AX + TESTW AX, DX + JNZ invalid + PSUBB decodeBase<>(SB), X0 + PANDN decodeBase<>+0x10(SB), X4 + PSUBB X4, X0 + VPSHUFB decodeLow<>(SB), X0, X3 + PSHUFB decodeHigh<>(SB), X0 + PSLLW $4, X0 + POR X3, X0 + CMPQ BX, $4 + JB tail_out_2 + JE tail_out_4 + CMPQ BX, $8 + JB tail_out_6 + JE tail_out_8 + CMPQ BX, $12 + JB tail_out_10 + JE tail_out_12 +tail_out_14: + PEXTRB $6, X0, 6(DI) +tail_out_12: + PEXTRB $5, X0, 5(DI) +tail_out_10: + PEXTRB $4, X0, 4(DI) +tail_out_8: + MOVL X0, (DI) + JMP ret +tail_out_6: + PEXTRB $2, X0, 2(DI) +tail_out_4: + PEXTRB $1, X0, 1(DI) +tail_out_2: + PEXTRB $0, X0, (DI) +ret: + MOVB $1, ok+32(FP) + RET +invalid: + BSFW AX, AX + SUBQ R15, SI + ADDQ SI, AX + MOVQ AX, n+24(FP) + MOVB $0, ok+32(FP) + RET + +TEXT ·decodeSSE(SB),NOSPLIT,$0 + MOVQ dst+0(FP), DI + MOVQ src+8(FP), SI + MOVQ len+16(FP), BX + MOVQ SI, R15 + MOVOU decodeValid<>(SB), X14 + MOVOU decodeValid<>+0x20(SB), X15 + MOVW $65535, DX + CMPQ BX, $16 + JB tail +bigloop: + MOVOU (SI), X0 + MOVOU X0, X1 + PXOR decodeToSigned<>(SB), X1 + POR decodeToLower<>(SB), X0 + MOVOU X0, X2 + PXOR decodeToSigned<>(SB), X2 + MOVOU X14, X3 + PCMPGTB X1, X3 + PCMPGTB decodeValid<>+0x10(SB), X1 + MOVOU X15, X4 + PCMPGTB X2, X4 + PCMPGTB decodeValid<>+0x30(SB), X2 + PAND X4, X1 + POR X2, X3 + POR X1, X3 + PMOVMSKB X3, AX + TESTW AX, DX + JNZ invalid + PSUBB decodeBase<>(SB), X0 + PANDN decodeBase<>+0x10(SB), X4 + PSUBB X4, X0 + MOVOU X0, X3 + PSHUFB decodeLow<>(SB), X3 + PSHUFB decodeHigh<>(SB), X0 + PSLLW $4, X0 + POR X3, X0 + MOVQ X0, (DI) + SUBQ $16, BX + JZ ret + ADDQ $16, SI + ADDQ $8, DI + CMPQ BX, $16 + JAE bigloop +tail: + MOVQ $16, CX + SUBQ BX, CX + SHRW CX, DX + CMPQ BX, $4 + JB tail_in_2 + JE tail_in_4 + CMPQ BX, $8 + JB tail_in_6 + JE tail_in_8 + CMPQ BX, $12 + JB tail_in_10 + JE tail_in_12 +tail_in_14: + PINSRW $6, 12(SI), X0 +tail_in_12: + PINSRW $5, 10(SI), X0 +tail_in_10: + PINSRW $4, 8(SI), X0 +tail_in_8: + PINSRQ $0, (SI), X0 + JMP tail_conv +tail_in_6: + PINSRW $2, 4(SI), X0 +tail_in_4: + PINSRW $1, 2(SI), X0 +tail_in_2: + PINSRW $0, (SI), X0 +tail_conv: + MOVOU X0, X1 + PXOR decodeToSigned<>(SB), X1 + POR decodeToLower<>(SB), X0 + MOVOU X0, X2 + PXOR decodeToSigned<>(SB), X2 + MOVOU X14, X3 + PCMPGTB X1, X3 + PCMPGTB decodeValid<>+0x10(SB), X1 + MOVOU X15, X4 + PCMPGTB X2, X4 + PCMPGTB decodeValid<>+0x30(SB), X2 + PAND X4, X1 + POR X2, X3 + POR X1, X3 + PMOVMSKB X3, AX + 
TESTW AX, DX + JNZ invalid + PSUBB decodeBase<>(SB), X0 + PANDN decodeBase<>+0x10(SB), X4 + PSUBB X4, X0 + MOVOU X0, X3 + PSHUFB decodeLow<>(SB), X3 + PSHUFB decodeHigh<>(SB), X0 + PSLLW $4, X0 + POR X3, X0 + CMPQ BX, $4 + JB tail_out_2 + JE tail_out_4 + CMPQ BX, $8 + JB tail_out_6 + JE tail_out_8 + CMPQ BX, $12 + JB tail_out_10 + JE tail_out_12 +tail_out_14: + PEXTRB $6, X0, 6(DI) +tail_out_12: + PEXTRB $5, X0, 5(DI) +tail_out_10: + PEXTRB $4, X0, 4(DI) +tail_out_8: + MOVL X0, (DI) + JMP ret +tail_out_6: + PEXTRB $2, X0, 2(DI) +tail_out_4: + PEXTRB $1, X0, 1(DI) +tail_out_2: + PEXTRB $0, X0, (DI) +ret: + MOVB $1, ok+32(FP) + RET +invalid: + BSFW AX, AX + SUBQ R15, SI + ADDQ SI, AX + MOVQ AX, n+24(FP) + MOVB $0, ok+32(FP) + RET diff --git a/vendor/github.com/tmthrgd/go-hex/hex_encode_amd64.s b/vendor/github.com/tmthrgd/go-hex/hex_encode_amd64.s new file mode 100644 index 0000000..96e6e4c --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/hex_encode_amd64.s @@ -0,0 +1,227 @@ +// Copyright 2016 Tom Thorogood. All rights reserved. +// Use of this source code is governed by a +// Modified BSD License license that can be found in +// the LICENSE file. +// +// Copyright 2005-2016, Wojciech Muła. All rights reserved. +// Use of this source code is governed by a +// Simplified BSD License license that can be found in +// the LICENSE file. +// +// This file is auto-generated - do not modify + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA encodeMask<>+0x00(SB)/8, $0x0f0f0f0f0f0f0f0f +DATA encodeMask<>+0x08(SB)/8, $0x0f0f0f0f0f0f0f0f +GLOBL encodeMask<>(SB),RODATA,$16 + +TEXT ·encodeAVX(SB),NOSPLIT,$0 + MOVQ dst+0(FP), DI + MOVQ src+8(FP), SI + MOVQ len+16(FP), BX + MOVQ alpha+24(FP), DX + MOVOU (DX), X15 + CMPQ BX, $16 + JB tail +bigloop: + MOVOU -16(SI)(BX*1), X0 + VPAND encodeMask<>(SB), X0, X1 + PSRLW $4, X0 + PAND encodeMask<>(SB), X0 + VPUNPCKHBW X1, X0, X3 + PUNPCKLBW X1, X0 + VPSHUFB X0, X15, X1 + VPSHUFB X3, X15, X2 + MOVOU X2, -16(DI)(BX*2) + MOVOU X1, -32(DI)(BX*2) + SUBQ $16, BX + JZ ret + CMPQ BX, $16 + JAE bigloop +tail: + CMPQ BX, $2 + JB tail_in_1 + JE tail_in_2 + CMPQ BX, $4 + JB tail_in_3 + JE tail_in_4 + CMPQ BX, $6 + JB tail_in_5 + JE tail_in_6 + CMPQ BX, $8 + JB tail_in_7 +tail_in_8: + MOVQ (SI), X0 + JMP tail_conv +tail_in_7: + PINSRB $6, 6(SI), X0 +tail_in_6: + PINSRB $5, 5(SI), X0 +tail_in_5: + PINSRB $4, 4(SI), X0 +tail_in_4: + PINSRD $0, (SI), X0 + JMP tail_conv +tail_in_3: + PINSRB $2, 2(SI), X0 +tail_in_2: + PINSRB $1, 1(SI), X0 +tail_in_1: + PINSRB $0, (SI), X0 +tail_conv: + VPAND encodeMask<>(SB), X0, X1 + PSRLW $4, X0 + PAND encodeMask<>(SB), X0 + PUNPCKLBW X1, X0 + VPSHUFB X0, X15, X1 + CMPQ BX, $2 + JB tail_out_1 + JE tail_out_2 + CMPQ BX, $4 + JB tail_out_3 + JE tail_out_4 + CMPQ BX, $6 + JB tail_out_5 + JE tail_out_6 + CMPQ BX, $8 + JB tail_out_7 +tail_out_8: + MOVOU X1, (DI) + SUBQ $8, BX + JZ ret + ADDQ $8, SI + ADDQ $16, DI + JMP tail +tail_out_7: + PEXTRB $13, X1, 13(DI) + PEXTRB $12, X1, 12(DI) +tail_out_6: + PEXTRB $11, X1, 11(DI) + PEXTRB $10, X1, 10(DI) +tail_out_5: + PEXTRB $9, X1, 9(DI) + PEXTRB $8, X1, 8(DI) +tail_out_4: + MOVQ X1, (DI) + RET +tail_out_3: + PEXTRB $5, X1, 5(DI) + PEXTRB $4, X1, 4(DI) +tail_out_2: + PEXTRB $3, X1, 3(DI) + PEXTRB $2, X1, 2(DI) +tail_out_1: + PEXTRB $1, X1, 1(DI) + PEXTRB $0, X1, (DI) +ret: + RET + +TEXT ·encodeSSE(SB),NOSPLIT,$0 + MOVQ dst+0(FP), DI + MOVQ src+8(FP), SI + MOVQ len+16(FP), BX + MOVQ alpha+24(FP), DX + MOVOU (DX), X15 + CMPQ BX, $16 + JB tail +bigloop: + MOVOU -16(SI)(BX*1), X0 + MOVOU 
X0, X1 + PAND encodeMask<>(SB), X1 + PSRLW $4, X0 + PAND encodeMask<>(SB), X0 + MOVOU X0, X3 + PUNPCKHBW X1, X3 + PUNPCKLBW X1, X0 + MOVOU X15, X1 + PSHUFB X0, X1 + MOVOU X15, X2 + PSHUFB X3, X2 + MOVOU X2, -16(DI)(BX*2) + MOVOU X1, -32(DI)(BX*2) + SUBQ $16, BX + JZ ret + CMPQ BX, $16 + JAE bigloop +tail: + CMPQ BX, $2 + JB tail_in_1 + JE tail_in_2 + CMPQ BX, $4 + JB tail_in_3 + JE tail_in_4 + CMPQ BX, $6 + JB tail_in_5 + JE tail_in_6 + CMPQ BX, $8 + JB tail_in_7 +tail_in_8: + MOVQ (SI), X0 + JMP tail_conv +tail_in_7: + PINSRB $6, 6(SI), X0 +tail_in_6: + PINSRB $5, 5(SI), X0 +tail_in_5: + PINSRB $4, 4(SI), X0 +tail_in_4: + PINSRD $0, (SI), X0 + JMP tail_conv +tail_in_3: + PINSRB $2, 2(SI), X0 +tail_in_2: + PINSRB $1, 1(SI), X0 +tail_in_1: + PINSRB $0, (SI), X0 +tail_conv: + MOVOU X0, X1 + PAND encodeMask<>(SB), X1 + PSRLW $4, X0 + PAND encodeMask<>(SB), X0 + PUNPCKLBW X1, X0 + MOVOU X15, X1 + PSHUFB X0, X1 + CMPQ BX, $2 + JB tail_out_1 + JE tail_out_2 + CMPQ BX, $4 + JB tail_out_3 + JE tail_out_4 + CMPQ BX, $6 + JB tail_out_5 + JE tail_out_6 + CMPQ BX, $8 + JB tail_out_7 +tail_out_8: + MOVOU X1, (DI) + SUBQ $8, BX + JZ ret + ADDQ $8, SI + ADDQ $16, DI + JMP tail +tail_out_7: + PEXTRB $13, X1, 13(DI) + PEXTRB $12, X1, 12(DI) +tail_out_6: + PEXTRB $11, X1, 11(DI) + PEXTRB $10, X1, 10(DI) +tail_out_5: + PEXTRB $9, X1, 9(DI) + PEXTRB $8, X1, 8(DI) +tail_out_4: + MOVQ X1, (DI) + RET +tail_out_3: + PEXTRB $5, X1, 5(DI) + PEXTRB $4, X1, 4(DI) +tail_out_2: + PEXTRB $3, X1, 3(DI) + PEXTRB $2, X1, 2(DI) +tail_out_1: + PEXTRB $1, X1, 1(DI) + PEXTRB $0, X1, (DI) +ret: + RET diff --git a/vendor/github.com/tmthrgd/go-hex/hex_other.go b/vendor/github.com/tmthrgd/go-hex/hex_other.go new file mode 100644 index 0000000..fab2321 --- /dev/null +++ b/vendor/github.com/tmthrgd/go-hex/hex_other.go @@ -0,0 +1,36 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 gccgo appengine + +package hex + +// RawEncode encodes src into EncodedLen(len(src)) +// bytes of dst. As a convenience, it returns the number +// of bytes written to dst, but this value is always EncodedLen(len(src)). +// RawEncode implements hexadecimal encoding for a given alphabet. +func RawEncode(dst, src, alpha []byte) int { + if len(alpha) != 16 { + panic("invalid alphabet") + } + + encodeGeneric(dst, src, alpha) + return len(src) * 2 +} + +// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual +// number of bytes written to dst. +// +// If Decode encounters invalid input, it returns an error describing the failure. +func Decode(dst, src []byte) (int, error) { + if len(src)%2 == 1 { + return 0, errLength + } + + if n, ok := decodeGeneric(dst, src); !ok { + return 0, InvalidByteError(src[n]) + } + + return len(src) / 2, nil +} diff --git a/vendor/github.com/uptrace/bun/.gitignore b/vendor/github.com/uptrace/bun/.gitignore new file mode 100644 index 0000000..b5b7923 --- /dev/null +++ b/vendor/github.com/uptrace/bun/.gitignore @@ -0,0 +1,4 @@ +# Patterns for files created by this project. +# For other files, use global gitignore. 
+*.s3db
+.idea
diff --git a/vendor/github.com/uptrace/bun/.prettierrc.yml b/vendor/github.com/uptrace/bun/.prettierrc.yml
new file mode 100644
index 0000000..decea56
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/.prettierrc.yml
@@ -0,0 +1,6 @@
+trailingComma: all
+tabWidth: 2
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/uptrace/bun/CHANGELOG.md b/vendor/github.com/uptrace/bun/CHANGELOG.md
new file mode 100644
index 0000000..2b07325
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/CHANGELOG.md
@@ -0,0 +1,1089 @@
+## [1.2.16](https://github.com/uptrace/bun/compare/v1.2.15...v1.2.16) (2025-11-20)
+
+
+### Bug Fixes
+
+* data race in db clone stats ([e92d910](https://github.com/uptrace/bun/commit/e92d91041144037c270db1f87d7b5750d52d88ab))
+* **db:** data race in db clone stats ([a78f382](https://github.com/uptrace/bun/commit/a78f38251b0ca990e55868953cd85e3db7cadcc5))
+* **db:** move DBStats to noCopyState ([c646241](https://github.com/uptrace/bun/commit/c64624188efb7b049a6e70d064566a6613fb9974))
+* return "custom" for unknown dialects instead of "invalid" ([#1280](https://github.com/uptrace/bun/issues/1280)) ([106cc08](https://github.com/uptrace/bun/commit/106cc081023efe0662375fc9e6b9de9b0b06f6b0)), closes [#1276](https://github.com/uptrace/bun/issues/1276)
+* revert CreateChannel rename ([#1248](https://github.com/uptrace/bun/issues/1248)) ([a5b2ac6](https://github.com/uptrace/bun/commit/a5b2ac63e478da9011640df2612e33094b5fc90b))
+* sql injection [#1228](https://github.com/uptrace/bun/issues/1228) ([#1263](https://github.com/uptrace/bun/issues/1263)) ([c12edf0](https://github.com/uptrace/bun/commit/c12edf090a9f7b17ea04f121c8b25603590e845b))
+* update `SelectQuery.Clone` to properly handle non-nil empty arg slices ([#1299](https://github.com/uptrace/bun/issues/1299)) ([b499cce](https://github.com/uptrace/bun/commit/b499cce68bff4c2c51d8647dd1ca17c0451f468e)), closes [#1298](https://github.com/uptrace/bun/issues/1298)
+
+
+### Features
+
+* add Context to ConnResolver.ResolveConn ([#1275](https://github.com/uptrace/bun/issues/1275)) ([d9f273f](https://github.com/uptrace/bun/commit/d9f273fc119ff2097883fffeb34d2e2c6074accd))
+* add materialize cte support ([#1260](https://github.com/uptrace/bun/issues/1260)) ([16ebb09](https://github.com/uptrace/bun/commit/16ebb0916c2bffbec6bdb2af4c227d83fefdb4ae))
+* add SetValues ([#1252](https://github.com/uptrace/bun/issues/1252)) ([9556d3c](https://github.com/uptrace/bun/commit/9556d3c4fb5fb79d9295a5937d46052a0e07fb2f))
+* add SortDir type to safely build order queries ([#1284](https://github.com/uptrace/bun/issues/1284)) ([2ad0521](https://github.com/uptrace/bun/commit/2ad05214e57548e0b6dccf487401335cbd4e4ed9))
+* add WithQueryHook and deprecated AddQueryHook ([#1272](https://github.com/uptrace/bun/issues/1272)) ([f662c1e](https://github.com/uptrace/bun/commit/f662c1ee6f49306eea50ceceb23d2da5b83d28cc))
+* **migrate:** add BeforeMigration and AfterMigration ([#1273](https://github.com/uptrace/bun/issues/1273)) ([042b10a](https://github.com/uptrace/bun/commit/042b10aa50f6b0f532a59de4ce81fbddb6a3b739))
+* **pgdialect:** add support for RETURNING clause in MERGE statements ([#1258](https://github.com/uptrace/bun/issues/1258)) ([6f4c688](https://github.com/uptrace/bun/commit/6f4c68874cd3324c863253d10ba4989365837b61))
+
+
+
+## [1.2.15](https://github.com/uptrace/bun/compare/v1.2.14...v1.2.15) (2025-07-17)
+
+
+### Bug Fixes
+
+* **pgdriver:** add mandatory space before negative numbers to resolve CVE-2024-34359 ([8067a8f](https://github.com/uptrace/bun/commit/8067a8f13f8d22fb57b76d6800f7aefc12b044cd))
+
+
+### Features
+
+* **db:** rename CleanQueryHook to ResetQueryHooks
([cb17679](https://github.com/uptrace/bun/commit/cb176796f5fbae8b3ea44e67875dd00ecf689425)) +* **db:** support clean query hooks ([a5f19a7](https://github.com/uptrace/bun/commit/a5f19a7c0d68fd44eaff99ebaaeb88ca089d7538)), closes [#1226](https://github.com/uptrace/bun/issues/1226) +* **dialect:** return default on update/delete when create table ([d347b48](https://github.com/uptrace/bun/commit/d347b48c7764a23000a28ca3ad40368b8b89e298)), closes [#1212](https://github.com/uptrace/bun/issues/1212) + + + +## [1.2.14](https://github.com/uptrace/bun/compare/v1.2.13...v1.2.14) (2025-06-16) + + +### Bug Fixes + +* restore q.limit check ([07d32c1](https://github.com/uptrace/bun/commit/07d32c1662015a398322fdbc0dc34c5f0d10ce44)) + + + +## [1.2.13](https://github.com/uptrace/bun/compare/v1.2.12...v1.2.13) (2025-06-11) + + +### Bug Fixes + +* **query:** scanAndCount without model ([07fb7ec](https://github.com/uptrace/bun/commit/07fb7ec540979d0625cfeb771a0679c5982c6e2a)), closes [#1209](https://github.com/uptrace/bun/issues/1209) +* sort fk constraints before appending ([c87fa90](https://github.com/uptrace/bun/commit/c87fa903c56743e24a2cb677e8e96fd5c802fba5)) +* use slices sort ([8555900](https://github.com/uptrace/bun/commit/8555900ad840d9b6e73c8655af4f1b6766bc943b)) + + + +## [1.2.12](https://github.com/uptrace/bun/compare/v1.2.11...v1.2.12) (2025-06-05) + + +### Bug Fixes + +* **automigrate:** append SQL to separate []byte slices ([f44a349](https://github.com/uptrace/bun/commit/f44a349ec61b09f9f0240a923e121cbaa3ab1d14)) +* **gh-1160:** add WithExcludeForeignKeys option ([63141cb](https://github.com/uptrace/bun/commit/63141cb6c9a6d0d2abf4b41eac5b1c6078884326)), closes [#1160](https://github.com/uptrace/bun/issues/1160) +* iss-824 to allow mssql to support non unicode strings ([0565763](https://github.com/uptrace/bun/commit/056576355a0a7ff75f616cedb5d81144f6657a6a)) +* **migrations:** skip template rendering if no data + fix tests ([4055827](https://github.com/uptrace/bun/commit/4055827e1af4f0b7e13879d393c1131ab497d962)) +* **pgdriver:** rename channelOverflowHandler to ChannelOverflowHandler for public API ([65760a9](https://github.com/uptrace/bun/commit/65760a9e648a1ae379982e5d8737d6d864f6a8e3)) +* relation join data race ([37971d7](https://github.com/uptrace/bun/commit/37971d7f83042ab83e52be1c122083f8a98a1efa)) +* report BIGSERIAL ~ BIGINT in pgdialect ([ad7356a](https://github.com/uptrace/bun/commit/ad7356a772324950cf866b86d23771fc53f83505)) +* skip automigrator test early ([5b22710](https://github.com/uptrace/bun/commit/5b22710f0b4d980ebec38fcd306bf459dc1eb615)) +* start sequence with last+1 ([7fbf34a](https://github.com/uptrace/bun/commit/7fbf34a69ff249c72af522331a4f6116f240630a)) + + +### Features + +* add support for netip.Addr and netip.Prefix ([63ccc8f](https://github.com/uptrace/bun/commit/63ccc8f530092c3dfc71179b94a43db452fa54ec)) +* exclude tables using LIKE pattern ([5351f7e](https://github.com/uptrace/bun/commit/5351f7ed4fe53662386e697cc551ba54487da018)) +* **migrations:** support Go templates in SQL migrations ([d92e29e](https://github.com/uptrace/bun/commit/d92e29e459ae2804ad48e1b4f6a8147211a47a57)) +* **pg:** allow user config buffer size of pg's connect ([e2f2650](https://github.com/uptrace/bun/commit/e2f2650950d13442d45694b7cd186b77b4e8e0bb)), closes [#1201](https://github.com/uptrace/bun/issues/1201) +* **pgdriver:** add option for tracing ([80c5e3c](https://github.com/uptrace/bun/commit/80c5e3c684c410dfc02170cfb8671bb8b1db2e35)), closes 
[#1150](https://github.com/uptrace/bun/issues/1150) +* **pgdriver:** add overflow handler to listener channel ([6f0e3a1](https://github.com/uptrace/bun/commit/6f0e3a1d33de5a61625d22ba6464bfe5da404a11)) +* set notnull=true for autoincrement columns ([1bd5dd7](https://github.com/uptrace/bun/commit/1bd5dd73ce943235a403c5896b6e70401b194093)) +* support changing column type to SERIAL ([136b480](https://github.com/uptrace/bun/commit/136b480e6835dd9a12b4925f57225fb73d0aa7ae)) + + + +## [1.2.11](https://github.com/uptrace/bun/compare/v1.2.10...v1.2.11) (2025-03-05) + + +### Bug Fixes + +* always use the value returned by implemented driver.Valuer ([0c29af6](https://github.com/uptrace/bun/commit/0c29af65f17891d15019e60f64704e9c45204062)) +* handle driver.Valuer in getRealValue ([fa37c7b](https://github.com/uptrace/bun/commit/fa37c7b91e570ca032d01d7311245a07b52dbed8)) +* only handle pointer-based driver.Valuer implementations ([40b20cd](https://github.com/uptrace/bun/commit/40b20cd207a22b8b8f86ec36c62385f6293c192a)) +* **schema:** determine whether a field is ambiguous with prefix ([83f6f99](https://github.com/uptrace/bun/commit/83f6f992bf38a654207b27fcc3bd4ea1984c9acb)) +* **schema:** process embed with struct ([a06003d](https://github.com/uptrace/bun/commit/a06003d867168a663b1ad223bbed85b3d94fd920)), closes [#1136](https://github.com/uptrace/bun/issues/1136) +* **test:** define uuid type for pointer primary keys ([3b72bd4](https://github.com/uptrace/bun/commit/3b72bd4cd045aa8061b7ca8b1cb00eae6c4016f0)) +* **test:** use varchar to be compatible with multiple databases ([287b0e3](https://github.com/uptrace/bun/commit/287b0e386feeab7391b749723c32377e5315a870)) +* **typo:** minor typo fix in `migrate/auto.go` ([368ed3f](https://github.com/uptrace/bun/commit/368ed3f2e2a65fbad50b26080efb33366b793e83)) + + + +## [1.2.10](https://github.com/uptrace/bun/compare/v1.2.9...v1.2.10) (2025-02-18) + + +### Bug Fixes + +* clone query in scanAndCountConcurrently to avoid data race ([66fdc39](https://github.com/uptrace/bun/commit/66fdc39b33a482534920578ed8c7f88c3f142a3d)), closes [#1117](https://github.com/uptrace/bun/issues/1117) +* **create_table:** avoid creating unintended foreign keys ([#1130](https://github.com/uptrace/bun/issues/1130)) ([187743b](https://github.com/uptrace/bun/commit/187743b1e743755cd57a9cc11e7f2f9cea0a7dcd)) +* **pgdialect:** handle []*time.Time arrays ([4c4e12a](https://github.com/uptrace/bun/commit/4c4e12aa7f27cf49189427da5104afb436af4348)) +* **pgdialect:** handle nill array on jsonb column ([0dc4e3e](https://github.com/uptrace/bun/commit/0dc4e3edb3f9021b02ed6f80d54cb88d2ef9b025)) +* **pgdialect:** postgres syntax errors for slices of pointers and json arrays [#877](https://github.com/uptrace/bun/issues/877) ([1422b77](https://github.com/uptrace/bun/commit/1422b7726a24ac55ee6ca0e15ec084c34f7b1bd6)) +* process embedded's struct field for table ([b410e42](https://github.com/uptrace/bun/commit/b410e420ab888d87d2b6ebb014f13baae8fdc2b7)), closes [#1125](https://github.com/uptrace/bun/issues/1125) + + +### Features + +* add DBReplica and use it in ReadWriteConnResolver ([95c825e](https://github.com/uptrace/bun/commit/95c825e1215b26456caeebf1893d3b6183202bae)) +* allow setting a query comment through a context value ([9f5ccfe](https://github.com/uptrace/bun/commit/9f5ccfea7144c7ced877e3ce6972c589c5c6c3e6)) +* **bunotel:** ability to override span names ([04e2125](https://github.com/uptrace/bun/commit/04e21253298ee495179754fbbfccc047468a034d)) +* **bunotel:** always record affected rows 
([960a304](https://github.com/uptrace/bun/commit/960a3046ad0cc8ea548dc448380549f610cb5da4)) + + + +## [1.2.9](https://github.com/uptrace/bun/compare/v1.2.8...v1.2.9) (2025-01-26) + + +### Bug Fixes + +* apply join condition to select with count ([e77b9e7](https://github.com/uptrace/bun/commit/e77b9e72fa5ae8e173d506a4e154ba64214c4aff)), closes [#597](https://github.com/uptrace/bun/issues/597) +* build ([702e525](https://github.com/uptrace/bun/commit/702e525e30ec93b6d4611359518e1008b67744af)) +* individual replica timeout ([9f5e8b1](https://github.com/uptrace/bun/commit/9f5e8b1c46673bd1779bd4309a28db33dcd695bf)) +* test ([dfc4059](https://github.com/uptrace/bun/commit/dfc405901907419d043bb6ced3ad20c131c1b972)) + + +### Features + +* add feature flag AlterColumnExists ([fc35e12](https://github.com/uptrace/bun/commit/fc35e1222242b3d581f0b7496a9021aadfc50b07)), closes [#704](https://github.com/uptrace/bun/issues/704) +* add Options ([815e11a](https://github.com/uptrace/bun/commit/815e11a023d2babf65d528a20ddffc7628636e7e)) +* allow to specify read-only replica for SELECTs ([cbbe1e9](https://github.com/uptrace/bun/commit/cbbe1e94fd0c72d1870395a663c8053d7e8c6ace)) +* downgrade to use the field in has-many-relation ([91e0d27](https://github.com/uptrace/bun/commit/91e0d2719a5a20b3208cea0232e2dbcb452d6c23)), closes [#1107](https://github.com/uptrace/bun/issues/1107) +* make WithReadOnlyReplica variadic ([4cbb15a](https://github.com/uptrace/bun/commit/4cbb15a53e566e03284253aa46be372338968954)) +* **pgdialect:** allow to convert uint to int ([7d22ddd](https://github.com/uptrace/bun/commit/7d22ddd263b28b9fd6e172e0208c124b7c56f111)) +* **pgdriver:** improve otel instrumentation ([c40e4f3](https://github.com/uptrace/bun/commit/c40e4f3c50c710903236dc89b56a843a0351a21a)) + + + +## [1.2.8](https://github.com/uptrace/bun/compare/v1.2.7...v1.2.8) (2025-01-06) + + +### Bug Fixes + +* comment string zero bytes filtering ([34dfd68](https://github.com/uptrace/bun/commit/34dfd684e371c24b9f59e9b13ef57660931f0bde)) +* get m2m table's structKey with driver.Valuer ([f107314](https://github.com/uptrace/bun/commit/f1073147dc73d01dcf8a6ee9252d354ff06a1062)), closes [#1100](https://github.com/uptrace/bun/issues/1100) +* return error when use dest with has-many/many-to-many ([8296774](https://github.com/uptrace/bun/commit/829677486b502e6d5d2ae37814488ae9f2c7386e)), closes [#606](https://github.com/uptrace/bun/issues/606) +* support scan float32 to float32/float64 ([a52e733](https://github.com/uptrace/bun/commit/a52e7339a93f84468878dcaffc42536faa44efae)), closes [#1087](https://github.com/uptrace/bun/issues/1087) + + +### Features + +* add RelationWithOpts method to SelectQuery ([dd3ef52](https://github.com/uptrace/bun/commit/dd3ef522c8a9c656958b73ee5d546854fb7c6edf)) +* enhance debugging by adding query comments in headers ([1376d18](https://github.com/uptrace/bun/commit/1376d1870bfe3d89e3630203787f1e87c503d5df)) +* sort fields by struct ([5edb672](https://github.com/uptrace/bun/commit/5edb672e320be9b210f06d25c4f4b9e761c1c526)), closes [#1095](https://github.com/uptrace/bun/issues/1095) + + + +## [1.2.7](https://github.com/uptrace/bun/compare/v1.2.6...v1.2.7) (2025-01-01) + + +### Bug Fixes + +* do not create new migrations if nothing to migrate ([5cc961d](https://github.com/uptrace/bun/commit/5cc961d6cc461ad3534728fc4d3cae12bf8b736e)) +* has many relation with driver.Valuer ([cb8c42c](https://github.com/uptrace/bun/commit/cb8c42cd3f65d95865c76a594abad815eea1df3c)) +* improve range type to support driver.Valuer and 
sql.Scanner ([856e12b](https://github.com/uptrace/bun/commit/856e12b0d37275a6aa247370f6a8231fd89ca3e7)) +* pass correct 'transactional' parameter ([ebdef1b](https://github.com/uptrace/bun/commit/ebdef1b0e9d33a5ca475ab4c2ec2fb44d11d4595)) +* **pgdialect:** remove unsigned integer conversion ([ab3c679](https://github.com/uptrace/bun/commit/ab3c679d529dd20d44e789dc6f1d89f9510bde0b)), closes [uptrace/bun#624](https://github.com/uptrace/bun/issues/624) +* remove unused param on table.go and tables.go: canAddr ([d563e2d](https://github.com/uptrace/bun/commit/d563e2dbe95caeb0e00ad1b3e82283431747fe7b)) +* replace the link to docs repo in CONTRIBUTING.md ([e120096](https://github.com/uptrace/bun/commit/e12009662ae1ddefcc1337cc5e32e73d77c7def0)) +* trim surrounding '' in string literal in DEFAULT clause ([a0dff72](https://github.com/uptrace/bun/commit/a0dff72b6ab0ca24d00c96c923046200dd6112ed)) + + +### Features + +* add an ordered map to remove unnecessary dependencies ([9fea143](https://github.com/uptrace/bun/commit/9fea1437d8344d836670e802fd12d3476e8cad86)) +* support disable dialect's feature ([5343bd7](https://github.com/uptrace/bun/commit/5343bd7fc4ceda866a7d607388ebb7a89f7f5823)) + + + +## [1.2.6](https://github.com/uptrace/bun/compare/v1.2.5...v1.2.6) (2024-11-20) + + +### Bug Fixes + +* append IDENTITY to ADD COLUMN statement if needed ([694f873](https://github.com/uptrace/bun/commit/694f873d61ed8d2f09032ae0c0dbec4b71c3719e)) +* **ci:** prune stale should be executed at 3 AM every day ([0cedcb0](https://github.com/uptrace/bun/commit/0cedcb068229b63041a4f48de12bb767c8454048)) +* cleanup after testUniqueRenamedTable ([b1ae32e](https://github.com/uptrace/bun/commit/b1ae32e9e9f45ff2a66e50bfd13bedcf6653d874)) +* fix go.mod of oracledialect ([89e21ea](https://github.com/uptrace/bun/commit/89e21eab362c60511cca00890ae29551a2ba7c46)) +* has many relationship with multiple columns ([1664b2c](https://github.com/uptrace/bun/commit/1664b2c07a5f6cfd3b6730e5005373686e9830a6)) +* ignore case for type equivalence ([c3253a5](https://github.com/uptrace/bun/commit/c3253a5c59b078607db9e216ddc11afdef546e05)) +* implement DefaultSchema for Oracle dialect ([d08fa40](https://github.com/uptrace/bun/commit/d08fa40cc87d67296a83a77448ea511531fc8cdd)) +* **oracledialect:** add go.mod file so the dialect is released properly ([#1043](https://github.com/uptrace/bun/issues/1043)) ([1bb5597](https://github.com/uptrace/bun/commit/1bb5597f1a32f5d693101ef4a62e25d99f5b9db5)) +* **oracledialect:** update go.mod by go mod tidy to fix tests ([7f90a15](https://github.com/uptrace/bun/commit/7f90a15c51a2482dda94226dd13b913d6b470a29)) +* **pgdialect:** array value quoting ([892c416](https://github.com/uptrace/bun/commit/892c416272a8428c592896d65d3ad51a6f2356d8)) +* remove schema name from t.Name during bun-schema inspection ([31ed582](https://github.com/uptrace/bun/commit/31ed58254ad08143d88684672acd33ce044ea5a9)) +* rename column only if the name does not exist in 'target' ([fed6012](https://github.com/uptrace/bun/commit/fed6012d177e55b8320b31ef37fc02a0cbf0b9f5)) +* support embed with tag Unique ([3acd6dd](https://github.com/uptrace/bun/commit/3acd6dd8546118d7b867ca796a5e56311edad070)) +* update oracledialect/version.go in release.sh ([bcd070f](https://github.com/uptrace/bun/commit/bcd070f48a75d0092a5620261658c9c5994f0bf6)) +* update schema.Field names ([9b810de](https://github.com/uptrace/bun/commit/9b810dee4b1a721efb82c913099f39f52c44eb57)) + + +### Features + +* add and drop columns 
([3fdd5b8](https://github.com/uptrace/bun/commit/3fdd5b8f635f849a74e78c665274609f75245b19)) +* add and drop IDENTITY ([dd83779](https://github.com/uptrace/bun/commit/dd837795c31490fd8816eec0e9833e79fafdda32)) +* add support type for net/netip.addr and net/netip.prefix ([#1028](https://github.com/uptrace/bun/issues/1028)) ([95c4a8e](https://github.com/uptrace/bun/commit/95c4a8ebd634e1e99114727a7b157eeeb9297ee9)) +* **automigrate:** detect renamed tables ([c03938f](https://github.com/uptrace/bun/commit/c03938ff5e9fa2f653e4c60668b1368357d2de10)) +* change column type ([3cfd8c6](https://github.com/uptrace/bun/commit/3cfd8c62125786aaf6f493acc5b39f4d3db3d628)) +* **ci:** support release on osx ([435510b](https://github.com/uptrace/bun/commit/435510b0a73b0d9e6d06e3e3c3f0fa4379e9ed8c)) +* create sql migrations and apply them ([1bf7cfd](https://github.com/uptrace/bun/commit/1bf7cfd067e0e26ae212b0f7421e5abc6f67fb4f)) +* create transactional migration files ([c3320f6](https://github.com/uptrace/bun/commit/c3320f624830dc2fe99af2c7cbe492b2a83f9e4a)) +* detect Create/Drop table ([408859f](https://github.com/uptrace/bun/commit/408859f07be38236b39a00909cdce55d49f6f824)) +* detect modified relations ([a918dc4](https://github.com/uptrace/bun/commit/a918dc472a33dd24c5fffd4d048bcf49f2e07a42)) +* detect renamed columns ([886d0a5](https://github.com/uptrace/bun/commit/886d0a5b18aba272f1c86af2a2cf68ce4c8879f2)) +* detect renamed tables ([8857bab](https://github.com/uptrace/bun/commit/8857bab54b94170d218633f3b210d379e4e51a21)) +* enhance Apply method to accept multiple functions ([7823f2f](https://github.com/uptrace/bun/commit/7823f2f24c814e104dc59475156255c7b3b26144)) +* implement fmt.Stringer queries ([5060e47](https://github.com/uptrace/bun/commit/5060e47db13451a982e48d0f14055a58ba60b472)) +* improve FK handling ([a822fc5](https://github.com/uptrace/bun/commit/a822fc5f8ae547b7cd41e1ca35609d519d78598b)) +* include target schema name in migration name ([ac8d221](https://github.com/uptrace/bun/commit/ac8d221e6443b469e794314c5fc189250fa542d5)) +* **mariadb:** support RETURNING clause in DELETE statement ([b8dec9d](https://github.com/uptrace/bun/commit/b8dec9d9a06124696bd5ee2abbf33f19087174b6)) +* migrate FKs ([4c1dfdb](https://github.com/uptrace/bun/commit/4c1dfdbe99c73d0c0f2d7b1f8b11adf30c6a41f7)) +* **mysql:** support ORDER BY and LIMIT clauses in UPDATE and DELETE statements ([de71bed](https://github.com/uptrace/bun/commit/de71bed9252980648269af85b7a51cbc464ce710)) +* support modifying primary keys ([a734629](https://github.com/uptrace/bun/commit/a734629fa285406038cbe4a50798626b5ac08539)) +* support UNIQUE constraints ([3c4d5d2](https://github.com/uptrace/bun/commit/3c4d5d2c47be4652fb9b5cf1c6bd7b6c0a437287)) +* use *bun.DB in MigratorDialect ([a8788bf](https://github.com/uptrace/bun/commit/a8788bf62cbcc954a08532c299c774262de7a81d)) + + + +## [1.2.5](https://github.com/uptrace/bun/compare/v1.2.3...v1.2.5) (2024-10-26) + + +### Bug Fixes + +* allow Limit() without Order() with MSSQL ([#1009](https://github.com/uptrace/bun/issues/1009)) ([1a46ddc](https://github.com/uptrace/bun/commit/1a46ddc0d3ca0bdc60ca8be5ad1886799d14c8b0)) +* copy bytes in mapModel.Scan ([#1030](https://github.com/uptrace/bun/issues/1030)) ([#1032](https://github.com/uptrace/bun/issues/1032)) ([39fda4e](https://github.com/uptrace/bun/commit/39fda4e3d341e59e4955f751cb354a939e57c1b1)) +* fix issue with has-many join and pointer fields ([#950](https://github.com/uptrace/bun/issues/950)) ([#983](https://github.com/uptrace/bun/issues/983)) 
([cbc5177](https://github.com/uptrace/bun/commit/cbc517792ba6cdcef1828f3699d3d4dfe3c5e0eb)) +* restore explicit column: name override ([#984](https://github.com/uptrace/bun/issues/984)) ([169f258](https://github.com/uptrace/bun/commit/169f258a9460cad451f3025d2ef8df1bbd42a003)) +* return column option back ([#1036](https://github.com/uptrace/bun/issues/1036)) ([a3ccbea](https://github.com/uptrace/bun/commit/a3ccbeab39151d3eed6cb245fe15cfb5d71ba557)) +* sql.NullString mistaken as custom struct ([#1019](https://github.com/uptrace/bun/issues/1019)) ([87c77b8](https://github.com/uptrace/bun/commit/87c77b8911f2035b0ee8ea96356a2c7600b5b94d)) +* typos ([#1026](https://github.com/uptrace/bun/issues/1026)) ([760de7d](https://github.com/uptrace/bun/commit/760de7d0fad15dc761475670a4dde056aef9210d)) + + +### Features + +* add transaction isolation level support to pgdriver ([#1034](https://github.com/uptrace/bun/issues/1034)) ([3ef44ce](https://github.com/uptrace/bun/commit/3ef44ce1cdd969a21b76d6c803119cf12c375cb0)) + + +### Performance Improvements + +* refactor SelectQuery.ScanAndCount to optimize performance when there is no limit and offset ([#1035](https://github.com/uptrace/bun/issues/1035)) ([8638613](https://github.com/uptrace/bun/commit/86386135897485bbada6c50ec9a2743626111433)) + + + +## [1.2.4](https://github.com/uptrace/bun/compare/v1.2.3...v1.2.4) (2024-10-26) + + +### Bug Fixes + +* allow Limit() without Order() with MSSQL ([#1009](https://github.com/uptrace/bun/issues/1009)) ([1a46ddc](https://github.com/uptrace/bun/commit/1a46ddc0d3ca0bdc60ca8be5ad1886799d14c8b0)) +* copy bytes in mapModel.Scan ([#1030](https://github.com/uptrace/bun/issues/1030)) ([#1032](https://github.com/uptrace/bun/issues/1032)) ([39fda4e](https://github.com/uptrace/bun/commit/39fda4e3d341e59e4955f751cb354a939e57c1b1)) +* return column option back ([#1036](https://github.com/uptrace/bun/issues/1036)) ([a3ccbea](https://github.com/uptrace/bun/commit/a3ccbeab39151d3eed6cb245fe15cfb5d71ba557)) +* sql.NullString mistaken as custom struct ([#1019](https://github.com/uptrace/bun/issues/1019)) ([87c77b8](https://github.com/uptrace/bun/commit/87c77b8911f2035b0ee8ea96356a2c7600b5b94d)) +* typos ([#1026](https://github.com/uptrace/bun/issues/1026)) ([760de7d](https://github.com/uptrace/bun/commit/760de7d0fad15dc761475670a4dde056aef9210d)) + + +### Features + +* add transaction isolation level support to pgdriver ([#1034](https://github.com/uptrace/bun/issues/1034)) ([3ef44ce](https://github.com/uptrace/bun/commit/3ef44ce1cdd969a21b76d6c803119cf12c375cb0)) + + +### Performance Improvements + +* refactor SelectQuery.ScanAndCount to optimize performance when there is no limit and offset ([#1035](https://github.com/uptrace/bun/issues/1035)) ([8638613](https://github.com/uptrace/bun/commit/86386135897485bbada6c50ec9a2743626111433)) + + + +## [1.2.3](https://github.com/uptrace/bun/compare/v1.2.2...v1.2.3) (2024-08-31) + + + +## [1.2.2](https://github.com/uptrace/bun/compare/v1.2.1...v1.2.2) (2024-08-29) + + +### Bug Fixes + +* gracefully handle empty hstore in pgdialect ([#1010](https://github.com/uptrace/bun/issues/1010)) ([2f73d8a](https://github.com/uptrace/bun/commit/2f73d8a8e16c8718ebfc956036d9c9a01a0888bc)) +* number each unit test ([#974](https://github.com/uptrace/bun/issues/974)) ([b005dc2](https://github.com/uptrace/bun/commit/b005dc2a9034715c6f59dcfc8e76aa3b85df38ab)) + + +### Features + +* add ModelTableExpr to TruncateTableQuery ([#969](https://github.com/uptrace/bun/issues/969)) 
([7bc330f](https://github.com/uptrace/bun/commit/7bc330f152cf0d9dc30956478e2731ea5816f012)) + + + +## [1.2.1](https://github.com/uptrace/bun/compare/v1.2.0...v1.2.1) (2024-04-02) + + + +# [1.2.0](https://github.com/uptrace/bun/compare/v1.1.17...v1.2.0) (2024-04-02) + + +### Bug Fixes + +* embedding of scanonly fields ([ed6ed74](https://github.com/uptrace/bun/commit/ed6ed74d5379ea6badb09cc37709211a51f5792b)) +* **table:** allow alt annotation ([#956](https://github.com/uptrace/bun/issues/956)) ([8a0397b](https://github.com/uptrace/bun/commit/8a0397b6e2219909d6b00d258eb7934170058edd)) +* transactional migration file extension ([#959](https://github.com/uptrace/bun/issues/959)) ([921b15b](https://github.com/uptrace/bun/commit/921b15b80110d28251a9210c77397d29924ffbc5)) + + +### Features + +* Allow overriding of Warn and Deprecated loggers ([#952](https://github.com/uptrace/bun/issues/952)) ([0e9d737](https://github.com/uptrace/bun/commit/0e9d737e4ca2deb86930237ee32a39cf3f7e8157)) +* enable SNI ([#953](https://github.com/uptrace/bun/issues/953)) ([4071ffb](https://github.com/uptrace/bun/commit/4071ffb5bcb1b233cda239c92504d8139dcf1d2f)) +* **idb:** add NewMerge method to IDB ([#966](https://github.com/uptrace/bun/issues/966)) ([664e2f1](https://github.com/uptrace/bun/commit/664e2f154f1153d2a80cd062a5074f1692edaee7)) + + + +## [1.1.17](https://github.com/uptrace/bun/compare/v1.1.16...v1.1.17) (2024-01-11) + + +### Features + +* add CreateTxSQLMigrations function ([#916](https://github.com/uptrace/bun/issues/916)) ([c68ec7c](https://github.com/uptrace/bun/commit/c68ec7cfc418959eb7c79028be7ac91f97d462ef)) +* add Join to UpdateQuery ([#908](https://github.com/uptrace/bun/issues/908)) ([8c4d8be](https://github.com/uptrace/bun/commit/8c4d8be3aa4e64582698b37fd21434b8960dddc0)) +* bunslog.QueryHook for Bun logging using `log/slog` ([#904](https://github.com/uptrace/bun/issues/904)) ([4953367](https://github.com/uptrace/bun/commit/495336731da0a995aa28c7bc84345c7825408e48)) +* dbfixture.New to accept IDB interface ([#900](https://github.com/uptrace/bun/issues/900)) ([2dee174](https://github.com/uptrace/bun/commit/2dee174bc4d09a45caeeede2885306e5fd10002d)) + + + +## [1.1.16](https://github.com/uptrace/bun/compare/v1.1.15...v1.1.16) (2023-09-16) + + +### Reverts + +* Revert "fix: "model does not have column" error (#850)" ([387228e](https://github.com/uptrace/bun/commit/387228e85d22dfcf3659f4631dfa87106d7ef45f)), closes [#850](https://github.com/uptrace/bun/issues/850) + + + +## [1.1.15](https://github.com/uptrace/bun/compare/v1.1.14...v1.1.15) (2023-09-10) + + +### Bug Fixes + +* "model does not have column" error ([#850](https://github.com/uptrace/bun/issues/850)) ([16367aa](https://github.com/uptrace/bun/commit/16367aabb34b98766d28e0678f9d47710f451fae)) +* alloc when mounting ([#891](https://github.com/uptrace/bun/issues/891)) ([f2256f1](https://github.com/uptrace/bun/commit/f2256f10a1d328fb924ca79cde76e77641398573)) +* index hints have to be specified following a table name ([4a2ae85](https://github.com/uptrace/bun/commit/4a2ae853a1509bb300bc2d96471505caee799e43)) +* make Rows.Close to drain messages ([5ceba07](https://github.com/uptrace/bun/commit/5ceba076668eb7aaddb1d8a56202256d5e6c1ead)) +* run hooks on Rows ([#892](https://github.com/uptrace/bun/issues/892)) ([f652b3d](https://github.com/uptrace/bun/commit/f652b3d399a3dc46c856eb8c0f10140a12ea4310)) +* scan error [#709](https://github.com/uptrace/bun/issues/709) ([#837](https://github.com/uptrace/bun/issues/837)) 
([b82afa5](https://github.com/uptrace/bun/commit/b82afa52633b2a1b352db6de4ff0d369d5468a07)) + + +### Features + +* add bun.NullZero ([786bb6b](https://github.com/uptrace/bun/commit/786bb6bfeba3c12f8b28579d61e4794d9fb3e373)) +* **bunotel:** add options for set otel providers ([#836](https://github.com/uptrace/bun/issues/836)) ([806e632](https://github.com/uptrace/bun/commit/806e6323f60b4703b03a71c113c263d0afc95b35)) + + + +## [1.1.14](https://github.com/uptrace/bun/compare/v1.1.13...v1.1.14) (2023-05-24) + + +### Bug Fixes + +* enable CompositeIn for MySQL ([9f377b5](https://github.com/uptrace/bun/commit/9f377b5e744cb38ef4aadd61213855c009e47354)) + + + +## [1.1.13](https://github.com/uptrace/bun/compare/v1.1.12...v1.1.13) (2023-05-06) + + +### Bug Fixes + +* bunbig.Int.Scan typo ([7ddabb8](https://github.com/uptrace/bun/commit/7ddabb8c667f50032bc0bb2523a287efbe0851e7)) +* compare full MySQL version ([096fabe](https://github.com/uptrace/bun/commit/096fabefa114202d3601ad8e456f5e491a4e3787)) +* enable delete table alias for MySQL >= 8.0.16 ([77a600b](https://github.com/uptrace/bun/commit/77a600bc060154fb91aa68e68ba6a8875e5b10fb)) +* incorrect table relationship panic message [#791](https://github.com/uptrace/bun/issues/791) ([ad41888](https://github.com/uptrace/bun/commit/ad4188862eeaab30fc7c48d3224b5a786557aec5)) +* should rollback if migrate using transaction and got an err (thanks [@bayshark](https://github.com/bayshark)) ([e7a119b](https://github.com/uptrace/bun/commit/e7a119b1b8911d8bf059bb271c90ad4a5f5f02be)) + + +### Features + +* add option to customize Go migration template ([f31bf73](https://github.com/uptrace/bun/commit/f31bf739b9c7a0383411b9e67cba96c858795c68)) +* expose Exec(…) method for RawQuery ([11192c8](https://github.com/uptrace/bun/commit/11192c83f932eb7421ef09e06859a7f171de7803)) +* prefix migration files with 1 upto 14 digits ([b74b671](https://github.com/uptrace/bun/commit/b74b6714bb6a83e470e21801c97cc40e20acfb50)) +* rename option ([9353a3f](https://github.com/uptrace/bun/commit/9353a3f921c038fdf4a90665f1b0a9d0d03dc182)) + + + +## [1.1.12](https://github.com/uptrace/bun/compare/v1.1.11...v1.1.12) (2023-02-20) + + + +## [1.1.11](https://github.com/uptrace/bun/compare/v1.1.10...v1.1.11) (2023-02-01) + + +### Bug Fixes + +* add support for inserting values with Unicode encoding for mssql dialect ([e98c6c0](https://github.com/uptrace/bun/commit/e98c6c0f033b553bea3bbc783aa56c2eaa17718f)) +* fix relation tag ([a3eedff](https://github.com/uptrace/bun/commit/a3eedff49700490d4998dcdcdc04f554d8f17166)) + + + +## [1.1.10](https://github.com/uptrace/bun/compare/v1.1.9...v1.1.10) (2023-01-16) + + +### Bug Fixes + +* allow QueryEvent to better detect operations in raw queries ([8e44735](https://github.com/uptrace/bun/commit/8e4473538364bae6562055d35e94c3e9c0b77691)) +* append default VARCHAR length instead of hardcoding it in the type definition ([e5079c7](https://github.com/uptrace/bun/commit/e5079c70343ba8c8b410aed23ac1d1ae5a2c9ff6)) +* prevent panic when use pg array with custom database type ([67e4412](https://github.com/uptrace/bun/commit/67e4412a972a9ed5f3a1d07c66957beedbc8a8a3)) +* properly return sql.ErrNoRows when scanning []byte ([996fead](https://github.com/uptrace/bun/commit/996fead2595fbcaff4878b77befe6709a54b3a4d)) + + +### Features + +* mssql output support for update or delete query ([#718](https://github.com/uptrace/bun/issues/718)) ([08876b4](https://github.com/uptrace/bun/commit/08876b4d420e761cbfa658aa6bb89b3f7c62c240)) +* add Err method to query builder 
([c722c90](https://github.com/uptrace/bun/commit/c722c90f3dce2642ca4f4c2ab3f9a35cd496b557)) +* add support for time.Time array in Postgres ([3dd6f3b](https://github.com/uptrace/bun/commit/3dd6f3b2ac1bfbcda08240dc1676647b61715a9c)) +* mssql and pg merge query ([#723](https://github.com/uptrace/bun/issues/723)) ([deea764](https://github.com/uptrace/bun/commit/deea764d9380b16aad34228aa32717d10f2a4bab)) +* setError on attempt to set non-positive .Varchar() ([3335e0b](https://github.com/uptrace/bun/commit/3335e0b9d6d3f424145e1f715223a0fffe773d9a)) + + +### Reverts + +* go 1.18 ([67a4488](https://github.com/uptrace/bun/commit/67a448897eaaf1ebc54d629dfd3b2509b35da352)) + + + +## [1.1.9](https://github.com/uptrace/bun/compare/v1.1.8...v1.1.9) (2022-11-23) + + +### Bug Fixes + +* adding dialect override for append-bool ([#695](https://github.com/uptrace/bun/issues/695)) ([338f2f0](https://github.com/uptrace/bun/commit/338f2f04105ad89e64530db86aeb387e2ad4789e)) +* don't call hooks twice for whereExists ([9057857](https://github.com/uptrace/bun/commit/90578578e717f248e4b6eb114c5b495fd8d4ed41)) +* don't lock migrations when running Migrate and Rollback ([69a7354](https://github.com/uptrace/bun/commit/69a7354d987ff2ed5338c9ef5f4ce320724299ab)) +* **query:** make WhereDeleted compatible with ForceDelete ([299c3fd](https://github.com/uptrace/bun/commit/299c3fd57866aaecd127a8f219c95332898475db)), closes [#673](https://github.com/uptrace/bun/issues/673) +* relation join soft delete SQL generate ([a98f4e9](https://github.com/uptrace/bun/commit/a98f4e9f2bbdbc2b81cd13aa228a1a91eb905ba2)) + + +### Features + +* add migrate.Exec ([d368bbe](https://github.com/uptrace/bun/commit/d368bbe52bb1ee3dabf0aada190bf967eec10255)) +* **update:** "skipupdate" while bulk ([1a32b2f](https://github.com/uptrace/bun/commit/1a32b2ffbd5bc9a8d8b5978dd0f16c9fb79242ee)) +* **zerolog:** added zerolog hook ([9d2267d](https://github.com/uptrace/bun/commit/9d2267d414b47164ab6ceada55bf311ad548a6b0)) + + + +## [1.1.8](https://github.com/uptrace/bun/compare/v1.1.7...v1.1.8) (2022-08-29) + + +### Bug Fixes + +* **bunotel:** handle option attributes ([#656](https://github.com/uptrace/bun/issues/656)) ([9f1e0bd](https://github.com/uptrace/bun/commit/9f1e0bd19fc0300f12996b3e6595f093024e06b6)) +* driver.Valuer returns itself causes stackoverflow ([c9f51d3](https://github.com/uptrace/bun/commit/c9f51d3e2dabed0c29c26a4221abbc426a7206f3)), closes [#657](https://github.com/uptrace/bun/issues/657) +* **pgdriver:** return FATAL and PANIC errors immediately ([4595e38](https://github.com/uptrace/bun/commit/4595e385d3706116e47bf9dc295186ec7a2ab0f9)) +* quote m2m table name fixes [#649](https://github.com/uptrace/bun/issues/649) ([61a634e](https://github.com/uptrace/bun/commit/61a634e4cd5c18df4b75f756d4b0f06ea94bc3c8)) +* support multi-level embed column ([177ec4c](https://github.com/uptrace/bun/commit/177ec4c6e04f92957614ad4724bc82c422649a4b)), closes [#643](https://github.com/uptrace/bun/issues/643) + + +### Features + +* conditions not supporting composite in ([e5d78d4](https://github.com/uptrace/bun/commit/e5d78d464b94b78438cf275b4c35f713d129961d)) +* **idb:** support raw query ([be4e688](https://github.com/uptrace/bun/commit/be4e6886ad94b4b6ca42f24f73d79a15b1ac3188)) +* **migrate:** add MissingMigrations ([42567d0](https://github.com/uptrace/bun/commit/42567d052280f2c412d4796df7178915e537e6d9)) +* **pgdriver:** implement database/sql/driver.SessionResetter ([bda298a](https://github.com/uptrace/bun/commit/bda298ac66305e5b00ba67d72d3973625930c6b9)) +* 
**pgdriver:** provide access to the underlying net.Conn ([d07ea0e](https://github.com/uptrace/bun/commit/d07ea0ed1541225b5f08e59a4c87383811f7f051)) + + + +## [1.1.7](https://github.com/uptrace/bun/compare/v1.1.6...v1.1.7) (2022-07-29) + + +### Bug Fixes + +* change ScanAndCount without a limit to select all rows ([de5c570](https://github.com/uptrace/bun/commit/de5c5704166563aea41a82f7863f2db88ff108e2)) + + + +## [1.1.6](https://github.com/uptrace/bun/compare/v1.1.5...v1.1.6) (2022-07-10) + + +### Bug Fixes + +* bunotel add set attributes to query metrics ([dae82cc](https://github.com/uptrace/bun/commit/dae82cc0e3af49be1e474027b55c34364676985d)) +* **db.ScanRows:** ensure rows.Close is called ([9ffbc6a](https://github.com/uptrace/bun/commit/9ffbc6a46e24b908742b6973f33ef8e5b17cc12b)) +* merge apply ([3081849](https://github.com/uptrace/bun/commit/30818499eacddd3b1a3e749091ba6a1468125641)) +* **migrate:** close conn/tx on error ([7b168ea](https://github.com/uptrace/bun/commit/7b168eabfe0f844bcbf8dc89629d04c385b9f58c)) +* **migrate:** type Migration should be used as a value rather than a pointer ([fb43935](https://github.com/uptrace/bun/commit/fb4393582b49fe528800a66aac5fb1c9a6033048)) +* **migrate:** type MigrationGroup should be used as a value rather than a pointer ([649da1b](https://github.com/uptrace/bun/commit/649da1b3c158060add9b61b32c289260daafa65a)) +* mssql cursor pagination ([#589](https://github.com/uptrace/bun/issues/589)) ([b34ec97](https://github.com/uptrace/bun/commit/b34ec97ddda95629f73762721d60fd3e00e7e99f)) + + +### Features + +* "skipupdate" model field tag ([#565](https://github.com/uptrace/bun/issues/565)) ([9288294](https://github.com/uptrace/bun/commit/928829482c718a0c215aa4f4adfa6f3fb3ed4302)) +* add pgdriver write error to log ([5ddda3d](https://github.com/uptrace/bun/commit/5ddda3de31cd08ceee4bdea64ceae8d15eace07b)) +* add query string representation ([520da7e](https://github.com/uptrace/bun/commit/520da7e1d6dbf7b06846f6b39a7f99e8753c1466)) +* add relation condition with tag ([fe5bbf6](https://github.com/uptrace/bun/commit/fe5bbf64f33d25b310e5510ece7d705b9eb3bfea)) +* add support for ON UPDATE and ON DELETE rules on belongs-to relationships from struct tags ([#533](https://github.com/uptrace/bun/issues/533)) ([a327b2a](https://github.com/uptrace/bun/commit/a327b2ae216abb55a705626296c0cdbf8d648697)) +* add tx methods to IDB ([#587](https://github.com/uptrace/bun/issues/587)) ([feab313](https://github.com/uptrace/bun/commit/feab313c0358200b6e270ac70f4551b011ab5276)) +* added raw query calls ([#596](https://github.com/uptrace/bun/issues/596)) ([127644d](https://github.com/uptrace/bun/commit/127644d2eea443736fbd6bed3417595d439e4639)) +* **bunotel:** add option to enable formatting of queries ([#547](https://github.com/uptrace/bun/issues/547)) ([b9c768c](https://github.com/uptrace/bun/commit/b9c768cec3b5dea36c3c9c344d1e76e0ffad1369)) +* **config.go:** add sslrootcert support to DSN parameters ([3bd5d69](https://github.com/uptrace/bun/commit/3bd5d692d7df4f30d07b835d6a46fc7af382489a)) +* create an extra module for newrelic ([#599](https://github.com/uptrace/bun/issues/599)) ([6c676ce](https://github.com/uptrace/bun/commit/6c676ce13f05fe763471fbec2d5a2db48bc88650)) +* **migrate:** add WithMarkAppliedOnSuccess ([31b2cc4](https://github.com/uptrace/bun/commit/31b2cc4f5ccd794a436d081073d4974835d3780d)) +* **pgdialect:** add hstore support ([66b44f7](https://github.com/uptrace/bun/commit/66b44f7c0edc205927fb8be96aaf263b31828fa1)) +* **pgdialect:** add identity support 
([646251e](https://github.com/uptrace/bun/commit/646251ec02a1e2ec717e907e6f128d8b51f17c6d)) +* **pgdriver:** expose pgdriver.ParseTime ([405a7d7](https://github.com/uptrace/bun/commit/405a7d78d8f60cf27e8f175deaf95db5877d84be)) + + + +## [1.1.5](https://github.com/uptrace/bun/compare/v1.1.4...v1.1.5) (2022-05-12) + + +### Bug Fixes + +* **driver/sqliteshim:** make it work with recent version of modernc sqlite ([2360584](https://github.com/uptrace/bun/commit/23605846c20684e39bf1eaac50a2147a1b68a729)) + + + +## [1.1.4](https://github.com/uptrace/bun/compare/v1.1.3...v1.1.4) (2022-04-20) + + +### Bug Fixes + +* automatically set nullzero when there is default:value option ([72c44ae](https://github.com/uptrace/bun/commit/72c44aebbeec3a83ed97ea25a3262174d744df65)) +* fix ForceDelete on live/undeleted rows ([1a33250](https://github.com/uptrace/bun/commit/1a33250f27f00e752a735ce10311ac95dcb0c968)) +* fix OmitZero and value overriding ([087ea07](https://github.com/uptrace/bun/commit/087ea0730551f1e841bacb6ad2fa3afd512a1df8)) +* rename Query to QueryBuilder ([98d111b](https://github.com/uptrace/bun/commit/98d111b7cc00fa61b6b2cec147f43285f4baadb4)) + + +### Features + +* add ApplyQueryBuilder ([582eca0](https://github.com/uptrace/bun/commit/582eca09cf2b59e67c2e4a2ad24f1a74cb53addd)) +* **config.go:** add connect_timeout to DSN parsable params ([998b04d](https://github.com/uptrace/bun/commit/998b04d51a9a4f182ac3458f90db8dbf9185c4ba)), closes [#505](https://github.com/uptrace/bun/issues/505) + + + +# [1.1.3](https://github.com/uptrace/bun/compare/v1.1.2...v) (2022-03-29) + +### Bug Fixes + +- fix panic message when has-many encounter an error + ([cfd2747](https://github.com/uptrace/bun/commit/cfd27475fac89a1c8cf798bfa64898bd77bbba79)) +- **migrate:** change rollback to match migrate behavior + ([df5af9c](https://github.com/uptrace/bun/commit/df5af9c9cbdf54ce243e037bbb2c7b154f8422b3)) + +### Features + +- added QueryBuilder interface for SelectQuery, UpdateQuery, DeleteQuery + ([#499](https://github.com/uptrace/bun/issues/499)) + ([59fef48](https://github.com/uptrace/bun/commit/59fef48f6b3ec7f32bdda779b6693c333ff1dfdb)) + +# [1.1.2](https://github.com/uptrace/bun/compare/v1.1.2...v) (2022-03-22) + +### Bug Fixes + +- correctly handle bun.In([][]byte{...}) + ([800616e](https://github.com/uptrace/bun/commit/800616ed28ca600ad676319a10adb970b2b4daf6)) + +### Features + +- accept extend option to allow extending existing models + ([48b80e4](https://github.com/uptrace/bun/commit/48b80e4f7e3ed8a28fd305f7853ebe7ab984a497)) + +# [1.1.0](https://github.com/uptrace/bun/compare/v1.1.0-beta.1...v1.1.0) (2022-02-28) + +### Features + +- Added [MSSQL](https://bun.uptrace.dev/guide/drivers.html#mssql) support as a 4th fully supported + DBMS. +- Added `SetColumn("col_name", "upper(?)", "hello")` in addition to + `Set("col_name = upper(?)", "hello")` which works for all 4 supported DBMS. + +* improve nil ptr values handling + ([b398e6b](https://github.com/uptrace/bun/commit/b398e6bea840ea2fd3e001b7879c0b00b6dcd6f7)) + +### Breaking changes + +- Bun no longer automatically marks some fields like `ID int64` as `pk` and `autoincrement`. You + need to manually add those options: + +```diff +type Model struct { +- ID int64 ++ ID int64 `bun:",pk,autoincrement"` +} +``` + +Bun [v1.0.25](#1024-2022-02-22) prints warnings for models with missing options so you are +recommended to upgrade to v1.0.24 before upgrading to v1.1.x. + +- Also, Bun no longer adds `nullzero` option to `soft_delete` fields. 
+
+- Removed `nopk` and `allowzero` options.
+
+### Bug Fixes
+
+- append slice values
+  ([4a65129](https://github.com/uptrace/bun/commit/4a651294fb0f1e73079553024810c3ead9777311))
+- check for nils when appending driver.Value
+  ([7bb1640](https://github.com/uptrace/bun/commit/7bb1640a00fceca1e1075fe6544b9a4842ab2b26))
+- cleanup soft deletes for mssql
+  ([e72e2c5](https://github.com/uptrace/bun/commit/e72e2c5d0a85f3d26c3fa22c7284c2de1dcfda8e))
+- **dbfixture:** apply cascade option. Fixes [#447](https://github.com/uptrace/bun/issues/447)
+  ([d32d988](https://github.com/uptrace/bun/commit/d32d98840bc23e74c836f8192cb4bc9529aa9233))
+- create table WithForeignKey() and has-many relation
+  ([3cf5649](https://github.com/uptrace/bun/commit/3cf56491706b5652c383dbe007ff2389ad64922e))
+- do not emit m2m relations in WithForeignKeys()
+  ([56c8c5e](https://github.com/uptrace/bun/commit/56c8c5ed44c0d6d734c3d3161c642ce8437e2248))
+- accept dest in select queries
+  ([33b5b6f](https://github.com/uptrace/bun/commit/33b5b6ff660b77238a737a543ca12675c7f0c284))
+
+## [1.0.25](https://github.com/uptrace/bun/compare/v1.0.23...v1.0.25) (2022-02-22)
+
+### Deprecated
+
+In the coming v1.1.x release, Bun will stop automatically adding `,pk,autoincrement` options on
+`ID int64/int32` fields. This version (v1.0.23) only prints a warning when it encounters such
+fields, but the code will continue working as before.
+
+To fix warnings, add missing options:
+
+```diff
+type Model struct {
+-	ID int64
++	ID int64 `bun:",pk,autoincrement"`
+}
+```
+
+To silence warnings:
+
+```go
+bun.SetWarnLogger(log.New(ioutil.Discard, "", log.LstdFlags))
+```
+
+Bun will also print a warning on [soft delete](https://bun.uptrace.dev/guide/soft-deletes.html)
+fields without a `,nullzero` option. You can fix the warning by adding missing `,nullzero` or
+`,allowzero` options.
+
+In v1.1.x, such options as `,nopk` and `,allowzero` will not be necessary and will be removed.
+
+### Bug Fixes
+
+- fix missing autoincrement warning
+  ([3bc9c72](https://github.com/uptrace/bun/commit/3bc9c721e1c1c5104c256a0c01c4525df6ecefc2))
+
+- append slice values
+  ([4a65129](https://github.com/uptrace/bun/commit/4a651294fb0f1e73079553024810c3ead9777311))
+- don't automatically set pk, nullzero, and autoincrement options
+  ([519a0df](https://github.com/uptrace/bun/commit/519a0df9707de01a418aba0d6b7482cfe4c9a532))
+
+### Features
+
+- add CreateTableQuery.DetectForeignKeys
+  ([a958fcb](https://github.com/uptrace/bun/commit/a958fcbab680b0c5ad7980f369c7b73f7673db87))
+
+## [1.0.22](https://github.com/uptrace/bun/compare/v1.0.21...v1.0.22) (2022-01-28)
+
+### Bug Fixes
+
+- improve scan error message
+  ([54048b2](https://github.com/uptrace/bun/commit/54048b296b9648fd62107ce6fa6fd7e6e2a648c7))
+- properly discover json.Marshaler on ptr field
+  ([3b321b0](https://github.com/uptrace/bun/commit/3b321b08601c4b8dc6bcaa24adea20875883ac14))
+
+### Breaking (MySQL, MariaDB)
+
+- **insert:** get last insert id only with pk support auto increment
+  ([79e7c79](https://github.com/uptrace/bun/commit/79e7c797beea54bfc9dc1cb0141a7520ff941b4d)). Make
+  sure your MySQL models have `bun:",pk,autoincrement"` options if you are using autoincrements.
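+
+As an illustration of this breaking change, with an explicit autoincrement primary key Bun can
+still back-fill the id from MySQL's LastInsertId after an insert. A minimal sketch (the `Book`
+model, `db`, and `ctx` are illustrative, not part of this release):
+
+```go
+type Book struct {
+	ID    int64 `bun:",pk,autoincrement"`
+	Title string
+}
+
+book := &Book{Title: "hello"}
+// On MySQL/MariaDB the generated id is fetched via LastInsertId and written
+// back into book.ID only when the primary key supports autoincrement.
+if _, err := db.NewInsert().Model(book).Exec(ctx); err != nil {
+	panic(err)
+}
+```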
+ +### Features + +- refuse to start when version check does not pass + ([ff8d767](https://github.com/uptrace/bun/commit/ff8d76794894eeaebede840e5199720f3f5cf531)) +- support Column in ValuesQuery + ([0707679](https://github.com/uptrace/bun/commit/0707679b075cac57efa8e6fe9019b57b2da4bcc7)) + +## [1.0.21](https://github.com/uptrace/bun/compare/v1.0.20...v1.0.21) (2022-01-06) + +### Bug Fixes + +- append where to index create + ([1de6cea](https://github.com/uptrace/bun/commit/1de6ceaa8bba59b69fbe0cc6916d1b27da5586d8)) +- check if slice is nil when calling BeforeAppendModel + ([938d9da](https://github.com/uptrace/bun/commit/938d9dadb72ceeeb906064d9575278929d20cbbe)) +- **dbfixture:** directly set matching types via reflect + ([780504c](https://github.com/uptrace/bun/commit/780504cf1da687fc51a22d002ea66e2ccc41e1a3)) +- properly handle driver.Valuer and type:json + ([a17454a](https://github.com/uptrace/bun/commit/a17454ac6b95b2a2e927d0c4e4aee96494108389)) +- support scanning string into uint64 + ([73cc117](https://github.com/uptrace/bun/commit/73cc117a9f7a623ced1fdaedb4546e8e7470e4d3)) +- unique module name for opentelemetry example + ([f2054fe](https://github.com/uptrace/bun/commit/f2054fe1d11cea3b21d69dab6f6d6d7d97ba06bb)) + +### Features + +- add anonymous fields with type name + ([508375b](https://github.com/uptrace/bun/commit/508375b8f2396cb088fd4399a9259584353eb7e5)) +- add baseQuery.GetConn() + ([81a9bee](https://github.com/uptrace/bun/commit/81a9beecb74fed7ec3574a1d42acdf10a74e0b00)) +- create new queries from baseQuery + ([ae1dd61](https://github.com/uptrace/bun/commit/ae1dd611a91c2b7c79bc2bc12e9a53e857791e71)) +- support INSERT ... RETURNING for MariaDB >= 10.5.0 + ([b6531c0](https://github.com/uptrace/bun/commit/b6531c00ecbd4c7ec56b4131fab213f9313edc1b)) + +## [1.0.20](https://github.com/uptrace/bun/compare/v1.0.19...v1.0.20) (2021-12-19) + +### Bug Fixes + +- add Event.QueryTemplate and change Event.Query to be always formatted + ([52b1ccd](https://github.com/uptrace/bun/commit/52b1ccdf3578418aa427adef9dcf942d90ae4fdd)) +- change GetTableName to return formatted table name in case ModelTableExpr + ([95144dd](https://github.com/uptrace/bun/commit/95144dde937b4ac88b36b0bd8b01372421069b44)) +- change ScanAndCount to work with transactions + ([5b3f2c0](https://github.com/uptrace/bun/commit/5b3f2c021c424da366caffd33589e8adde821403)) +- **dbfixture:** directly call funcs bypassing template eval + ([a61974b](https://github.com/uptrace/bun/commit/a61974ba2d24361c5357fb9bda1f3eceec5a45cd)) +- don't append CASCADE by default in drop table/column queries + ([26457ea](https://github.com/uptrace/bun/commit/26457ea5cb20862d232e6e5fa4dbdeac5d444bf1)) +- **migrate:** mark migrations as applied on error so the migration can be rolled back + ([8ce33fb](https://github.com/uptrace/bun/commit/8ce33fbbac8e33077c20daf19a14c5ff2291bcae)) +- respect nullzero when appending struct fields. 
Fixes + [#339](https://github.com/uptrace/bun/issues/339) + ([ffd02f3](https://github.com/uptrace/bun/commit/ffd02f3170b3cccdd670a48d563cfb41094c05d6)) +- reuse tx for relation join ([#366](https://github.com/uptrace/bun/issues/366)) + ([60bdb1a](https://github.com/uptrace/bun/commit/60bdb1ac84c0a699429eead3b7fdfbf14fe69ac6)) + +### Features + +- add `Dialect()` to Transaction and IDB interface + ([693f1e1](https://github.com/uptrace/bun/commit/693f1e135999fc31cf83b99a2530a695b20f4e1b)) +- add model embedding via embed:prefix\_ + ([9a2cedc](https://github.com/uptrace/bun/commit/9a2cedc8b08fa8585d4bfced338bd0a40d736b1d)) +- change the default log output to stderr + ([4bf5773](https://github.com/uptrace/bun/commit/4bf577382f19c64457cbf0d64490401450954654)), + closes [#349](https://github.com/uptrace/bun/issues/349) + +## [1.0.19](https://github.com/uptrace/bun/compare/v1.0.18...v1.0.19) (2021-11-30) + +### Features + +- add support for column:name to specify column name + ([e37b460](https://github.com/uptrace/bun/commit/e37b4602823babc8221970e086cfed90c6ad4cf4)) + +## [1.0.18](https://github.com/uptrace/bun/compare/v1.0.17...v1.0.18) (2021-11-24) + +### Bug Fixes + +- use correct operation for UpdateQuery + ([687a004](https://github.com/uptrace/bun/commit/687a004ef7ec6fe1ef06c394965dd2c2d822fc82)) + +### Features + +- add pgdriver.Notify + ([7ee443d](https://github.com/uptrace/bun/commit/7ee443d1b869d8ddc4746850f7425d0a9ccd012b)) +- CreateTableQuery.PartitionBy and CreateTableQuery.TableSpace + ([cd3ab4d](https://github.com/uptrace/bun/commit/cd3ab4d8f3682f5a30b87c2ebc2d7e551d739078)) +- **pgdriver:** add CopyFrom and CopyTo + ([0b97703](https://github.com/uptrace/bun/commit/0b977030b5c05f509e11d13550b5f99dfd62358d)) +- support InsertQuery.Ignore on PostgreSQL + ([1aa9d14](https://github.com/uptrace/bun/commit/1aa9d149da8e46e63ff79192e394fde4d18d9b60)) + +## [1.0.17](https://github.com/uptrace/bun/compare/v1.0.16...v1.0.17) (2021-11-11) + +### Bug Fixes + +- don't call rollback when tx is already done + ([8246c2a](https://github.com/uptrace/bun/commit/8246c2a63e2e6eba314201c6ba87f094edf098b9)) +- **mysql:** escape backslash char in strings + ([fb32029](https://github.com/uptrace/bun/commit/fb32029ea7604d066800b16df21f239b71bf121d)) + +## [1.0.16](https://github.com/uptrace/bun/compare/v1.0.15...v1.0.16) (2021-11-07) + +### Bug Fixes + +- call query hook when tx is started, committed, or rolled back + ([30e85b5](https://github.com/uptrace/bun/commit/30e85b5366b2e51951ef17a0cf362b58f708dab1)) +- **pgdialect:** auto-enable array support if the sql type is an array + ([62c1012](https://github.com/uptrace/bun/commit/62c1012b2482e83969e5c6f5faf89e655ce78138)) + +### Features + +- support multiple tag options join:left_col1=right_col1,join:left_col2=right_col2 + ([78cd5aa](https://github.com/uptrace/bun/commit/78cd5aa60a5c7d1323bb89081db2b2b811113052)) +- **tag:** log with bad tag name + ([4e82d75](https://github.com/uptrace/bun/commit/4e82d75be2dabdba1a510df4e1fbb86092f92f4c)) + +## [1.0.15](https://github.com/uptrace/bun/compare/v1.0.14...v1.0.15) (2021-10-29) + +### Bug Fixes + +- fixed bug creating table when model has no columns + ([042c50b](https://github.com/uptrace/bun/commit/042c50bfe41caaa6e279e02c887c3a84a3acd84f)) +- init table with dialect once + ([9a1ce1e](https://github.com/uptrace/bun/commit/9a1ce1e492602742bb2f587e9ed24e50d7d07cad)) + +### Features + +- accept columns in WherePK + ([b3e7035](https://github.com/uptrace/bun/commit/b3e70356db1aa4891115a10902316090fccbc8bf)) +- support 
ADD COLUMN IF NOT EXISTS + ([ca7357c](https://github.com/uptrace/bun/commit/ca7357cdfe283e2f0b94eb638372e18401c486e9)) + +## [1.0.14](https://github.com/uptrace/bun/compare/v1.0.13...v1.0.14) (2021-10-24) + +### Bug Fixes + +- correct binary serialization for mysql ([#259](https://github.com/uptrace/bun/issues/259)) + ([e899f50](https://github.com/uptrace/bun/commit/e899f50b22ef6759ef8c029a6cd3f25f2bde17ef)) +- correctly escape single quotes in pg arrays + ([3010847](https://github.com/uptrace/bun/commit/3010847f5c2c50bce1969689a0b77fd8a6fb7e55)) +- use BLOB sql type to encode []byte in MySQL and SQLite + ([725ec88](https://github.com/uptrace/bun/commit/725ec8843824a7fc8f4058ead75ab0e62a78192a)) + +### Features + +- warn when there are args but no placeholders + ([06dde21](https://github.com/uptrace/bun/commit/06dde215c8d0bde2b2364597190729a160e536a1)) + +## [1.0.13](https://github.com/uptrace/bun/compare/v1.0.12...v1.0.13) (2021-10-17) + +### Breaking Change + +- **pgdriver:** enable TLS by default with InsecureSkipVerify=true + ([15ec635](https://github.com/uptrace/bun/commit/15ec6356a04d5cf62d2efbeb189610532dc5eb31)) + +### Features + +- add BeforeAppendModelHook + ([0b55de7](https://github.com/uptrace/bun/commit/0b55de77aaffc1ed0894ef16f45df77bca7d93c1)) +- **pgdriver:** add support for unix socket DSN + ([f398cec](https://github.com/uptrace/bun/commit/f398cec1c3873efdf61ac0b94ebe06c657f0cf91)) + +## [1.0.12](https://github.com/uptrace/bun/compare/v1.0.11...v1.0.12) (2021-10-14) + +### Bug Fixes + +- add InsertQuery.ColumnExpr to specify columns + ([60ffe29](https://github.com/uptrace/bun/commit/60ffe293b37912d95f28e69734ff51edf4b27da7)) +- **bundebug:** change WithVerbose to accept a bool flag + ([b2f8b91](https://github.com/uptrace/bun/commit/b2f8b912de1dc29f40c79066de1e9d6379db666c)) +- **pgdialect:** fix bytea[] handling + ([a5ca013](https://github.com/uptrace/bun/commit/a5ca013742c5a2e947b43d13f9c2fc0cf6a65d9c)) +- **pgdriver:** rename DriverOption to Option + ([51c1702](https://github.com/uptrace/bun/commit/51c1702431787d7369904b2624e346bf3e59c330)) +- support allowzero on the soft delete field + ([d0abec7](https://github.com/uptrace/bun/commit/d0abec71a9a546472a83bd70ed4e6a7357659a9b)) + +### Features + +- **bundebug:** allow to configure the hook using env var, for example, BUNDEBUG={0,1,2} + ([ce92852](https://github.com/uptrace/bun/commit/ce928524cab9a83395f3772ae9dd5d7732af281d)) +- **bunotel:** report DBStats metrics + ([b9b1575](https://github.com/uptrace/bun/commit/b9b15750f405cdbd345b776f5a56c6f742bc7361)) +- **pgdriver:** add Error.StatementTimeout + ([8a7934d](https://github.com/uptrace/bun/commit/8a7934dd788057828bb2b0983732b4394b74e960)) +- **pgdriver:** allow setting Network in config + ([b24b5d8](https://github.com/uptrace/bun/commit/b24b5d8014195a56ad7a4c634c10681038e6044d)) + +## [1.0.11](https://github.com/uptrace/bun/compare/v1.0.10...v1.0.11) (2021-10-05) + +### Bug Fixes + +- **mysqldialect:** remove duplicate AppendTime + ([8d42090](https://github.com/uptrace/bun/commit/8d42090af34a1760004482c7fc0923b114d79937)) + +## [1.0.10](https://github.com/uptrace/bun/compare/v1.0.9...v1.0.10) (2021-10-05) + +### Bug Fixes + +- add UpdateQuery.OmitZero + ([2294db6](https://github.com/uptrace/bun/commit/2294db61d228711435fff1075409a30086b37555)) +- make ExcludeColumn work with many-to-many queries + ([300e12b](https://github.com/uptrace/bun/commit/300e12b993554ff839ec4fa6bbea97e16aca1b55)) +- **mysqldialect:** append time in local timezone + 
([e763cc8](https://github.com/uptrace/bun/commit/e763cc81eac4b11fff4e074ad3ff6cd970a71697)) +- **tagparser:** improve parsing options with brackets + ([0daa61e](https://github.com/uptrace/bun/commit/0daa61edc3c4d927ed260332b99ee09f4bb6b42f)) + +### Features + +- add timetz parsing + ([6e415c4](https://github.com/uptrace/bun/commit/6e415c4c5fa2c8caf4bb4aed4e5897fe5676f5a5)) + +## [1.0.9](https://github.com/uptrace/bun/compare/v1.0.8...v1.0.9) (2021-09-27) + +### Bug Fixes + +- change DBStats to use uint32 instead of uint64 to make it work on i386 + ([caca2a7](https://github.com/uptrace/bun/commit/caca2a7130288dec49fa26b49c8550140ee52f4c)) + +### Features + +- add IQuery and QueryEvent.IQuery + ([b762942](https://github.com/uptrace/bun/commit/b762942fa3b1d8686d0a559f93f2a6847b83d9c1)) +- add QueryEvent.Model + ([7688201](https://github.com/uptrace/bun/commit/7688201b485d14d3e393956f09a3200ea4d4e31d)) +- **bunotel:** add experimental bun.query.timing metric + ([2cdb384](https://github.com/uptrace/bun/commit/2cdb384678631ccadac0fb75f524bd5e91e96ee2)) +- **pgdriver:** add Config.ConnParams to session config params + ([408caf0](https://github.com/uptrace/bun/commit/408caf0bb579e23e26fc6149efd6851814c22517)) +- **pgdriver:** allow specifying timeout in DSN + ([7dbc71b](https://github.com/uptrace/bun/commit/7dbc71b3494caddc2e97d113f00067071b9e19da)) + +## [1.0.8](https://github.com/uptrace/bun/compare/v1.0.7...v1.0.8) (2021-09-18) + +### Bug Fixes + +- don't append soft delete where for insert queries with on conflict clause + ([27c477c](https://github.com/uptrace/bun/commit/27c477ce071d4c49c99a2531d638ed9f20e33461)) +- improve bun.NullTime to accept string + ([73ad6f5](https://github.com/uptrace/bun/commit/73ad6f5640a0a9b09f8df2bc4ab9cb510021c50c)) +- make allowzero work with auto-detected primary keys + ([82ca87c](https://github.com/uptrace/bun/commit/82ca87c7c49797d507b31fdaacf8343716d4feff)) +- support soft deletes on nil model + ([0556e3c](https://github.com/uptrace/bun/commit/0556e3c63692a7f4e48659d52b55ffd9cca0202a)) + +## [1.0.7](https://github.com/uptrace/bun/compare/v1.0.6...v1.0.7) (2021-09-15) + +### Bug Fixes + +- don't append zero time as NULL without nullzero tag + ([3b8d9cb](https://github.com/uptrace/bun/commit/3b8d9cb4e39eb17f79a618396bbbe0adbc66b07b)) +- **pgdriver:** return PostgreSQL DATE as a string + ([40be0e8](https://github.com/uptrace/bun/commit/40be0e8ea85f8932b7a410a6fc2dd3acd2d18ebc)) +- specify table alias for soft delete where + ([5fff1dc](https://github.com/uptrace/bun/commit/5fff1dc1dd74fa48623a24fa79e358a544dfac0b)) + +### Features + +- add SelectQuery.Exists helper + ([c3e59c1](https://github.com/uptrace/bun/commit/c3e59c1bc58b43c4b8e33e7d170ad33a08fbc3c7)) + +## [1.0.6](https://github.com/uptrace/bun/compare/v1.0.5...v1.0.6) (2021-09-11) + +### Bug Fixes + +- change unique tag to create a separate unique constraint + ([8401615](https://github.com/uptrace/bun/commit/84016155a77ca77613cc054277fefadae3098757)) +- improve zero checker for ptr values + ([2b3623d](https://github.com/uptrace/bun/commit/2b3623dd665d873911fd20ca707016929921e862)) + +## v1.0.5 - Sep 09 2021 + +- chore: tweak bundebug colors +- fix: check if table is present when appending columns +- fix: copy []byte when scanning + +## v1.0.4 - Sep 08 2021 + +- Added support for MariaDB. +- Restored default `SET` for `ON CONFLICT DO UPDATE` queries. + +## v1.0.3 - Sep 06 2021 + +- Fixed bulk soft deletes. +- pgdialect: fixed scanning into an array pointer. 
+ +## v1.0.2 - Sep 04 2021 + +- Changed to completely ignore fields marked with `bun:"-"`. If you want to be able to scan into + such columns, use `bun:",scanonly"`. +- pgdriver: fixed SASL authentication handling. + +## v1.0.1 - Sep 02 2021 + +- pgdriver: added erroneous zero writes retry. +- Improved column handling in Relation callback. + +## v1.0.0 - Sep 01 2021 + +- First stable release. + +## v0.4.1 - Aug 18 2021 + +- Fixed migrate package to properly rollback migrations. +- Added `allowzero` tag option that undoes `nullzero` option. + +## v0.4.0 - Aug 11 2021 + +- Changed `WhereGroup` function to accept `*SelectQuery`. +- Fixed query hooks for count queries. + +## v0.3.4 - Jul 19 2021 + +- Renamed `migrate.CreateGo` to `CreateGoMigration`. +- Added `migrate.WithPackageName` to customize the Go package name in generated migrations. +- Renamed `migrate.CreateSQL` to `CreateSQLMigrations` and changed `CreateSQLMigrations` to create + both up and down migration files. + +## v0.3.1 - Jul 12 2021 + +- Renamed `alias` field struct tag to `alt` so it is not confused with column alias. +- Reworked migrate package API. See + [migrate](https://github.com/uptrace/bun/tree/master/example/migrate) example for details. + +## v0.3.0 - Jul 09 2021 + +- Changed migrate package to return structured data instead of logging the progress. See + [migrate](https://github.com/uptrace/bun/tree/master/example/migrate) example for details. + +## v0.2.14 - Jul 01 2021 + +- Added [sqliteshim](https://pkg.go.dev/github.com/uptrace/bun/driver/sqliteshim) by + [Ivan Trubach](https://github.com/tie). +- Added support for MySQL 5.7 in addition to MySQL 8. + +## v0.2.12 - Jun 29 2021 + +- Fixed scanners for net.IP and net.IPNet. + +## v0.2.10 - Jun 29 2021 + +- Fixed pgdriver to format passed query args. + +## v0.2.9 - Jun 27 2021 + +- Added support for prepared statements in pgdriver. + +## v0.2.7 - Jun 26 2021 + +- Added `UpdateQuery.Bulk` helper to generate bulk-update queries. + + Before: + + ```go + models := []Model{ + {42, "hello"}, + {43, "world"}, + } + return db.NewUpdate(). + With("_data", db.NewValues(&models)). + Model(&models). + Table("_data"). + Set("model.str = _data.str"). + Where("model.id = _data.id") + ``` + + Now: + + ```go + db.NewUpdate(). + Model(&models). + Bulk() + ``` + +## v0.2.5 - Jun 25 2021 + +- Changed time.Time to always append zero time as `NULL`. +- Added `db.RunInTx` helper. + +## v0.2.4 - Jun 21 2021 + +- Added SSL support to pgdriver. + +## v0.2.3 - Jun 20 2021 + +- Replaced `ForceDelete(ctx)` with `ForceDelete().Exec(ctx)` for soft deletes. + +## v0.2.1 - Jun 17 2021 + +- Renamed `DBI` to `IConn`. `IConn` is a common interface for `*sql.DB`, `*sql.Conn`, and `*sql.Tx`. +- Added `IDB`. `IDB` is a common interface for `*bun.DB`, `bun.Conn`, and `bun.Tx`. + +## v0.2.0 - Jun 16 2021 + +- Changed [model hooks](https://bun.uptrace.dev/guide/hooks.html#model-hooks). See + [model-hooks](example/model-hooks) example. +- Renamed `has-one` to `belongs-to`. Renamed `belongs-to` to `has-one`. Previously Bun used + incorrect names for these relations. 
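+
+Under the corrected naming, the struct that stores the foreign key declares `belongs-to`, and the
+struct it points at declares `has-one`. A minimal sketch (the `User`/`Profile` models are
+illustrative, not part of this release):
+
+```go
+type Profile struct {
+	ID     int64 `bun:",pk"`
+	UserID int64
+	// Profile holds the foreign key, so it belongs to User.
+	User *User `bun:"rel:belongs-to,join:user_id=id"`
+}
+
+type User struct {
+	ID int64 `bun:",pk"`
+	// User is referenced by Profile.UserID, so it has one Profile.
+	Profile *Profile `bun:"rel:has-one,join:id=user_id"`
+}
+```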
diff --git a/vendor/github.com/uptrace/bun/CONTRIBUTING.md b/vendor/github.com/uptrace/bun/CONTRIBUTING.md new file mode 100644 index 0000000..9426875 --- /dev/null +++ b/vendor/github.com/uptrace/bun/CONTRIBUTING.md @@ -0,0 +1,34 @@ +## Running tests + +To run tests, you need Docker which starts PostgreSQL and MySQL servers: + +```shell +cd internal/dbtest +./test.sh +``` + +To ease debugging, you can run tests and print all executed queries: + +```shell +BUNDEBUG=2 TZ= go test -run=TestName +``` + +## Releasing + +1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub: + +```shell +TAG=v1.0.0 ./scripts/release.sh +``` + +2. Open a pull request and wait for the build to finish. + +3. Merge the pull request and run `tag.sh` to create tags for packages: + +```shell +TAG=v1.0.0 ./scripts/tag.sh +``` + +## Documentation + +To contribute to the docs visit https://github.com/uptrace/bun-docs diff --git a/vendor/github.com/uptrace/bun/LICENSE b/vendor/github.com/uptrace/bun/LICENSE new file mode 100644 index 0000000..7ec8181 --- /dev/null +++ b/vendor/github.com/uptrace/bun/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2021 Vladimir Mihailenco. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/uptrace/bun/Makefile b/vendor/github.com/uptrace/bun/Makefile new file mode 100644 index 0000000..c9a84cd --- /dev/null +++ b/vendor/github.com/uptrace/bun/Makefile @@ -0,0 +1,30 @@ +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +EXAMPLE_GO_MOD_DIRS := $(shell find ./example/ -type f -name 'go.mod' -exec dirname {} \; | sort) + +test: + set -e; for dir in $(ALL_GO_MOD_DIRS); do \ + echo "go test in $${dir}"; \ + (cd "$${dir}" && \ + go test && \ + go test -race && \ + env GOOS=linux GOARCH=386 TZ= go test && \ + go vet); \ + done + +go_mod_tidy: + set -e; for dir in $(ALL_GO_MOD_DIRS); do \ + echo "go mod tidy in $${dir}"; \ + (cd "$${dir}" && \ + go get -u ./... && \ + go mod tidy); \ + done + +fmt: + gofmt -w -s ./ + goimports -w -local github.com/uptrace/bun ./ + +run-examples: + set -e; for dir in $(EXAMPLE_GO_MOD_DIRS); do \ + echo "go run . 
in $${dir}"; \ + (cd "$${dir}" && go run .); \ + done diff --git a/vendor/github.com/uptrace/bun/README.md b/vendor/github.com/uptrace/bun/README.md new file mode 100644 index 0000000..6fe9913 --- /dev/null +++ b/vendor/github.com/uptrace/bun/README.md @@ -0,0 +1,286 @@ +# Bun: SQL-first Golang ORM + +[![build workflow](https://github.com/uptrace/bun/actions/workflows/build.yml/badge.svg)](https://github.com/uptrace/bun/actions) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/bun)](https://pkg.go.dev/github.com/uptrace/bun) +[![Documentation](https://img.shields.io/badge/bun-documentation-informational)](https://bun.uptrace.dev/) +[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) +[![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20Bun%20Guru-006BFF)](https://gurubase.io/g/bun) + +**Lightweight, SQL-first Golang ORM for PostgreSQL, MySQL, MSSQL, SQLite, and Oracle** + +Bun is a modern ORM that embraces SQL rather than hiding it. Write complex queries in Go with type +safety, powerful scanning capabilities, and database-agnostic code that works across multiple SQL +databases. + +## ✨ Key Features + +- **SQL-first approach** - Write elegant, readable queries that feel like SQL +- **Multi-database support** - PostgreSQL, MySQL/MariaDB, MSSQL, SQLite, and Oracle +- **Type-safe operations** - Leverage Go's static typing for compile-time safety +- **Flexible scanning** - Query results into structs, maps, scalars, or slices +- **Performance optimized** - Built on `database/sql` with minimal overhead +- **Rich relationships** - Define complex table relationships with struct tags +- **Production ready** - Migrations, fixtures, soft deletes, and OpenTelemetry support + +## 🚀 Quick Start + +```bash +go get github.com/uptrace/bun +``` + +### Basic Example + +```go +package main + +import ( + "context" + "database/sql" + "fmt" + + "github.com/uptrace/bun" + "github.com/uptrace/bun/dialect/sqlitedialect" + "github.com/uptrace/bun/driver/sqliteshim" +) + +func main() { + ctx := context.Background() + + // Open database + sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:") + if err != nil { + panic(err) + } + + // Create Bun instance + db := bun.NewDB(sqldb, sqlitedialect.New()) + + // Define model + type User struct { + ID int64 `bun:",pk,autoincrement"` + Name string `bun:",notnull"` + } + + // Create table + db.NewCreateTable().Model((*User)(nil)).Exec(ctx) + + // Insert user + user := &User{Name: "John Doe"} + db.NewInsert().Model(user).Exec(ctx) + + // Query user + err = db.NewSelect().Model(user).Where("id = ?", user.ID).Scan(ctx) + fmt.Printf("User: %+v\n", user) +} +``` + +## 🎯 Why Choose Bun? + +### Elegant Complex Queries + +Write sophisticated queries that remain readable and maintainable: + +```go +regionalSales := db.NewSelect(). + ColumnExpr("region"). + ColumnExpr("SUM(amount) AS total_sales"). + TableExpr("orders"). + GroupExpr("region") + +topRegions := db.NewSelect(). + ColumnExpr("region"). + TableExpr("regional_sales"). + Where("total_sales > (SELECT SUM(total_sales) / 10 FROM regional_sales)") + +var results []struct { + Region string `bun:"region"` + Product string `bun:"product"` + ProductUnits int `bun:"product_units"` + ProductSales int `bun:"product_sales"` +} + +err := db.NewSelect(). + With("regional_sales", regionalSales). + With("top_regions", topRegions). + ColumnExpr("region, product"). + ColumnExpr("SUM(quantity) AS product_units"). + ColumnExpr("SUM(amount) AS product_sales"). 
+ TableExpr("orders"). + Where("region IN (SELECT region FROM top_regions)"). + GroupExpr("region, product"). + Scan(ctx, &results) +``` + +### Flexible Result Scanning + +Scan query results into various Go types: + +```go +// Into structs +var users []User +db.NewSelect().Model(&users).Scan(ctx) + +// Into maps +var userMaps []map[string]interface{} +db.NewSelect().Table("users").Scan(ctx, &userMaps) + +// Into scalars +var count int +db.NewSelect().Table("users").ColumnExpr("COUNT(*)").Scan(ctx, &count) + +// Into individual variables +var id int64 +var name string +db.NewSelect().Table("users").Column("id", "name").Limit(1).Scan(ctx, &id, &name) +``` + +## 📊 Database Support + +| Database | Driver | Dialect | +| ------------- | ------------------------------------------ | --------------------- | +| PostgreSQL | `github.com/uptrace/bun/driver/pgdriver` | `pgdialect.New()` | +| MySQL/MariaDB | `github.com/go-sql-driver/mysql` | `mysqldialect.New()` | +| SQLite | `github.com/uptrace/bun/driver/sqliteshim` | `sqlitedialect.New()` | +| SQL Server | `github.com/denisenkom/go-mssqldb` | `mssqldialect.New()` | +| Oracle | `github.com/sijms/go-ora/v2` | `oracledialect.New()` | + +## 🔧 Advanced Features + +### Table Relationships + +Define complex relationships with struct tags: + +```go +type User struct { + ID int64 `bun:",pk,autoincrement"` + Name string `bun:",notnull"` + Posts []Post `bun:"rel:has-many,join:id=user_id"` + Profile Profile `bun:"rel:has-one,join:id=user_id"` +} + +type Post struct { + ID int64 `bun:",pk,autoincrement"` + Title string + UserID int64 + User *User `bun:"rel:belongs-to,join:user_id=id"` +} + +// Load users with their posts +var users []User +err := db.NewSelect(). + Model(&users). + Relation("Posts"). + Scan(ctx) +``` + +### Bulk Operations + +Efficient bulk operations for large datasets: + +```go +// Bulk insert +users := []User{{Name: "John"}, {Name: "Jane"}, {Name: "Bob"}} +_, err := db.NewInsert().Model(&users).Exec(ctx) + +// Bulk update with CTE +_, err = db.NewUpdate(). + Model(&users). + Set("updated_at = NOW()"). + Where("active = ?", true). + Exec(ctx) + +// Bulk delete +_, err = db.NewDelete(). + Model((*User)(nil)). + Where("created_at < ?", time.Now().AddDate(-1, 0, 0)). + Exec(ctx) +``` + +### Migrations + +Version your database schema: + +```go +import "github.com/uptrace/bun/migrate" + +migrations := migrate.NewMigrations() + +migrations.MustRegister(func(ctx context.Context, db *bun.DB) error { + _, err := db.NewCreateTable().Model((*User)(nil)).Exec(ctx) + return err +}, func(ctx context.Context, db *bun.DB) error { + _, err := db.NewDropTable().Model((*User)(nil)).Exec(ctx) + return err +}) + +migrator := migrate.NewMigrator(db, migrations) +err := migrator.Init(ctx) +err = migrator.Up(ctx) +``` + +## 📈 Monitoring & Observability + +### Debug Queries + +Enable query logging for development: + +```go +import "github.com/uptrace/bun/extra/bundebug" + +db.AddQueryHook(bundebug.NewQueryHook( + bundebug.WithVerbose(true), +)) +``` + +### OpenTelemetry Integration + +Production-ready observability with distributed tracing: + +```go +import "github.com/uptrace/bun/extra/bunotel" + +db.AddQueryHook(bunotel.NewQueryHook( + bunotel.WithDBName("myapp"), +)) +``` + +> **Monitoring made easy**: Bun is brought to you by ⭐ +> [**uptrace/uptrace**](https://github.com/uptrace/uptrace). Uptrace is an open-source APM tool that +> supports distributed tracing, metrics, and logs. 
You can use it to monitor applications and set up +> automatic alerts to receive notifications via email, Slack, Telegram, and others. +> +> See [OpenTelemetry example](example/opentelemetry) which demonstrates how you can use Uptrace to +> monitor Bun. + +## 📚 Documentation & Resources + +- **[Getting Started Guide](https://bun.uptrace.dev/guide/golang-orm.html)** - Comprehensive + tutorial +- **[API Reference](https://pkg.go.dev/github.com/uptrace/bun)** - Complete package documentation +- **[Examples](https://github.com/uptrace/bun/tree/master/example)** - Working code samples +- **[Starter Kit](https://github.com/go-bun/bun-starter-kit)** - Production-ready template +- **[Community Discussions](https://github.com/uptrace/bun/discussions)** - Get help and share ideas + +## 🤝 Contributing + +We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details on how to +get started. + +**Thanks to all our contributors:** + + + Contributors + + +## 🔗 Related Projects + +- **[Golang HTTP router](https://github.com/uptrace/bunrouter)** - Fast and flexible HTTP router +- **[Golang msgpack](https://github.com/vmihailenco/msgpack)** - High-performance MessagePack + serialization + +--- + +
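+
+The bundebug and OpenTelemetry integrations above are both ordinary query hooks. A
+hook is any type implementing `BeforeQuery`/`AfterQuery`; below is a minimal sketch
+of a timing hook (`timingHook` is an illustrative name, not part of the API):
+
+```go
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/uptrace/bun"
+)
+
+// timingHook logs the operation and duration of every query.
+type timingHook struct{}
+
+func (timingHook) BeforeQuery(ctx context.Context, event *bun.QueryEvent) context.Context {
+	return ctx // bun records event.StartTime before calling the hook
+}
+
+func (timingHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
+	log.Printf("%s took %s", event.Operation(), time.Since(event.StartTime))
+}
+
+// Register it like any other hook:
+// db.AddQueryHook(timingHook{})
+```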
+**Star ⭐ this repo if you find Bun useful!**
+
+Join our community on [Discord](https://discord.gg/rWtp5Aj) • Follow updates on [GitHub](https://github.com/uptrace/bun)
+
diff --git a/vendor/github.com/uptrace/bun/bun.go b/vendor/github.com/uptrace/bun/bun.go
new file mode 100644
index 0000000..680fc77
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/bun.go
@@ -0,0 +1,98 @@
+package bun
+
+import (
+	"context"
+
+	"github.com/uptrace/bun/internal"
+	"github.com/uptrace/bun/schema"
+)
+
+type (
+	Safe  = schema.Safe
+	Name  = schema.Name
+	Ident = schema.Ident
+	Order = schema.Order
+
+	NullTime  = schema.NullTime
+	BaseModel = schema.BaseModel
+	Query     = schema.Query
+
+	BeforeAppendModelHook = schema.BeforeAppendModelHook
+
+	BeforeScanRowHook = schema.BeforeScanRowHook
+	AfterScanRowHook  = schema.AfterScanRowHook
+)
+
+const (
+	OrderAsc            = schema.OrderAsc
+	OrderAscNullsFirst  = schema.OrderAscNullsFirst
+	OrderAscNullsLast   = schema.OrderAscNullsLast
+	OrderDesc           = schema.OrderDesc
+	OrderDescNullsFirst = schema.OrderDescNullsFirst
+	OrderDescNullsLast  = schema.OrderDescNullsLast
+)
+
+func SafeQuery(query string, args ...any) schema.QueryWithArgs {
+	return schema.SafeQuery(query, args)
+}
+
+type BeforeSelectHook interface {
+	BeforeSelect(ctx context.Context, query *SelectQuery) error
+}
+
+type AfterSelectHook interface {
+	AfterSelect(ctx context.Context, query *SelectQuery) error
+}
+
+type BeforeInsertHook interface {
+	BeforeInsert(ctx context.Context, query *InsertQuery) error
+}
+
+type AfterInsertHook interface {
+	AfterInsert(ctx context.Context, query *InsertQuery) error
+}
+
+type BeforeUpdateHook interface {
+	BeforeUpdate(ctx context.Context, query *UpdateQuery) error
+}
+
+type AfterUpdateHook interface {
+	AfterUpdate(ctx context.Context, query *UpdateQuery) error
+}
+
+type BeforeDeleteHook interface {
+	BeforeDelete(ctx context.Context, query *DeleteQuery) error
+}
+
+type AfterDeleteHook interface {
+	AfterDelete(ctx context.Context, query *DeleteQuery) error
+}
+
+type BeforeCreateTableHook interface {
+	BeforeCreateTable(ctx context.Context, query *CreateTableQuery) error
+}
+
+type AfterCreateTableHook interface {
+	AfterCreateTable(ctx context.Context, query *CreateTableQuery) error
+}
+
+type BeforeDropTableHook interface {
+	BeforeDropTable(ctx context.Context, query *DropTableQuery) error
+}
+
+type AfterDropTableHook interface {
+	AfterDropTable(ctx context.Context, query *DropTableQuery) error
+}
+
+// SetLogger overrides the default Bun logger.
+func SetLogger(logger internal.Logging) { + internal.SetLogger(logger) +} + +func In(slice any) schema.QueryAppender { + return schema.In(slice) +} + +func NullZero(value any) schema.QueryAppender { + return schema.NullZero(value) +} diff --git a/vendor/github.com/uptrace/bun/commitlint.config.js b/vendor/github.com/uptrace/bun/commitlint.config.js new file mode 100644 index 0000000..4fedde6 --- /dev/null +++ b/vendor/github.com/uptrace/bun/commitlint.config.js @@ -0,0 +1 @@ +module.exports = { extends: ['@commitlint/config-conventional'] } diff --git a/vendor/github.com/uptrace/bun/db.go b/vendor/github.com/uptrace/bun/db.go new file mode 100644 index 0000000..e2ec6bf --- /dev/null +++ b/vendor/github.com/uptrace/bun/db.go @@ -0,0 +1,778 @@ +package bun + +import ( + "context" + cryptorand "crypto/rand" + "database/sql" + "encoding/hex" + "fmt" + "reflect" + "strings" + "sync/atomic" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +const ( + discardUnknownColumns internal.Flag = 1 << iota +) + +type DBStats struct { + Queries uint32 + Errors uint32 +} + +type DBOption func(db *DB) + +func WithOptions(opts ...DBOption) DBOption { + return func(db *DB) { + for _, opt := range opts { + opt(db) + } + } +} + +func WithDiscardUnknownColumns() DBOption { + return func(db *DB) { + db.flags = db.flags.Set(discardUnknownColumns) + } +} + +// ConnResolver enables routing queries to multiple databases. +type ConnResolver interface { + ResolveConn(ctx context.Context, query Query) IConn + Close() error +} + +func WithConnResolver(resolver ConnResolver) DBOption { + return func(db *DB) { + db.resolver = resolver + } +} + +type DB struct { + // Must be a pointer so we copy the whole state, not individual fields. + *noCopyState + + gen schema.QueryGen + queryHooks []QueryHook +} + +// noCopyState contains DB fields that must not be copied on clone(), +// for example, it is forbidden to copy atomic.Pointer. +type noCopyState struct { + *sql.DB + dialect schema.Dialect + resolver ConnResolver + + flags internal.Flag + closed atomic.Bool + + stats DBStats +} + +func NewDB(sqldb *sql.DB, dialect schema.Dialect, opts ...DBOption) *DB { + dialect.Init(sqldb) + + db := &DB{ + noCopyState: &noCopyState{ + DB: sqldb, + dialect: dialect, + }, + gen: schema.NewQueryGen(dialect), + } + + for _, opt := range opts { + opt(db) + } + + return db +} + +func (db *DB) String() string { + var b strings.Builder + b.WriteString("DB") + return b.String() +} + +func (db *DB) Close() error { + if db.closed.Swap(true) { + return nil + } + + firstErr := db.DB.Close() + + if db.resolver != nil { + if err := db.resolver.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + return firstErr +} + +func (db *DB) DBStats() DBStats { + return DBStats{ + Queries: atomic.LoadUint32(&db.stats.Queries), + Errors: atomic.LoadUint32(&db.stats.Errors), + } +} + +func (db *DB) NewValues(model any) *ValuesQuery { + return NewValuesQuery(db, model) +} + +func (db *DB) NewMerge() *MergeQuery { + return NewMergeQuery(db) +} + +func (db *DB) NewSelect() *SelectQuery { + return NewSelectQuery(db) +} + +func (db *DB) NewInsert() *InsertQuery { + return NewInsertQuery(db) +} + +func (db *DB) NewUpdate() *UpdateQuery { + return NewUpdateQuery(db) +} + +func (db *DB) NewDelete() *DeleteQuery { + return NewDeleteQuery(db) +} + +func (db *DB) NewRaw(query string, args ...any) *RawQuery { + return NewRawQuery(db, query, args...) 
+} + +func (db *DB) NewCreateTable() *CreateTableQuery { + return NewCreateTableQuery(db) +} + +func (db *DB) NewDropTable() *DropTableQuery { + return NewDropTableQuery(db) +} + +func (db *DB) NewCreateIndex() *CreateIndexQuery { + return NewCreateIndexQuery(db) +} + +func (db *DB) NewDropIndex() *DropIndexQuery { + return NewDropIndexQuery(db) +} + +func (db *DB) NewTruncateTable() *TruncateTableQuery { + return NewTruncateTableQuery(db) +} + +func (db *DB) NewAddColumn() *AddColumnQuery { + return NewAddColumnQuery(db) +} + +func (db *DB) NewDropColumn() *DropColumnQuery { + return NewDropColumnQuery(db) +} + +func (db *DB) ResetModel(ctx context.Context, models ...any) error { + for _, model := range models { + if _, err := db.NewDropTable().Model(model).IfExists().Cascade().Exec(ctx); err != nil { + return err + } + if _, err := db.NewCreateTable().Model(model).Exec(ctx); err != nil { + return err + } + } + return nil +} + +func (db *DB) Dialect() schema.Dialect { + return db.dialect +} + +func (db *DB) ScanRows(ctx context.Context, rows *sql.Rows, dest ...any) error { + defer rows.Close() + + model, err := newModel(db, dest) + if err != nil { + return err + } + + _, err = model.ScanRows(ctx, rows) + if err != nil { + return err + } + + return rows.Err() +} + +func (db *DB) ScanRow(ctx context.Context, rows *sql.Rows, dest ...any) error { + model, err := newModel(db, dest) + if err != nil { + return err + } + + rs, ok := model.(rowScanner) + if !ok { + return fmt.Errorf("bun: %T does not support ScanRow", model) + } + + return rs.ScanRow(ctx, rows) +} + +func (db *DB) Table(typ reflect.Type) *schema.Table { + return db.dialect.Tables().Get(typ) +} + +// RegisterModel registers models by name so they can be referenced in table relations +// and fixtures. +func (db *DB) RegisterModel(models ...any) { + db.dialect.Tables().Register(models...) +} + +func (db *DB) clone() *DB { + clone := *db + + l := len(clone.queryHooks) + clone.queryHooks = clone.queryHooks[:l:l] + + return &clone +} + +// WithNamedArg returns a copy of the DB with an additional named argument +// bound into its query generator. Named arguments can later be referenced +// in SQL queries using placeholders (e.g. ?name). This method does not +// mutate the original DB instance but instead creates a cloned copy. +func (db *DB) WithNamedArg(name string, value any) *DB { + clone := db.clone() + clone.gen = clone.gen.WithNamedArg(name, value) + return clone +} + +func (db *DB) QueryGen() schema.QueryGen { + return db.gen +} + +type queryHookIniter interface { + Init(db *DB) +} + +// WithQueryHook returns a copy of the DB with the provided query hook +// attached. A query hook allows inspection or modification of queries +// before/after execution (e.g. for logging, tracing, metrics). +// If the hook implements queryHookIniter, its Init method is invoked +// with the current DB before cloning. Like other modifiers, this +// method leaves the original DB unmodified. 
+func (db *DB) WithQueryHook(hook QueryHook) *DB { + if initer, ok := hook.(queryHookIniter); ok { + initer.Init(db) + } + + clone := db.clone() + clone.queryHooks = append(clone.queryHooks, hook) + return clone +} + +// DEPRECATED: use WithQueryHook instead +func (db *DB) AddQueryHook(hook QueryHook) { + if initer, ok := hook.(queryHookIniter); ok { + initer.Init(db) + } + db.queryHooks = append(db.queryHooks, hook) +} + +// DEPRECATED: use WithQueryHook instead +func (db *DB) ResetQueryHooks() { + for i := range db.queryHooks { + db.queryHooks[i] = nil + } + db.queryHooks = nil +} + +// UpdateFQN returns a fully qualified column name. For MySQL, it returns the column name with +// the table alias. For other RDBMS, it returns just the column name. +func (db *DB) UpdateFQN(alias, column string) Ident { + if db.HasFeature(feature.UpdateMultiTable) { + return Ident(alias + "." + column) + } + return Ident(column) +} + +// HasFeature uses feature package to report whether the underlying DBMS supports this feature. +func (db *DB) HasFeature(feat feature.Feature) bool { + return db.dialect.Features().Has(feat) +} + +//------------------------------------------------------------------------------ + +func (db *DB) Exec(query string, args ...any) (sql.Result, error) { + return db.ExecContext(context.Background(), query, args...) +} + +func (db *DB) ExecContext( + ctx context.Context, query string, args ...any, +) (sql.Result, error) { + formattedQuery := db.format(query, args) + ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + res, err := db.DB.ExecContext(ctx, formattedQuery) + db.afterQuery(ctx, event, res, err) + return res, err +} + +func (db *DB) Query(query string, args ...any) (*sql.Rows, error) { + return db.QueryContext(context.Background(), query, args...) +} + +func (db *DB) QueryContext( + ctx context.Context, query string, args ...any, +) (*sql.Rows, error) { + formattedQuery := db.format(query, args) + ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + rows, err := db.DB.QueryContext(ctx, formattedQuery) + db.afterQuery(ctx, event, nil, err) + return rows, err +} + +func (db *DB) QueryRow(query string, args ...any) *sql.Row { + return db.QueryRowContext(context.Background(), query, args...) +} + +func (db *DB) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row { + formattedQuery := db.format(query, args) + ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + row := db.DB.QueryRowContext(ctx, formattedQuery) + db.afterQuery(ctx, event, nil, row.Err()) + return row +} + +func (db *DB) format(query string, args []any) string { + return db.gen.FormatQuery(query, args...) 
+} + +//------------------------------------------------------------------------------ + +type Conn struct { + db *DB + *sql.Conn +} + +func (db *DB) Conn(ctx context.Context) (Conn, error) { + conn, err := db.DB.Conn(ctx) + if err != nil { + return Conn{}, err + } + return Conn{ + db: db, + Conn: conn, + }, nil +} + +func (c Conn) ExecContext( + ctx context.Context, query string, args ...any, +) (sql.Result, error) { + formattedQuery := c.db.format(query, args) + ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + res, err := c.Conn.ExecContext(ctx, formattedQuery) + c.db.afterQuery(ctx, event, res, err) + return res, err +} + +func (c Conn) QueryContext( + ctx context.Context, query string, args ...any, +) (*sql.Rows, error) { + formattedQuery := c.db.format(query, args) + ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + rows, err := c.Conn.QueryContext(ctx, formattedQuery) + c.db.afterQuery(ctx, event, nil, err) + return rows, err +} + +func (c Conn) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row { + formattedQuery := c.db.format(query, args) + ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + row := c.Conn.QueryRowContext(ctx, formattedQuery) + c.db.afterQuery(ctx, event, nil, row.Err()) + return row +} + +func (c Conn) Dialect() schema.Dialect { + return c.db.Dialect() +} + +func (c Conn) NewValues(model any) *ValuesQuery { + return NewValuesQuery(c.db, model).Conn(c) +} + +func (c Conn) NewMerge() *MergeQuery { + return NewMergeQuery(c.db).Conn(c) +} + +func (c Conn) NewSelect() *SelectQuery { + return NewSelectQuery(c.db).Conn(c) +} + +func (c Conn) NewInsert() *InsertQuery { + return NewInsertQuery(c.db).Conn(c) +} + +func (c Conn) NewUpdate() *UpdateQuery { + return NewUpdateQuery(c.db).Conn(c) +} + +func (c Conn) NewDelete() *DeleteQuery { + return NewDeleteQuery(c.db).Conn(c) +} + +func (c Conn) NewRaw(query string, args ...any) *RawQuery { + return NewRawQuery(c.db, query, args...).Conn(c) +} + +func (c Conn) NewCreateTable() *CreateTableQuery { + return NewCreateTableQuery(c.db).Conn(c) +} + +func (c Conn) NewDropTable() *DropTableQuery { + return NewDropTableQuery(c.db).Conn(c) +} + +func (c Conn) NewCreateIndex() *CreateIndexQuery { + return NewCreateIndexQuery(c.db).Conn(c) +} + +func (c Conn) NewDropIndex() *DropIndexQuery { + return NewDropIndexQuery(c.db).Conn(c) +} + +func (c Conn) NewTruncateTable() *TruncateTableQuery { + return NewTruncateTableQuery(c.db).Conn(c) +} + +func (c Conn) NewAddColumn() *AddColumnQuery { + return NewAddColumnQuery(c.db).Conn(c) +} + +func (c Conn) NewDropColumn() *DropColumnQuery { + return NewDropColumnQuery(c.db).Conn(c) +} + +// RunInTx runs the function in a transaction. If the function returns an error, +// the transaction is rolled back. Otherwise, the transaction is committed. 
+func (c Conn) RunInTx( + ctx context.Context, opts *sql.TxOptions, fn func(ctx context.Context, tx Tx) error, +) error { + tx, err := c.BeginTx(ctx, opts) + if err != nil { + return err + } + + var done bool + + defer func() { + if !done { + _ = tx.Rollback() + } + }() + + if err := fn(ctx, tx); err != nil { + return err + } + + done = true + return tx.Commit() +} + +func (c Conn) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) { + ctx, event := c.db.beforeQuery(ctx, nil, "BEGIN", nil, "BEGIN", nil) + tx, err := c.Conn.BeginTx(ctx, opts) + c.db.afterQuery(ctx, event, nil, err) + if err != nil { + return Tx{}, err + } + return Tx{ + ctx: ctx, + db: c.db, + Tx: tx, + }, nil +} + +//------------------------------------------------------------------------------ + +type Stmt struct { + *sql.Stmt +} + +func (db *DB) Prepare(query string) (Stmt, error) { + return db.PrepareContext(context.Background(), query) +} + +func (db *DB) PrepareContext(ctx context.Context, query string) (Stmt, error) { + stmt, err := db.DB.PrepareContext(ctx, query) + if err != nil { + return Stmt{}, err + } + return Stmt{Stmt: stmt}, nil +} + +//------------------------------------------------------------------------------ + +type Tx struct { + ctx context.Context + db *DB + // name is the name of a savepoint + name string + *sql.Tx +} + +// RunInTx runs the function in a transaction. If the function returns an error, +// the transaction is rolled back. Otherwise, the transaction is committed. +func (db *DB) RunInTx( + ctx context.Context, opts *sql.TxOptions, fn func(ctx context.Context, tx Tx) error, +) error { + tx, err := db.BeginTx(ctx, opts) + if err != nil { + return err + } + + var done bool + + defer func() { + if !done { + _ = tx.Rollback() + } + }() + + if err := fn(ctx, tx); err != nil { + return err + } + + done = true + return tx.Commit() +} + +func (db *DB) Begin() (Tx, error) { + return db.BeginTx(context.Background(), nil) +} + +func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) { + ctx, event := db.beforeQuery(ctx, nil, "BEGIN", nil, "BEGIN", nil) + tx, err := db.DB.BeginTx(ctx, opts) + db.afterQuery(ctx, event, nil, err) + if err != nil { + return Tx{}, err + } + return Tx{ + ctx: ctx, + db: db, + Tx: tx, + }, nil +} + +func (tx Tx) Commit() error { + if tx.name == "" { + return tx.commitTX() + } + return tx.commitSP() +} + +func (tx Tx) commitTX() error { + ctx, event := tx.db.beforeQuery(tx.ctx, nil, "COMMIT", nil, "COMMIT", nil) + err := tx.Tx.Commit() + tx.db.afterQuery(ctx, event, nil, err) + return err +} + +func (tx Tx) commitSP() error { + if tx.db.HasFeature(feature.MSSavepoint) { + return nil + } + query := "RELEASE SAVEPOINT " + tx.name + _, err := tx.ExecContext(tx.ctx, query) + return err +} + +func (tx Tx) Rollback() error { + if tx.name == "" { + return tx.rollbackTX() + } + return tx.rollbackSP() +} + +func (tx Tx) rollbackTX() error { + ctx, event := tx.db.beforeQuery(tx.ctx, nil, "ROLLBACK", nil, "ROLLBACK", nil) + err := tx.Tx.Rollback() + tx.db.afterQuery(ctx, event, nil, err) + return err +} + +func (tx Tx) rollbackSP() error { + query := "ROLLBACK TO SAVEPOINT " + tx.name + if tx.db.HasFeature(feature.MSSavepoint) { + query = "ROLLBACK TRANSACTION " + tx.name + } + _, err := tx.ExecContext(tx.ctx, query) + return err +} + +func (tx Tx) Exec(query string, args ...any) (sql.Result, error) { + return tx.ExecContext(context.TODO(), query, args...) 
+} + +func (tx Tx) ExecContext( + ctx context.Context, query string, args ...any, +) (sql.Result, error) { + formattedQuery := tx.db.format(query, args) + ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + res, err := tx.Tx.ExecContext(ctx, formattedQuery) + tx.db.afterQuery(ctx, event, res, err) + return res, err +} + +func (tx Tx) Query(query string, args ...any) (*sql.Rows, error) { + return tx.QueryContext(context.TODO(), query, args...) +} + +func (tx Tx) QueryContext( + ctx context.Context, query string, args ...any, +) (*sql.Rows, error) { + formattedQuery := tx.db.format(query, args) + ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + rows, err := tx.Tx.QueryContext(ctx, formattedQuery) + tx.db.afterQuery(ctx, event, nil, err) + return rows, err +} + +func (tx Tx) QueryRow(query string, args ...any) *sql.Row { + return tx.QueryRowContext(context.TODO(), query, args...) +} + +func (tx Tx) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row { + formattedQuery := tx.db.format(query, args) + ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil) + row := tx.Tx.QueryRowContext(ctx, formattedQuery) + tx.db.afterQuery(ctx, event, nil, row.Err()) + return row +} + +//------------------------------------------------------------------------------ + +func (tx Tx) Begin() (Tx, error) { + return tx.BeginTx(tx.ctx, nil) +} + +// BeginTx will save a point in the running transaction. +func (tx Tx) BeginTx(ctx context.Context, _ *sql.TxOptions) (Tx, error) { + // mssql savepoint names are limited to 32 characters + sp := make([]byte, 14) + _, err := cryptorand.Read(sp) + if err != nil { + return Tx{}, err + } + + qName := "SP_" + hex.EncodeToString(sp) + query := "SAVEPOINT " + qName + if tx.db.HasFeature(feature.MSSavepoint) { + query = "SAVE TRANSACTION " + qName + } + _, err = tx.ExecContext(ctx, query) + if err != nil { + return Tx{}, err + } + return Tx{ + ctx: ctx, + db: tx.db, + Tx: tx.Tx, + name: qName, + }, nil +} + +func (tx Tx) RunInTx( + ctx context.Context, _ *sql.TxOptions, fn func(ctx context.Context, tx Tx) error, +) error { + sp, err := tx.BeginTx(ctx, nil) + if err != nil { + return err + } + + var done bool + + defer func() { + if !done { + _ = sp.Rollback() + } + }() + + if err := fn(ctx, sp); err != nil { + return err + } + + done = true + return sp.Commit() +} + +func (tx Tx) Dialect() schema.Dialect { + return tx.db.Dialect() +} + +func (tx Tx) NewValues(model any) *ValuesQuery { + return NewValuesQuery(tx.db, model).Conn(tx) +} + +func (tx Tx) NewMerge() *MergeQuery { + return NewMergeQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewSelect() *SelectQuery { + return NewSelectQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewInsert() *InsertQuery { + return NewInsertQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewUpdate() *UpdateQuery { + return NewUpdateQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewDelete() *DeleteQuery { + return NewDeleteQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewRaw(query string, args ...any) *RawQuery { + return NewRawQuery(tx.db, query, args...).Conn(tx) +} + +func (tx Tx) NewCreateTable() *CreateTableQuery { + return NewCreateTableQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewDropTable() *DropTableQuery { + return NewDropTableQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewCreateIndex() *CreateIndexQuery { + return NewCreateIndexQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewDropIndex() *DropIndexQuery { + return NewDropIndexQuery(tx.db).Conn(tx) +} + +func (tx Tx) 
NewTruncateTable() *TruncateTableQuery { + return NewTruncateTableQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewAddColumn() *AddColumnQuery { + return NewAddColumnQuery(tx.db).Conn(tx) +} + +func (tx Tx) NewDropColumn() *DropColumnQuery { + return NewDropColumnQuery(tx.db).Conn(tx) +} + +func (db *DB) makeQueryBytes() []byte { + return internal.MakeQueryBytes() +} diff --git a/vendor/github.com/uptrace/bun/dialect/append.go b/vendor/github.com/uptrace/bun/dialect/append.go new file mode 100644 index 0000000..8f5485f --- /dev/null +++ b/vendor/github.com/uptrace/bun/dialect/append.go @@ -0,0 +1,105 @@ +package dialect + +import ( + "math" + "strconv" + + "github.com/uptrace/bun/internal" +) + +func AppendError(b []byte, err error) []byte { + b = append(b, "?!("...) + b = append(b, err.Error()...) + b = append(b, ')') + return b +} + +func AppendNull(b []byte) []byte { + return append(b, "NULL"...) +} + +func AppendBool(b []byte, v bool) []byte { + if v { + return append(b, "TRUE"...) + } + return append(b, "FALSE"...) +} + +func AppendFloat32(b []byte, num float32) []byte { + return appendFloat(b, float64(num), 32) +} + +func AppendFloat64(b []byte, num float64) []byte { + return appendFloat(b, num, 64) +} + +func appendFloat(b []byte, num float64, bitSize int) []byte { + switch { + case math.IsNaN(num): + return append(b, "'NaN'"...) + case math.IsInf(num, 1): + return append(b, "'Infinity'"...) + case math.IsInf(num, -1): + return append(b, "'-Infinity'"...) + default: + return strconv.AppendFloat(b, num, 'f', -1, bitSize) + } +} + +//------------------------------------------------------------------------------ + +func AppendName(b []byte, ident string, quote byte) []byte { + return appendName(b, internal.Bytes(ident), quote) +} + +func appendName(b, ident []byte, quote byte) []byte { + b = append(b, quote) + for _, c := range ident { + if c == quote { + b = append(b, quote, quote) + } else { + b = append(b, c) + } + } + b = append(b, quote) + return b +} + +func AppendIdent(b []byte, name string, quote byte) []byte { + return appendIdent(b, internal.Bytes(name), quote) +} + +func appendIdent(b, name []byte, quote byte) []byte { + var quoted bool +loop: + for _, c := range name { + switch c { + case '*': + if !quoted { + b = append(b, '*') + continue loop + } + case '.': + if quoted { + b = append(b, quote) + quoted = false + } + b = append(b, '.') + continue loop + } + + if !quoted { + b = append(b, quote) + quoted = true + } + if c == quote { + b = append(b, quote, quote) + } else { + b = append(b, c) + } + } + if quoted { + b = append(b, quote) + } + return b +} diff --git a/vendor/github.com/uptrace/bun/dialect/dialect.go b/vendor/github.com/uptrace/bun/dialect/dialect.go new file mode 100644 index 0000000..79a02e8 --- /dev/null +++ b/vendor/github.com/uptrace/bun/dialect/dialect.go @@ -0,0 +1,31 @@ +package dialect + +type Name int + +func (n Name) String() string { + switch n { + case Invalid: + return "invalid" + case PG: + return "pg" + case SQLite: + return "sqlite" + case MySQL: + return "mysql" + case MSSQL: + return "mssql" + case Oracle: + return "oracle" + default: + return "custom" + } +} + +const ( + Invalid Name = iota + PG + SQLite + MySQL + MSSQL + Oracle +) diff --git a/vendor/github.com/uptrace/bun/dialect/feature/feature.go b/vendor/github.com/uptrace/bun/dialect/feature/feature.go new file mode 100644 index 0000000..0b94448 --- /dev/null +++ b/vendor/github.com/uptrace/bun/dialect/feature/feature.go @@ -0,0 +1,98 @@ +package feature + +import ( + "fmt" + "strconv" + 
+ "github.com/uptrace/bun/internal" +) + +type Feature = internal.Flag + +const ( + CTE Feature = 1 << iota + WithValues + Returning + InsertReturning + Output // mssql + DefaultPlaceholder + DoubleColonCast + ValuesRow + UpdateMultiTable + InsertTableAlias + UpdateTableAlias + DeleteTableAlias + AutoIncrement + Identity + TableCascade + TableIdentity + TableTruncate + InsertOnConflict // INSERT ... ON CONFLICT + InsertOnDuplicateKey // INSERT ... ON DUPLICATE KEY + InsertIgnore // INSERT IGNORE ... + TableNotExists + OffsetFetch + SelectExists + UpdateFromTable + MSSavepoint + GeneratedIdentity + CompositeIn // ... WHERE (A,B) IN ((N, NN), (N, NN)...) + UpdateOrderLimit // UPDATE ... ORDER BY ... LIMIT ... + DeleteOrderLimit // DELETE ... ORDER BY ... LIMIT ... + DeleteReturning + MergeReturning + AlterColumnExists // ADD/DROP COLUMN IF NOT EXISTS/IF EXISTS + FKDefaultOnAction // FK ON UPDATE/ON DELETE has default value: NO ACTION +) + +type NotSupportError struct { + Flag Feature +} + +func (err *NotSupportError) Error() string { + name, ok := flag2str[err.Flag] + if !ok { + name = strconv.FormatInt(int64(err.Flag), 10) + } + return fmt.Sprintf("bun: feature %s is not supported by current dialect", name) +} + +func NewNotSupportError(flag Feature) *NotSupportError { + return &NotSupportError{Flag: flag} +} + +var flag2str = map[Feature]string{ + CTE: "CTE", + WithValues: "WithValues", + Returning: "Returning", + InsertReturning: "InsertReturning", + Output: "Output", + DefaultPlaceholder: "DefaultPlaceholder", + DoubleColonCast: "DoubleColonCast", + ValuesRow: "ValuesRow", + UpdateMultiTable: "UpdateMultiTable", + InsertTableAlias: "InsertTableAlias", + UpdateTableAlias: "UpdateTableAlias", + DeleteTableAlias: "DeleteTableAlias", + AutoIncrement: "AutoIncrement", + Identity: "Identity", + TableCascade: "TableCascade", + TableIdentity: "TableIdentity", + TableTruncate: "TableTruncate", + InsertOnConflict: "InsertOnConflict", + InsertOnDuplicateKey: "InsertOnDuplicateKey", + InsertIgnore: "InsertIgnore", + TableNotExists: "TableNotExists", + OffsetFetch: "OffsetFetch", + SelectExists: "SelectExists", + UpdateFromTable: "UpdateFromTable", + MSSavepoint: "MSSavepoint", + GeneratedIdentity: "GeneratedIdentity", + CompositeIn: "CompositeIn", + UpdateOrderLimit: "UpdateOrderLimit", + DeleteOrderLimit: "DeleteOrderLimit", + DeleteReturning: "DeleteReturning", + MergeReturning: "MergeReturning", + AlterColumnExists: "AlterColumnExists", + FKDefaultOnAction: "FKDefaultOnAction", +} diff --git a/vendor/github.com/uptrace/bun/dialect/sqltype/sqltype.go b/vendor/github.com/uptrace/bun/dialect/sqltype/sqltype.go new file mode 100644 index 0000000..1031fd3 --- /dev/null +++ b/vendor/github.com/uptrace/bun/dialect/sqltype/sqltype.go @@ -0,0 +1,16 @@ +package sqltype + +const ( + Boolean = "BOOLEAN" + SmallInt = "SMALLINT" + Integer = "INTEGER" + BigInt = "BIGINT" + Real = "REAL" + DoublePrecision = "DOUBLE PRECISION" + VarChar = "VARCHAR" + Blob = "BLOB" + Timestamp = "TIMESTAMP" + JSON = "JSON" + JSONB = "JSONB" + HSTORE = "HSTORE" +) diff --git a/vendor/github.com/uptrace/bun/extra/bunjson/json.go b/vendor/github.com/uptrace/bun/extra/bunjson/json.go new file mode 100644 index 0000000..2e0c007 --- /dev/null +++ b/vendor/github.com/uptrace/bun/extra/bunjson/json.go @@ -0,0 +1,26 @@ +package bunjson + +import ( + "encoding/json" + "io" +) + +var _ Provider = (*StdProvider)(nil) + +type StdProvider struct{} + +func (StdProvider) Marshal(v any) ([]byte, error) { + return json.Marshal(v) +} + +func 
(StdProvider) Unmarshal(data []byte, v any) error { + return json.Unmarshal(data, v) +} + +func (StdProvider) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +func (StdProvider) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} diff --git a/vendor/github.com/uptrace/bun/extra/bunjson/provider.go b/vendor/github.com/uptrace/bun/extra/bunjson/provider.go new file mode 100644 index 0000000..2cc30e7 --- /dev/null +++ b/vendor/github.com/uptrace/bun/extra/bunjson/provider.go @@ -0,0 +1,43 @@ +package bunjson + +import ( + "io" +) + +var provider Provider = StdProvider{} + +func SetProvider(p Provider) { + provider = p +} + +type Provider interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + NewEncoder(w io.Writer) Encoder + NewDecoder(r io.Reader) Decoder +} + +type Decoder interface { + Decode(v any) error + UseNumber() +} + +type Encoder interface { + Encode(v any) error +} + +func Marshal(v any) ([]byte, error) { + return provider.Marshal(v) +} + +func Unmarshal(data []byte, v any) error { + return provider.Unmarshal(data, v) +} + +func NewEncoder(w io.Writer) Encoder { + return provider.NewEncoder(w) +} + +func NewDecoder(r io.Reader) Decoder { + return provider.NewDecoder(r) +} diff --git a/vendor/github.com/uptrace/bun/hook.go b/vendor/github.com/uptrace/bun/hook.go new file mode 100644 index 0000000..d721ca3 --- /dev/null +++ b/vendor/github.com/uptrace/bun/hook.go @@ -0,0 +1,112 @@ +package bun + +import ( + "context" + "database/sql" + "strings" + "sync/atomic" + "time" + "unicode" +) + +type QueryEvent struct { + DB *DB + + IQuery Query + Query string + QueryTemplate string + QueryArgs []any + Model Model + + StartTime time.Time + Result sql.Result + Err error + + Stash map[any]any +} + +func (e *QueryEvent) Operation() string { + if e.IQuery != nil { + return e.IQuery.Operation() + } + return queryOperation(e.Query) +} + +func queryOperation(query string) string { + queryOp := strings.TrimLeftFunc(query, unicode.IsSpace) + + if idx := strings.IndexByte(queryOp, ' '); idx > 0 { + queryOp = queryOp[:idx] + } + if len(queryOp) > 16 { + queryOp = queryOp[:16] + } + return queryOp +} + +type QueryHook interface { + BeforeQuery(context.Context, *QueryEvent) context.Context + AfterQuery(context.Context, *QueryEvent) +} + +func (db *DB) beforeQuery( + ctx context.Context, + iquery Query, + queryTemplate string, + queryArgs []any, + query string, + model Model, +) (context.Context, *QueryEvent) { + atomic.AddUint32(&db.stats.Queries, 1) + + if len(db.queryHooks) == 0 { + return ctx, nil + } + + event := &QueryEvent{ + DB: db, + + Model: model, + IQuery: iquery, + Query: query, + QueryTemplate: queryTemplate, + QueryArgs: queryArgs, + + StartTime: time.Now(), + } + + for _, hook := range db.queryHooks { + ctx = hook.BeforeQuery(ctx, event) + } + + return ctx, event +} + +func (db *DB) afterQuery( + ctx context.Context, + event *QueryEvent, + res sql.Result, + err error, +) { + switch err { + case nil, sql.ErrNoRows: + // nothing + default: + atomic.AddUint32(&db.stats.Errors, 1) + } + + if event == nil { + return + } + + event.Result = res + event.Err = err + + db.afterQueryFromIndex(ctx, event, len(db.queryHooks)-1) +} + +func (db *DB) afterQueryFromIndex(ctx context.Context, event *QueryEvent, hookIndex int) { + for ; hookIndex >= 0; hookIndex-- { + db.queryHooks[hookIndex].AfterQuery(ctx, event) + } +} diff --git a/vendor/github.com/uptrace/bun/internal/flag.go b/vendor/github.com/uptrace/bun/internal/flag.go new file mode 
100644 index 0000000..22d2db2 --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/flag.go @@ -0,0 +1,16 @@ +package internal + +type Flag uint64 + +func (flag Flag) Has(other Flag) bool { + return flag&other != 0 +} + +func (flag Flag) Set(other Flag) Flag { + return flag | other +} + +func (flag Flag) Remove(other Flag) Flag { + flag &= ^other + return flag +} diff --git a/vendor/github.com/uptrace/bun/internal/hex.go b/vendor/github.com/uptrace/bun/internal/hex.go new file mode 100644 index 0000000..6fae2bb --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/hex.go @@ -0,0 +1,43 @@ +package internal + +import ( + fasthex "github.com/tmthrgd/go-hex" +) + +type HexEncoder struct { + b []byte + written bool +} + +func NewHexEncoder(b []byte) *HexEncoder { + return &HexEncoder{ + b: b, + } +} + +func (enc *HexEncoder) Bytes() []byte { + return enc.b +} + +func (enc *HexEncoder) Write(b []byte) (int, error) { + if !enc.written { + enc.b = append(enc.b, '\'') + enc.b = append(enc.b, `\x`...) + enc.written = true + } + + i := len(enc.b) + enc.b = append(enc.b, make([]byte, fasthex.EncodedLen(len(b)))...) + fasthex.Encode(enc.b[i:], b) + + return len(b), nil +} + +func (enc *HexEncoder) Close() error { + if enc.written { + enc.b = append(enc.b, '\'') + } else { + enc.b = append(enc.b, "NULL"...) + } + return nil +} diff --git a/vendor/github.com/uptrace/bun/internal/logger.go b/vendor/github.com/uptrace/bun/internal/logger.go new file mode 100644 index 0000000..4f7ee61 --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/logger.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "log" + "os" +) + +type Logging interface { + Printf(format string, v ...any) +} + +var defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + +var Logger Logging = &logger{ + log: defaultLogger, +} + +var Warn = &wrapper{ + prefix: "WARN: bun: ", + logger: Logger, +} + +var Deprecated = &wrapper{ + prefix: "DEPRECATED: bun: ", + logger: Logger, +} + +type logger struct { + log *log.Logger +} + +func (l *logger) Printf(format string, v ...any) { + _ = l.log.Output(2, fmt.Sprintf(format, v...)) +} + +type wrapper struct { + prefix string + logger Logging +} + +func (w *wrapper) Printf(format string, v ...any) { + w.logger.Printf(w.prefix+format, v...) 
+} + +func SetLogger(newLogger Logging) { + if newLogger == nil { + Logger = &logger{log: defaultLogger} + } else { + Logger = newLogger + } + Warn.logger = Logger + Deprecated.logger = Logger +} diff --git a/vendor/github.com/uptrace/bun/internal/map_key.go b/vendor/github.com/uptrace/bun/internal/map_key.go new file mode 100644 index 0000000..7e14d3e --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/map_key.go @@ -0,0 +1,67 @@ +package internal + +import "reflect" + +var ifaceType = reflect.TypeFor[any]() + +type MapKey struct { + iface any +} + +func NewMapKey(is []any) MapKey { + return MapKey{ + iface: newMapKey(is), + } +} + +func newMapKey(is []any) any { + switch len(is) { + case 1: + ptr := new([1]any) + copy((*ptr)[:], is) + return *ptr + case 2: + ptr := new([2]any) + copy((*ptr)[:], is) + return *ptr + case 3: + ptr := new([3]any) + copy((*ptr)[:], is) + return *ptr + case 4: + ptr := new([4]any) + copy((*ptr)[:], is) + return *ptr + case 5: + ptr := new([5]any) + copy((*ptr)[:], is) + return *ptr + case 6: + ptr := new([6]any) + copy((*ptr)[:], is) + return *ptr + case 7: + ptr := new([7]any) + copy((*ptr)[:], is) + return *ptr + case 8: + ptr := new([8]any) + copy((*ptr)[:], is) + return *ptr + case 9: + ptr := new([9]any) + copy((*ptr)[:], is) + return *ptr + case 10: + ptr := new([10]any) + copy((*ptr)[:], is) + return *ptr + default: + } + + at := reflect.New(reflect.ArrayOf(len(is), ifaceType)).Elem() + for i, v := range is { + *(at.Index(i).Addr().Interface().(*any)) = v + } + return at.Interface() +} diff --git a/vendor/github.com/uptrace/bun/internal/parser/parser.go b/vendor/github.com/uptrace/bun/internal/parser/parser.go new file mode 100644 index 0000000..1f27044 --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/parser/parser.go @@ -0,0 +1,169 @@ +package parser + +import ( + "bytes" + "fmt" + "io" + "strconv" + + "github.com/uptrace/bun/internal" +) + +type Parser struct { + b []byte + i int +} + +func New(b []byte) *Parser { + return &Parser{ + b: b, + } +} + +func NewString(s string) *Parser { + return New(internal.Bytes(s)) +} + +func (p *Parser) Reset(b []byte) { + p.b = b + p.i = 0 +} + +func (p *Parser) Valid() bool { + return p.i < len(p.b) +} + +func (p *Parser) Remaining() []byte { + return p.b[p.i:] +} + +func (p *Parser) ReadByte() (byte, error) { + if p.Valid() { + ch := p.b[p.i] + p.Advance() + return ch, nil + } + return 0, io.ErrUnexpectedEOF +} + +func (p *Parser) Read() byte { + if p.Valid() { + ch := p.b[p.i] + p.Advance() + return ch + } + return 0 +} + +func (p *Parser) Unread() { + if p.i > 0 { + p.i-- + } +} + +func (p *Parser) Peek() byte { + if p.Valid() { + return p.b[p.i] + } + return 0 +} + +func (p *Parser) Advance() { + p.i++ +} + +func (p *Parser) Skip(skip byte) error { + ch := p.Peek() + if ch == skip { + p.Advance() + return nil + } + return fmt.Errorf("got %q, wanted %q", ch, skip) +} + +func (p *Parser) SkipPrefix(skip []byte) error { + if !bytes.HasPrefix(p.b[p.i:], skip) { + return fmt.Errorf("got %q, wanted prefix %q", p.b, skip) + } + p.i += len(skip) + return nil +} + +func (p *Parser) CutPrefix(skip []byte) bool { + if !bytes.HasPrefix(p.b[p.i:], skip) { + return false + } + p.i += len(skip) + return true +} + +func (p *Parser) ReadSep(sep byte) ([]byte, bool) { + ind := bytes.IndexByte(p.b[p.i:], sep) + if ind == -1 { + b := p.b[p.i:] + p.i = len(p.b) + return b, false + } + + b := p.b[p.i : p.i+ind] + p.i += ind + 1 + return b, true +} + +func (p *Parser) ReadIdentifier() (string, bool) { + if p.i < 
len(p.b) && p.b[p.i] == '(' { + s := p.i + 1 + if ind := bytes.IndexByte(p.b[s:], ')'); ind != -1 { + b := p.b[s : s+ind] + p.i = s + ind + 1 + return internal.String(b), false + } + } + + ind := len(p.b) - p.i + var alpha bool + for i, c := range p.b[p.i:] { + if isNum(c) { + continue + } + if isAlpha(c) || (i > 0 && alpha && c == '_') { + alpha = true + continue + } + ind = i + break + } + if ind == 0 { + return "", false + } + b := p.b[p.i : p.i+ind] + p.i += ind + return internal.String(b), !alpha +} + +func (p *Parser) ReadNumber() int { + ind := len(p.b) - p.i + for i, c := range p.b[p.i:] { + if !isNum(c) { + ind = i + break + } + } + if ind == 0 { + return 0 + } + n, err := strconv.Atoi(string(p.b[p.i : p.i+ind])) + if err != nil { + panic(err) + } + p.i += ind + return n +} + +func isNum(c byte) bool { + return c >= '0' && c <= '9' +} + +func isAlpha(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} diff --git a/vendor/github.com/uptrace/bun/internal/safe.go b/vendor/github.com/uptrace/bun/internal/safe.go new file mode 100644 index 0000000..862ff0e --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/safe.go @@ -0,0 +1,11 @@ +// +build appengine + +package internal + +func String(b []byte) string { + return string(b) +} + +func Bytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/uptrace/bun/internal/tagparser/parser.go b/vendor/github.com/uptrace/bun/internal/tagparser/parser.go new file mode 100644 index 0000000..a390585 --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/tagparser/parser.go @@ -0,0 +1,184 @@ +package tagparser + +import ( + "strings" +) + +type Tag struct { + Name string + Options map[string][]string +} + +func (t Tag) IsZero() bool { + return t.Name == "" && t.Options == nil +} + +func (t Tag) HasOption(name string) bool { + _, ok := t.Options[name] + return ok +} + +func (t Tag) Option(name string) (string, bool) { + if vs, ok := t.Options[name]; ok { + return vs[len(vs)-1], true + } + return "", false +} + +func Parse(s string) Tag { + if s == "" { + return Tag{} + } + p := parser{ + s: s, + } + p.parse() + return p.tag +} + +type parser struct { + s string + i int + + tag Tag + seenName bool // for empty names +} + +func (p *parser) setName(name string) { + if p.seenName { + p.addOption(name, "") + } else { + p.seenName = true + p.tag.Name = name + } +} + +func (p *parser) addOption(key, value string) { + p.seenName = true + if key == "" { + return + } + if p.tag.Options == nil { + p.tag.Options = make(map[string][]string) + } + if vs, ok := p.tag.Options[key]; ok { + p.tag.Options[key] = append(vs, value) + } else { + p.tag.Options[key] = []string{value} + } +} + +func (p *parser) parse() { + for p.valid() { + p.parseKeyValue() + if p.peek() == ',' { + p.i++ + } + } +} + +func (p *parser) parseKeyValue() { + start := p.i + + for p.valid() { + switch c := p.read(); c { + case ',': + key := p.s[start : p.i-1] + p.setName(key) + return + case ':': + key := p.s[start : p.i-1] + value := p.parseValue() + p.addOption(key, value) + return + case '"': + key := p.parseQuotedValue() + p.setName(key) + return + } + } + + key := p.s[start:p.i] + p.setName(key) +} + +func (p *parser) parseValue() string { + start := p.i + + for p.valid() { + switch c := p.read(); c { + case '"': + return p.parseQuotedValue() + case ',': + return p.s[start : p.i-1] + case '(': + p.skipPairs('(', ')') + } + } + + if p.i == start { + return "" + } + return p.s[start:p.i] +} + +func (p *parser) parseQuotedValue() string { + 
if i := strings.IndexByte(p.s[p.i:], '"'); i >= 0 && p.s[p.i+i-1] != '\\' { + s := p.s[p.i : p.i+i] + p.i += i + 1 + return s + } + + b := make([]byte, 0, 16) + + for p.valid() { + switch c := p.read(); c { + case '\\': + b = append(b, p.read()) + case '"': + return string(b) + default: + b = append(b, c) + } + } + + return "" +} + +func (p *parser) skipPairs(start, end byte) { + var lvl int + for p.valid() { + switch c := p.read(); c { + case '"': + _ = p.parseQuotedValue() + case start: + lvl++ + case end: + if lvl == 0 { + return + } + lvl-- + } + } +} + +func (p *parser) valid() bool { + return p.i < len(p.s) +} + +func (p *parser) read() byte { + if !p.valid() { + return 0 + } + c := p.s[p.i] + p.i++ + return c +} + +func (p *parser) peek() byte { + if !p.valid() { + return 0 + } + c := p.s[p.i] + return c +} diff --git a/vendor/github.com/uptrace/bun/internal/time.go b/vendor/github.com/uptrace/bun/internal/time.go new file mode 100644 index 0000000..2cb69b4 --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/time.go @@ -0,0 +1,61 @@ +package internal + +import ( + "fmt" + "time" +) + +const ( + dateFormat = "2006-01-02" + timeFormat = "15:04:05.999999999" + timetzFormat1 = "15:04:05.999999999-07:00:00" + timetzFormat2 = "15:04:05.999999999-07:00" + timetzFormat3 = "15:04:05.999999999-07" + timestampFormat = "2006-01-02 15:04:05.999999999" + timestamptzFormat1 = "2006-01-02 15:04:05.999999999-07:00:00" + timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07:00" + timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07" +) + +func ParseTime(s string) (time.Time, error) { + l := len(s) + + if l >= len("2006-01-02 15:04:05") { + switch s[10] { + case ' ': + if c := s[l-6]; c == '+' || c == '-' { + return time.Parse(timestamptzFormat2, s) + } + if c := s[l-3]; c == '+' || c == '-' { + return time.Parse(timestamptzFormat3, s) + } + if c := s[l-9]; c == '+' || c == '-' { + return time.Parse(timestamptzFormat1, s) + } + return time.ParseInLocation(timestampFormat, s, time.UTC) + case 'T': + return time.Parse(time.RFC3339Nano, s) + } + } + + if l >= len("15:04:05-07") { + if c := s[l-6]; c == '+' || c == '-' { + return time.Parse(timetzFormat2, s) + } + if c := s[l-3]; c == '+' || c == '-' { + return time.Parse(timetzFormat3, s) + } + if c := s[l-9]; c == '+' || c == '-' { + return time.Parse(timetzFormat1, s) + } + } + + if l < len("15:04:05") { + return time.Time{}, fmt.Errorf("bun: can't parse time=%q", s) + } + + if s[2] == ':' { + return time.ParseInLocation(timeFormat, s, time.UTC) + } + return time.ParseInLocation(dateFormat, s, time.UTC) +} diff --git a/vendor/github.com/uptrace/bun/internal/underscore.go b/vendor/github.com/uptrace/bun/internal/underscore.go new file mode 100644 index 0000000..9de52fb --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/underscore.go @@ -0,0 +1,67 @@ +package internal + +func IsUpper(c byte) bool { + return c >= 'A' && c <= 'Z' +} + +func IsLower(c byte) bool { + return c >= 'a' && c <= 'z' +} + +func ToUpper(c byte) byte { + return c - 32 +} + +func ToLower(c byte) byte { + return c + 32 +} + +// Underscore converts "CamelCasedString" to "camel_cased_string". 
+func Underscore(s string) string { + r := make([]byte, 0, len(s)+5) + for i := 0; i < len(s); i++ { + c := s[i] + if IsUpper(c) { + if i > 0 && i+1 < len(s) && (IsLower(s[i-1]) || IsLower(s[i+1])) { + r = append(r, '_', ToLower(c)) + } else { + r = append(r, ToLower(c)) + } + } else { + r = append(r, c) + } + } + return string(r) +} + +func CamelCased(s string) string { + r := make([]byte, 0, len(s)) + upperNext := true + for i := 0; i < len(s); i++ { + c := s[i] + if c == '_' { + upperNext = true + continue + } + if upperNext { + if IsLower(c) { + c = ToUpper(c) + } + upperNext = false + } + r = append(r, c) + } + return string(r) +} + +func ToExported(s string) string { + if len(s) == 0 { + return s + } + if c := s[0]; IsLower(c) { + b := []byte(s) + b[0] = ToUpper(c) + return string(b) + } + return s +} diff --git a/vendor/github.com/uptrace/bun/internal/unsafe.go b/vendor/github.com/uptrace/bun/internal/unsafe.go new file mode 100644 index 0000000..1a03312 --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/unsafe.go @@ -0,0 +1,22 @@ +//go:build !appengine +// +build !appengine + +package internal + +import "unsafe" + +// String converts byte slice to string. +func String(b []byte) string { + if len(b) == 0 { + return "" + } + return unsafe.String(&b[0], len(b)) +} + +// Bytes converts string to byte slice. +func Bytes(s string) []byte { + if s == "" { + return []byte{} + } + return unsafe.Slice(unsafe.StringData(s), len(s)) +} diff --git a/vendor/github.com/uptrace/bun/internal/util.go b/vendor/github.com/uptrace/bun/internal/util.go new file mode 100644 index 0000000..ba1341e --- /dev/null +++ b/vendor/github.com/uptrace/bun/internal/util.go @@ -0,0 +1,87 @@ +package internal + +import ( + "reflect" +) + +func MakeSliceNextElemFunc(v reflect.Value) func() reflect.Value { + if v.Kind() == reflect.Array { + var pos int + return func() reflect.Value { + v := v.Index(pos) + pos++ + return v + } + } + + elemType := v.Type().Elem() + + if elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + return func() reflect.Value { + if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Len()+1)) + elem := v.Index(v.Len() - 1) + if elem.IsNil() { + elem.Set(reflect.New(elemType)) + } + return elem + } + + elem := reflect.New(elemType) + v.Set(reflect.Append(v, elem)) + return elem + } + } + + zero := reflect.Zero(elemType) + return func() reflect.Value { + if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Len()+1)) + return v.Index(v.Len() - 1) + } + + v.Set(reflect.Append(v, zero)) + return v.Index(v.Len() - 1) + } +} + +func Unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} + +func FieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + + for i, idx := range index { + if i > 0 { + v = indirectNil(v) + } + v = v.Field(idx) + } + return v +} + +func indirectNil(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v +} + +// MakeQueryBytes returns zero-length byte slice with capacity of 4096. +func MakeQueryBytes() []byte { + // TODO: make this configurable? 
+ return make([]byte, 0, 4096) +} diff --git a/vendor/github.com/uptrace/bun/model.go b/vendor/github.com/uptrace/bun/model.go new file mode 100644 index 0000000..1cc1a90 --- /dev/null +++ b/vendor/github.com/uptrace/bun/model.go @@ -0,0 +1,208 @@ +package bun + +import ( + "context" + "database/sql" + "errors" + "fmt" + "reflect" + "time" + + "github.com/uptrace/bun/schema" +) + +var errNilModel = errors.New("bun: Model(nil)") + +var ( + timeType = reflect.TypeFor[time.Time]() + bytesType = reflect.TypeFor[[]byte]() +) + +type Model = schema.Model + +type rowScanner interface { + ScanRow(ctx context.Context, rows *sql.Rows) error +} + +type TableModel interface { + Model + + schema.BeforeAppendModelHook + schema.BeforeScanRowHook + schema.AfterScanRowHook + ScanColumn(column string, src any) error + + Table() *schema.Table + Relation() *schema.Relation + + join(string) *relationJoin + getJoin(string) *relationJoin + getJoins() []relationJoin + addJoin(relationJoin) *relationJoin + clone() TableModel + + rootValue() reflect.Value + parentIndex() []int + mount(reflect.Value) + + updateSoftDeleteField(time.Time) error +} + +func newModel(db *DB, dest []any) (Model, error) { + if len(dest) == 1 { + return _newModel(db, dest[0], true) + } + + values := make([]reflect.Value, len(dest)) + + for i, el := range dest { + v := reflect.ValueOf(el) + if v.Kind() != reflect.Ptr { + return nil, fmt.Errorf("bun: Scan(non-pointer %T)", dest) + } + + v = v.Elem() + if v.Kind() != reflect.Slice { + return newScanModel(db, dest), nil + } + + values[i] = v + } + + return newSliceModel(db, dest, values), nil +} + +func newSingleModel(db *DB, dest any) (Model, error) { + return _newModel(db, dest, false) +} + +func _newModel(db *DB, dest any, scan bool) (Model, error) { + switch dest := dest.(type) { + case nil: + return nil, errNilModel + case Model: + return dest, nil + case sql.Scanner: + if !scan { + return nil, fmt.Errorf("bun: Model(unsupported %T)", dest) + } + return newScanModel(db, []any{dest}), nil + } + + v := reflect.ValueOf(dest) + if !v.IsValid() { + return nil, errNilModel + } + if v.Kind() != reflect.Ptr { + return nil, fmt.Errorf("bun: Model(non-pointer %T)", dest) + } + + if v.IsNil() { + typ := v.Type().Elem() + if typ.Kind() == reflect.Struct { + return newStructTableModel(db, dest, db.Table(typ)), nil + } + return nil, fmt.Errorf("bun: Model(nil %s %T)", typ.Kind(), dest) + } + + v = v.Elem() + typ := v.Type() + + switch typ { + case timeType, bytesType: + return newScanModel(db, []any{dest}), nil + } + + switch v.Kind() { + case reflect.Map: + if err := validMap(typ); err != nil { + return nil, err + } + mapPtr := v.Addr().Interface().(*map[string]any) + return newMapModel(db, mapPtr), nil + case reflect.Struct: + return newStructTableModelValue(db, dest, v), nil + case reflect.Slice: + switch elemType := sliceElemType(v); elemType.Kind() { + case reflect.Struct: + if elemType != timeType { + return newSliceTableModel(db, dest, v, elemType), nil + } + case reflect.Map: + if err := validMap(elemType); err != nil { + return nil, err + } + slicePtr := v.Addr().Interface().(*[]map[string]any) + return newMapSliceModel(db, slicePtr), nil + } + return newSliceModel(db, []any{dest}, []reflect.Value{v}), nil + } + + if scan { + return newScanModel(db, []any{dest}), nil + } + + return nil, fmt.Errorf("bun: Model(unsupported %T)", dest) +} + +func newTableModelIndex( + db *DB, + table *schema.Table, + root reflect.Value, + index []int, + rel *schema.Relation, +) (TableModel, error) { + typ := 
typeByIndex(table.Type, index) + + if typ.Kind() == reflect.Struct { + return &structTableModel{ + db: db, + table: table.Dialect().Tables().Get(typ), + rel: rel, + + root: root, + index: index, + }, nil + } + + if typ.Kind() == reflect.Slice { + structType := indirectType(typ.Elem()) + if structType.Kind() == reflect.Struct { + m := sliceTableModel{ + structTableModel: structTableModel{ + db: db, + table: table.Dialect().Tables().Get(structType), + rel: rel, + + root: root, + index: index, + }, + } + m.init(typ) + return &m, nil + } + } + + return nil, fmt.Errorf("bun: NewModel(%s)", typ) +} + +func validMap(typ reflect.Type) error { + if typ.Key().Kind() != reflect.String || typ.Elem().Kind() != reflect.Interface { + return fmt.Errorf("bun: Model(unsupported %s) (expected *map[string]any)", + typ) + } + return nil +} + +//------------------------------------------------------------------------------ + +func isSingleRowModel(m Model) bool { + switch m.(type) { + case *mapModel, + *structTableModel, + *scanModel: + return true + default: + return false + } +} diff --git a/vendor/github.com/uptrace/bun/model_map.go b/vendor/github.com/uptrace/bun/model_map.go new file mode 100644 index 0000000..b2a034c --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_map.go @@ -0,0 +1,186 @@ +package bun + +import ( + "bytes" + "context" + "database/sql" + "reflect" + "slices" + + "github.com/uptrace/bun/schema" +) + +type mapModel struct { + db *DB + + dest *map[string]any + m map[string]any + + rows *sql.Rows + columns []string + _columnTypes []*sql.ColumnType + scanIndex int +} + +var _ Model = (*mapModel)(nil) + +func newMapModel(db *DB, dest *map[string]any) *mapModel { + m := &mapModel{ + db: db, + dest: dest, + } + if dest != nil { + m.m = *dest + } + return m +} + +func (m *mapModel) Value() any { + return m.dest +} + +func (m *mapModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + if !rows.Next() { + return 0, rows.Err() + } + + columns, err := rows.Columns() + if err != nil { + return 0, err + } + + m.rows = rows + m.columns = columns + dest := makeDest(m, len(columns)) + + if m.m == nil { + m.m = make(map[string]any, len(m.columns)) + } + + m.scanIndex = 0 + if err := rows.Scan(dest...); err != nil { + return 0, err + } + + *m.dest = m.m + + return 1, nil +} + +func (m *mapModel) Scan(src any) error { + if _, ok := src.([]byte); !ok { + return m.scanRaw(src) + } + + columnTypes, err := m.columnTypes() + if err != nil { + return err + } + + scanType := columnTypes[m.scanIndex].ScanType() + switch scanType.Kind() { + case reflect.Interface: + return m.scanRaw(src) + case reflect.Slice: + if scanType.Elem().Kind() == reflect.Uint8 { + // Reference types such as []byte are only valid until the next call to Scan. 
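+ // Clone the bytes so the value stored in the map stays valid after
+ // the driver reuses the underlying buffer on the next Scan.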
+ src := bytes.Clone(src.([]byte)) + return m.scanRaw(src) + } + } + + dest := reflect.New(scanType).Elem() + if err := schema.Scanner(scanType)(dest, src); err != nil { + return err + } + + return m.scanRaw(dest.Interface()) +} + +func (m *mapModel) columnTypes() ([]*sql.ColumnType, error) { + if m._columnTypes == nil { + columnTypes, err := m.rows.ColumnTypes() + if err != nil { + return nil, err + } + m._columnTypes = columnTypes + } + return m._columnTypes, nil +} + +func (m *mapModel) scanRaw(src any) error { + columnName := m.columns[m.scanIndex] + m.scanIndex++ + m.m[columnName] = src + return nil +} + +func (m *mapModel) appendColumnsValues(gen schema.QueryGen, b []byte) []byte { + keys := make([]string, 0, len(m.m)) + + for k := range m.m { + keys = append(keys, k) + } + slices.Sort(keys) + + b = append(b, " ("...) + + for i, k := range keys { + if i > 0 { + b = append(b, ", "...) + } + b = gen.AppendIdent(b, k) + } + + b = append(b, ") VALUES ("...) + + isTemplate := gen.IsNop() + for i, k := range keys { + if i > 0 { + b = append(b, ", "...) + } + if isTemplate { + b = append(b, '?') + } else { + b = gen.Append(b, m.m[k]) + } + } + + b = append(b, ")"...) + + return b +} + +func (m *mapModel) appendSet(gen schema.QueryGen, b []byte) []byte { + keys := make([]string, 0, len(m.m)) + + for k := range m.m { + keys = append(keys, k) + } + slices.Sort(keys) + + isTemplate := gen.IsNop() + for i, k := range keys { + if i > 0 { + b = append(b, ", "...) + } + + b = gen.AppendIdent(b, k) + b = append(b, " = "...) + if isTemplate { + b = append(b, '?') + } else { + b = gen.Append(b, m.m[k]) + } + } + + return b +} + +func makeDest(v any, n int) []any { + dest := make([]any, n) + for i := range dest { + dest[i] = v + } + return dest +} diff --git a/vendor/github.com/uptrace/bun/model_map_slice.go b/vendor/github.com/uptrace/bun/model_map_slice.go new file mode 100644 index 0000000..c4d7ce8 --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_map_slice.go @@ -0,0 +1,153 @@ +package bun + +import ( + "context" + "database/sql" + "errors" + "slices" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/schema" +) + +type mapSliceModel struct { + mapModel + dest *[]map[string]any + + keys []string +} + +var _ Model = (*mapSliceModel)(nil) + +func newMapSliceModel(db *DB, dest *[]map[string]any) *mapSliceModel { + return &mapSliceModel{ + mapModel: mapModel{ + db: db, + }, + dest: dest, + } +} + +func (m *mapSliceModel) Value() any { + return m.dest +} + +func (m *mapSliceModel) SetCap(cap int) { + if cap > 100 { + cap = 100 + } + if slice := *m.dest; len(slice) < cap { + *m.dest = make([]map[string]any, 0, cap) + } +} + +func (m *mapSliceModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + columns, err := rows.Columns() + if err != nil { + return 0, err + } + + m.rows = rows + m.columns = columns + dest := makeDest(m, len(columns)) + + slice := *m.dest + if len(slice) > 0 { + slice = slice[:0] + } + + var n int + + for rows.Next() { + m.m = make(map[string]any, len(m.columns)) + + m.scanIndex = 0 + if err := rows.Scan(dest...); err != nil { + return 0, err + } + + slice = append(slice, m.m) + n++ + } + if err := rows.Err(); err != nil { + return 0, err + } + + *m.dest = slice + return n, nil +} + +func (m *mapSliceModel) appendColumns(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if err := m.initKeys(); err != nil { + return nil, err + } + + for i, k := range m.keys { + if i > 0 { + b = append(b, ", "...) 
+ } + b = gen.AppendIdent(b, k) + } + + return b, nil +} + +func (m *mapSliceModel) appendValues(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if err := m.initKeys(); err != nil { + return nil, err + } + slice := *m.dest + + if gen.IsNop() { + for i := range m.keys { + if i > 0 { + b = append(b, ", "...) + } + b = append(b, '?') + } + return b, nil + } + + for i, el := range slice { + if i > 0 { + b = append(b, "), "...) + if m.db.HasFeature(feature.ValuesRow) { + b = append(b, "ROW("...) + } else { + b = append(b, '(') + } + } + + for j, key := range m.keys { + if j > 0 { + b = append(b, ", "...) + } + b = gen.Append(b, el[key]) + } + } + + return b, nil +} + +func (m *mapSliceModel) initKeys() error { + if m.keys != nil { + return nil + } + + slice := *m.dest + if len(slice) == 0 { + return errors.New("bun: map slice is empty") + } + + first := slice[0] + keys := make([]string, 0, len(first)) + + for k := range first { + keys = append(keys, k) + } + + slices.Sort(keys) + m.keys = keys + + return nil +} diff --git a/vendor/github.com/uptrace/bun/model_scan.go b/vendor/github.com/uptrace/bun/model_scan.go new file mode 100644 index 0000000..63ad6f3 --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_scan.go @@ -0,0 +1,56 @@ +package bun + +import ( + "context" + "database/sql" + "reflect" + + "github.com/uptrace/bun/schema" +) + +type scanModel struct { + db *DB + + dest []any + scanIndex int +} + +var _ Model = (*scanModel)(nil) + +func newScanModel(db *DB, dest []any) *scanModel { + return &scanModel{ + db: db, + dest: dest, + } +} + +func (m *scanModel) Value() any { + return m.dest +} + +func (m *scanModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + if !rows.Next() { + return 0, rows.Err() + } + + dest := makeDest(m, len(m.dest)) + + m.scanIndex = 0 + if err := rows.Scan(dest...); err != nil { + return 0, err + } + + return 1, nil +} + +func (m *scanModel) ScanRow(ctx context.Context, rows *sql.Rows) error { + return rows.Scan(m.dest...) 
+} + +func (m *scanModel) Scan(src any) error { + dest := reflect.ValueOf(m.dest[m.scanIndex]) + m.scanIndex++ + + scanner := schema.Scanner(dest.Type()) + return scanner(dest, src) +} diff --git a/vendor/github.com/uptrace/bun/model_slice.go b/vendor/github.com/uptrace/bun/model_slice.go new file mode 100644 index 0000000..1cfd739 --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_slice.go @@ -0,0 +1,82 @@ +package bun + +import ( + "context" + "database/sql" + "reflect" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type sliceInfo struct { + nextElem func() reflect.Value + scan schema.ScannerFunc +} + +type sliceModel struct { + dest []any + values []reflect.Value + scanIndex int + info []sliceInfo +} + +var _ Model = (*sliceModel)(nil) + +func newSliceModel(db *DB, dest []any, values []reflect.Value) *sliceModel { + return &sliceModel{ + dest: dest, + values: values, + } +} + +func (m *sliceModel) Value() any { + return m.dest +} + +func (m *sliceModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + columns, err := rows.Columns() + if err != nil { + return 0, err + } + + m.info = make([]sliceInfo, len(m.values)) + for i, v := range m.values { + if v.IsValid() && v.Len() > 0 { + v.Set(v.Slice(0, 0)) + } + + m.info[i] = sliceInfo{ + nextElem: internal.MakeSliceNextElemFunc(v), + scan: schema.Scanner(v.Type().Elem()), + } + } + + if len(columns) == 0 { + return 0, nil + } + dest := makeDest(m, len(columns)) + + var n int + + for rows.Next() { + m.scanIndex = 0 + if err := rows.Scan(dest...); err != nil { + return 0, err + } + n++ + } + if err := rows.Err(); err != nil { + return 0, err + } + + return n, nil +} + +func (m *sliceModel) Scan(src any) error { + info := m.info[m.scanIndex] + m.scanIndex++ + + dest := info.nextElem() + return info.scan(dest, src) +} diff --git a/vendor/github.com/uptrace/bun/model_table_has_many.go b/vendor/github.com/uptrace/bun/model_table_has_many.go new file mode 100644 index 0000000..0b229e1 --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_table_has_many.go @@ -0,0 +1,185 @@ +package bun + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type hasManyModel struct { + *sliceTableModel + baseTable *schema.Table + rel *schema.Relation + + baseValues map[internal.MapKey][]reflect.Value + structKey []any +} + +var _ TableModel = (*hasManyModel)(nil) + +func newHasManyModel(j *relationJoin) *hasManyModel { + baseTable := j.BaseModel.Table() + joinModel := j.JoinModel.(*sliceTableModel) + baseValues := baseValues(joinModel, j.Relation.BasePKs) + if len(baseValues) == 0 { + return nil + } + m := hasManyModel{ + sliceTableModel: joinModel, + baseTable: baseTable, + rel: j.Relation, + + baseValues: baseValues, + } + if !m.sliceOfPtr { + m.strct = reflect.New(m.table.Type).Elem() + } + return &m +} + +func (m *hasManyModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + columns, err := rows.Columns() + if err != nil { + return 0, err + } + + m.columns = columns + dest := makeDest(m, len(columns)) + + var n int + m.structKey = make([]any, len(m.rel.JoinPKs)) + for rows.Next() { + if m.sliceOfPtr { + m.strct = reflect.New(m.table.Type).Elem() + } else { + m.strct.Set(m.table.ZeroValue) + } + m.structInited = false + m.scanIndex = 0 + + if err := rows.Scan(dest...); err != nil { + return 0, err + } + + if err := m.parkStruct(); err != nil { + return 0, err + } + + n++ + } + if err := 
rows.Err(); err != nil {
+ return 0, err
+ }
+
+ return n, nil
+}
+
+func (m *hasManyModel) Scan(src any) error {
+ column := m.columns[m.scanIndex]
+ m.scanIndex++
+
+ field := m.table.LookupField(column)
+ if field == nil {
+ return fmt.Errorf("bun: %s does not have column %q", m.table.TypeName, column)
+ }
+
+ if err := field.ScanValue(m.strct, src); err != nil {
+ return err
+ }
+
+ for i, f := range m.rel.JoinPKs {
+ if f.Name == column {
+ m.structKey[i] = indirectAsKey(field.Value(m.strct))
+ break
+ }
+ }
+
+ return nil
+}
+
+func (m *hasManyModel) parkStruct() error {
+ baseValues, ok := m.baseValues[internal.NewMapKey(m.structKey)]
+ if !ok {
+ return fmt.Errorf(
+ "bun: has-many relation=%s does not have base %s with id=%q (check join conditions)",
+ m.rel.Field.GoName, m.baseTable, m.structKey)
+ }
+
+ for i, v := range baseValues {
+ if !m.sliceOfPtr {
+ v.Set(reflect.Append(v, m.strct))
+ continue
+ }
+
+ if i == 0 {
+ v.Set(reflect.Append(v, m.strct.Addr()))
+ continue
+ }
+
+ clone := reflect.New(m.strct.Type()).Elem()
+ clone.Set(m.strct)
+ v.Set(reflect.Append(v, clone.Addr()))
+ }
+
+ return nil
+}
+
+func (m *hasManyModel) clone() TableModel {
+ return &hasManyModel{
+ sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel),
+ baseTable: m.baseTable,
+ rel: m.rel,
+ baseValues: m.baseValues,
+ structKey: m.structKey,
+ }
+}
+
+func baseValues(model TableModel, fields []*schema.Field) map[internal.MapKey][]reflect.Value {
+ fieldIndex := model.Relation().Field.Index
+ m := make(map[internal.MapKey][]reflect.Value)
+ key := make([]any, 0, len(fields))
+ walk(model.rootValue(), model.parentIndex(), func(v reflect.Value) {
+ key = modelKey(key[:0], v, fields)
+ mapKey := internal.NewMapKey(key)
+ m[mapKey] = append(m[mapKey], v.FieldByIndex(fieldIndex))
+ })
+ return m
+}
+
+func modelKey(key []any, strct reflect.Value, fields []*schema.Field) []any {
+ for _, f := range fields {
+ key = append(key, indirectAsKey(f.Value(strct)))
+ }
+ return key
+}
+
+// indirectAsKey returns the field value, dereferencing the pointer if necessary.
+// The value is then used as a map key.
+func indirectAsKey(field reflect.Value) any {
+ if field.Kind() == reflect.Pointer && field.IsNil() {
+ return nil
+ }
+
+ i := field.Interface()
+ if valuer, ok := i.(driver.Valuer); ok {
+ if v, err := valuer.Value(); err == nil {
+ switch reflect.TypeOf(v).Kind() {
+ case reflect.Array, reflect.Chan, reflect.Func,
+ reflect.Map, reflect.Pointer, reflect.Slice, reflect.UnsafePointer:
+ // NOTE #1107: these types cannot be used as map keys,
+ // so fall back to the original logic.
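+ // A []byte returned by a Valuer, for example, is unhashable and
+ // would panic if used as a map key.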
+ return i + default: + return v + } + } + } + + return reflect.Indirect(field).Interface() +} diff --git a/vendor/github.com/uptrace/bun/model_table_m2m.go b/vendor/github.com/uptrace/bun/model_table_m2m.go new file mode 100644 index 0000000..967ff54 --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_table_m2m.go @@ -0,0 +1,142 @@ +package bun + +import ( + "context" + "database/sql" + "fmt" + "reflect" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type m2mModel struct { + *sliceTableModel + baseTable *schema.Table + rel *schema.Relation + + baseValues map[internal.MapKey][]reflect.Value + structKey []any +} + +var _ TableModel = (*m2mModel)(nil) + +func newM2MModel(j *relationJoin) *m2mModel { + baseTable := j.BaseModel.Table() + joinModel := j.JoinModel.(*sliceTableModel) + baseValues := baseValues(joinModel, j.Relation.BasePKs) + if len(baseValues) == 0 { + return nil + } + m := &m2mModel{ + sliceTableModel: joinModel, + baseTable: baseTable, + rel: j.Relation, + + baseValues: baseValues, + } + if !m.sliceOfPtr { + m.strct = reflect.New(m.table.Type).Elem() + } + return m +} + +func (m *m2mModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + columns, err := rows.Columns() + if err != nil { + return 0, err + } + + m.columns = columns + dest := makeDest(m, len(columns)) + + var n int + + for rows.Next() { + if m.sliceOfPtr { + m.strct = reflect.New(m.table.Type).Elem() + } else { + m.strct.Set(m.table.ZeroValue) + } + m.structInited = false + + m.scanIndex = 0 + m.structKey = m.structKey[:0] + if err := rows.Scan(dest...); err != nil { + return 0, err + } + + if err := m.parkStruct(); err != nil { + return 0, err + } + + n++ + } + if err := rows.Err(); err != nil { + return 0, err + } + + return n, nil +} + +func (m *m2mModel) Scan(src any) error { + column := m.columns[m.scanIndex] + m.scanIndex++ + + // Base pks must come first. 
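+ // They are collected into structKey so that parkStruct can match
+ // the scanned row back to its base model.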
+ if m.scanIndex <= len(m.rel.M2MBasePKs) { + return m.scanM2MColumn(column, src) + } + + if field, ok := m.table.FieldMap[column]; ok { + return field.ScanValue(m.strct, src) + } + + _, err := m.scanColumn(column, src) + return err +} + +func (m *m2mModel) scanM2MColumn(column string, src any) error { + for _, field := range m.rel.M2MBasePKs { + if field.Name == column { + dest := reflect.New(field.IndirectType).Elem() + if err := field.Scan(dest, src); err != nil { + return err + } + m.structKey = append(m.structKey, indirectAsKey(dest)) + break + } + } + + _, err := m.scanColumn(column, src) + return err +} + +func (m *m2mModel) parkStruct() error { + baseValues, ok := m.baseValues[internal.NewMapKey(m.structKey)] + if !ok { + return fmt.Errorf( + "bun: m2m relation=%s does not have base %s with key=%q (check join conditions)", + m.rel.Field.GoName, m.baseTable, m.structKey) + } + + for _, v := range baseValues { + if m.sliceOfPtr { + v.Set(reflect.Append(v, m.strct.Addr())) + } else { + v.Set(reflect.Append(v, m.strct)) + } + } + + return nil +} + +func (m *m2mModel) clone() TableModel { + return &m2mModel{ + sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel), + baseTable: m.baseTable, + rel: m.rel, + baseValues: m.baseValues, + structKey: m.structKey, + } +} diff --git a/vendor/github.com/uptrace/bun/model_table_slice.go b/vendor/github.com/uptrace/bun/model_table_slice.go new file mode 100644 index 0000000..c75a09a --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_table_slice.go @@ -0,0 +1,136 @@ +package bun + +import ( + "context" + "database/sql" + "reflect" + "time" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type sliceTableModel struct { + structTableModel + + slice reflect.Value + sliceLen int + sliceOfPtr bool + nextElem func() reflect.Value +} + +var _ TableModel = (*sliceTableModel)(nil) + +func newSliceTableModel( + db *DB, dest any, slice reflect.Value, elemType reflect.Type, +) *sliceTableModel { + m := &sliceTableModel{ + structTableModel: structTableModel{ + db: db, + table: db.Table(elemType), + dest: dest, + root: slice, + }, + + slice: slice, + sliceLen: slice.Len(), + nextElem: internal.MakeSliceNextElemFunc(slice), + } + m.init(slice.Type()) + return m +} + +func (m *sliceTableModel) init(sliceType reflect.Type) { + switch sliceType.Elem().Kind() { + case reflect.Ptr, reflect.Interface: + m.sliceOfPtr = true + } +} + +func (m *sliceTableModel) join(name string) *relationJoin { + return m._join(m.slice, name) +} + +func (m *sliceTableModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + columns, err := rows.Columns() + if err != nil { + return 0, err + } + + m.columns = columns + dest := makeDest(m, len(columns)) + + if m.slice.IsValid() && m.slice.Len() > 0 { + m.slice.Set(m.slice.Slice(0, 0)) + } + + var n int + + for rows.Next() { + m.strct = m.nextElem() + if m.sliceOfPtr { + m.strct = m.strct.Elem() + } + m.structInited = false + + if err := m.scanRow(ctx, rows, dest); err != nil { + return 0, err + } + + n++ + } + if err := rows.Err(); err != nil { + return 0, err + } + + return n, nil +} + +var _ schema.BeforeAppendModelHook = (*sliceTableModel)(nil) + +func (m *sliceTableModel) BeforeAppendModel(ctx context.Context, query Query) error { + if !m.table.HasBeforeAppendModelHook() || !m.slice.IsValid() { + return nil + } + + sliceLen := m.slice.Len() + for i := 0; i < sliceLen; i++ { + strct := m.slice.Index(i) + if !m.sliceOfPtr { + strct = strct.Addr() + } + err := 
strct.Interface().(schema.BeforeAppendModelHook).BeforeAppendModel(ctx, query) + if err != nil { + return err + } + } + return nil +} + +// Inherit these hooks from structTableModel. +var ( + _ schema.BeforeScanRowHook = (*sliceTableModel)(nil) + _ schema.AfterScanRowHook = (*sliceTableModel)(nil) +) + +func (m *sliceTableModel) updateSoftDeleteField(tm time.Time) error { + sliceLen := m.slice.Len() + for i := 0; i < sliceLen; i++ { + strct := indirect(m.slice.Index(i)) + fv := m.table.SoftDeleteField.Value(strct) + if err := m.table.UpdateSoftDeleteField(fv, tm); err != nil { + return err + } + } + return nil +} + +func (m *sliceTableModel) clone() TableModel { + return &sliceTableModel{ + structTableModel: *m.structTableModel.clone().(*structTableModel), + slice: m.slice, + sliceLen: m.sliceLen, + sliceOfPtr: m.sliceOfPtr, + nextElem: m.nextElem, + } +} diff --git a/vendor/github.com/uptrace/bun/model_table_struct.go b/vendor/github.com/uptrace/bun/model_table_struct.go new file mode 100644 index 0000000..f9e526f --- /dev/null +++ b/vendor/github.com/uptrace/bun/model_table_struct.go @@ -0,0 +1,373 @@ +package bun + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "strings" + "time" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type structTableModel struct { + db *DB + table *schema.Table + + rel *schema.Relation + joins []relationJoin + + dest any + root reflect.Value + index []int + + strct reflect.Value + structInited bool + structInitErr error + + columns []string + scanIndex int +} + +var _ TableModel = (*structTableModel)(nil) + +func newStructTableModel(db *DB, dest any, table *schema.Table) *structTableModel { + return &structTableModel{ + db: db, + table: table, + dest: dest, + } +} + +func newStructTableModelValue(db *DB, dest any, v reflect.Value) *structTableModel { + return &structTableModel{ + db: db, + table: db.Table(v.Type()), + dest: dest, + root: v, + strct: v, + } +} + +func (m *structTableModel) Value() any { + return m.dest +} + +func (m *structTableModel) Table() *schema.Table { + return m.table +} + +func (m *structTableModel) Relation() *schema.Relation { + return m.rel +} + +func (m *structTableModel) initStruct() error { + if m.structInited { + return m.structInitErr + } + m.structInited = true + + switch m.strct.Kind() { + case reflect.Invalid: + m.structInitErr = errNilModel + return m.structInitErr + case reflect.Interface: + m.strct = m.strct.Elem() + } + + if m.strct.Kind() == reflect.Ptr { + if m.strct.IsNil() { + m.strct.Set(reflect.New(m.strct.Type().Elem())) + m.strct = m.strct.Elem() + } else { + m.strct = m.strct.Elem() + } + } + + m.mountJoins() + + return nil +} + +func (m *structTableModel) mountJoins() { + for i := range m.joins { + j := &m.joins[i] + switch j.Relation.Type { + case schema.HasOneRelation, schema.BelongsToRelation: + j.JoinModel.mount(m.strct) + } + } +} + +var _ schema.BeforeAppendModelHook = (*structTableModel)(nil) + +func (m *structTableModel) BeforeAppendModel(ctx context.Context, query Query) error { + if !m.table.HasBeforeAppendModelHook() || !m.strct.IsValid() { + return nil + } + return m.strct.Addr().Interface().(schema.BeforeAppendModelHook).BeforeAppendModel(ctx, query) +} + +var _ schema.BeforeScanRowHook = (*structTableModel)(nil) + +func (m *structTableModel) BeforeScanRow(ctx context.Context) error { + if m.table.HasBeforeScanRowHook() { + return m.strct.Addr().Interface().(schema.BeforeScanRowHook).BeforeScanRow(ctx) + } + return nil +} + +var _ schema.AfterScanRowHook = 
(*structTableModel)(nil) + +func (m *structTableModel) AfterScanRow(ctx context.Context) error { + if !m.structInited { + return nil + } + + if m.table.HasAfterScanRowHook() { + firstErr := m.strct.Addr().Interface().(schema.AfterScanRowHook).AfterScanRow(ctx) + + for _, j := range m.joins { + switch j.Relation.Type { + case schema.HasOneRelation, schema.BelongsToRelation: + if err := j.JoinModel.AfterScanRow(ctx); err != nil && firstErr == nil { + firstErr = err + } + } + } + + return firstErr + } + + return nil +} + +func (m *structTableModel) getJoin(name string) *relationJoin { + for i := range m.joins { + j := &m.joins[i] + if j.Relation.Field.Name == name || j.Relation.Field.GoName == name { + return j + } + } + return nil +} + +func (m *structTableModel) getJoins() []relationJoin { + return m.joins +} + +func (m *structTableModel) addJoin(j relationJoin) *relationJoin { + m.joins = append(m.joins, j) + return &m.joins[len(m.joins)-1] +} + +func (m *structTableModel) join(name string) *relationJoin { + return m._join(m.strct, name) +} + +func (m *structTableModel) _join(bind reflect.Value, name string) *relationJoin { + path := strings.Split(name, ".") + index := make([]int, 0, len(path)) + + currJoin := relationJoin{ + BaseModel: m, + JoinModel: m, + } + var lastJoin *relationJoin + + for _, name := range path { + relation, ok := currJoin.JoinModel.Table().Relations[name] + if !ok { + return nil + } + + currJoin.Relation = relation + index = append(index, relation.Field.Index...) + + if j := currJoin.JoinModel.getJoin(name); j != nil { + currJoin.BaseModel = j.BaseModel + currJoin.JoinModel = j.JoinModel + + lastJoin = j + } else { + model, err := newTableModelIndex(m.db, m.table, bind, index, relation) + if err != nil { + return nil + } + + currJoin.Parent = lastJoin + currJoin.BaseModel = currJoin.JoinModel + currJoin.JoinModel = model + + lastJoin = currJoin.BaseModel.addJoin(currJoin) + } + } + + return lastJoin +} + +func (m *structTableModel) rootValue() reflect.Value { + return m.root +} + +func (m *structTableModel) parentIndex() []int { + return m.index[:len(m.index)-len(m.rel.Field.Index)] +} + +func (m *structTableModel) mount(host reflect.Value) { + m.strct = internal.FieldByIndexAlloc(host, m.rel.Field.Index) + m.structInited = false +} + +func (m *structTableModel) updateSoftDeleteField(tm time.Time) error { + if !m.strct.IsValid() { + return nil + } + fv := m.table.SoftDeleteField.Value(m.strct) + return m.table.UpdateSoftDeleteField(fv, tm) +} + +func (m *structTableModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) { + if !rows.Next() { + return 0, rows.Err() + } + + var n int + + if err := m.ScanRow(ctx, rows); err != nil { + return 0, err + } + n++ + + // And discard the rest. This is especially important for SQLite3, which can return + // a row like it was inserted successfully and then return an actual error for the next row. + // See issues/100. 
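+ // Draining the cursor also lets rows.Err below report any deferred
+ // driver error.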
+ for rows.Next() { + n++ + } + if err := rows.Err(); err != nil { + return 0, err + } + + return n, nil +} + +func (m *structTableModel) ScanRow(ctx context.Context, rows *sql.Rows) error { + columns, err := rows.Columns() + if err != nil { + return err + } + + m.columns = columns + dest := makeDest(m, len(columns)) + + return m.scanRow(ctx, rows, dest) +} + +func (m *structTableModel) scanRow(ctx context.Context, rows *sql.Rows, dest []any) error { + if err := m.BeforeScanRow(ctx); err != nil { + return err + } + + m.scanIndex = 0 + if err := rows.Scan(dest...); err != nil { + return err + } + + if err := m.AfterScanRow(ctx); err != nil { + return err + } + + return nil +} + +func (m *structTableModel) Scan(src any) error { + column := m.columns[m.scanIndex] + m.scanIndex++ + + return m.ScanColumn(unquote(column), src) +} + +func (m *structTableModel) ScanColumn(column string, src any) error { + if ok, err := m.scanColumn(column, src); ok { + return err + } + if column == "" || column[0] == '_' || m.db.flags.Has(discardUnknownColumns) { + return nil + } + return fmt.Errorf("bun: %s does not have column %q", m.table.TypeName, column) +} + +func (m *structTableModel) scanColumn(column string, src any) (bool, error) { + if src != nil { + if err := m.initStruct(); err != nil { + return true, err + } + } + + if field := m.table.LookupField(column); field != nil { + if src == nil && m.isNil() { + return true, nil + } + return true, field.ScanValue(m.strct, src) + } + + if joinName, column := splitColumn(column); joinName != "" { + if join := m.getJoin(joinName); join != nil { + return true, join.JoinModel.ScanColumn(column, src) + } + + if m.table.ModelName == joinName { + return true, m.ScanColumn(column, src) + } + } + + return false, nil +} + +func (m *structTableModel) isNil() bool { + return m.strct.Kind() == reflect.Ptr && m.strct.IsNil() +} + +func (m *structTableModel) AppendNamedArg( + gen schema.QueryGen, b []byte, name string, +) ([]byte, bool) { + return m.table.AppendNamedArg(gen, b, name, m.strct) +} + +func (m *structTableModel) clone() TableModel { + return &structTableModel{ + db: m.db, + table: m.table, + rel: m.rel, + joins: append([]relationJoin{}, m.joins...), + dest: m.dest, + root: m.root, + index: append([]int{}, m.index...), + strct: m.strct, + structInited: m.structInited, + structInitErr: m.structInitErr, + columns: append([]string{}, m.columns...), + scanIndex: m.scanIndex, + } +} + +// sqlite3 sometimes does not unquote columns. 
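+// For example, a column selected as `"id"` may be reported with the
+// surrounding double quotes still attached.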
+func unquote(s string) string { + if s == "" { + return s + } + if s[0] == '"' && s[len(s)-1] == '"' { + return s[1 : len(s)-1] + } + return s +} + +func splitColumn(s string) (string, string) { + if i := strings.Index(s, "__"); i >= 0 { + return s[:i], s[i+2:] + } + return "", s +} diff --git a/vendor/github.com/uptrace/bun/package.json b/vendor/github.com/uptrace/bun/package.json new file mode 100644 index 0000000..c1adcfe --- /dev/null +++ b/vendor/github.com/uptrace/bun/package.json @@ -0,0 +1,8 @@ +{ + "name": "gobun", + "version": "1.2.16", + "main": "index.js", + "repository": "git@github.com:uptrace/bun.git", + "author": "Vladimir Mihailenco ", + "license": "BSD-2-clause" +} diff --git a/vendor/github.com/uptrace/bun/query_base.go b/vendor/github.com/uptrace/bun/query_base.go new file mode 100644 index 0000000..74132b7 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_base.go @@ -0,0 +1,1582 @@ +package bun + +import ( + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/uptrace/bun/dialect" + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +const ( + forceDeleteFlag internal.Flag = 1 << iota + deletedFlag + allWithDeletedFlag +) + +type WithQuery struct { + name string + query Query + recursive bool + materialized bool + notMaterialized bool +} + +// IConn is a common interface for *sql.DB, *sql.Conn, and *sql.Tx. +type IConn interface { + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) + ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) + QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row +} + +var ( + _ IConn = (*sql.DB)(nil) + _ IConn = (*sql.Conn)(nil) + _ IConn = (*sql.Tx)(nil) + _ IConn = (*DB)(nil) + _ IConn = (*Conn)(nil) + _ IConn = (*Tx)(nil) +) + +// IDB is a common interface for *bun.DB, bun.Conn, and bun.Tx. 
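+// A hypothetical sketch: accepting an IDB lets the same helper run both
+// inside and outside a transaction (User is an assumed model type):
+//
+//	func insertUser(ctx context.Context, db bun.IDB, user *User) error {
+//		_, err := db.NewInsert().Model(user).Exec(ctx)
+//		return err
+//	}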
+type IDB interface { + IConn + Dialect() schema.Dialect + + NewValues(model any) *ValuesQuery + NewSelect() *SelectQuery + NewInsert() *InsertQuery + NewUpdate() *UpdateQuery + NewDelete() *DeleteQuery + NewMerge() *MergeQuery + NewRaw(query string, args ...any) *RawQuery + NewCreateTable() *CreateTableQuery + NewDropTable() *DropTableQuery + NewCreateIndex() *CreateIndexQuery + NewDropIndex() *DropIndexQuery + NewTruncateTable() *TruncateTableQuery + NewAddColumn() *AddColumnQuery + NewDropColumn() *DropColumnQuery + + BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) + RunInTx(ctx context.Context, opts *sql.TxOptions, f func(ctx context.Context, tx Tx) error) error +} + +var ( + _ IDB = (*DB)(nil) + _ IDB = (*Conn)(nil) + _ IDB = (*Tx)(nil) +) + +// QueryBuilder is used for common query methods +type QueryBuilder interface { + Query + Where(query string, args ...any) QueryBuilder + WhereGroup(sep string, fn func(QueryBuilder) QueryBuilder) QueryBuilder + WhereOr(query string, args ...any) QueryBuilder + WhereDeleted() QueryBuilder + WhereAllWithDeleted() QueryBuilder + WherePK(cols ...string) QueryBuilder + Unwrap() any +} + +var ( + _ QueryBuilder = (*selectQueryBuilder)(nil) + _ QueryBuilder = (*updateQueryBuilder)(nil) + _ QueryBuilder = (*deleteQueryBuilder)(nil) +) + +type baseQuery struct { + db *DB + conn IConn + + model Model + err error + + tableModel TableModel + table *schema.Table + + with []WithQuery + modelTableName schema.QueryWithArgs + tables []schema.QueryWithArgs + columns []schema.QueryWithArgs + + flags internal.Flag +} + +func (q *baseQuery) DB() *DB { + return q.db +} + +func (q *baseQuery) resolveConn(ctx context.Context, query Query) IConn { + if q.conn != nil { + return q.conn + } + if q.db.resolver != nil { + if conn := q.db.resolver.ResolveConn(ctx, query); conn != nil { + return conn + } + } + return q.db.DB +} + +func (q *baseQuery) GetModel() Model { + return q.model +} + +func (q *baseQuery) GetTableName() string { + if q.table != nil { + return q.table.Name + } + + for _, wq := range q.with { + if model := wq.query.GetModel(); model != nil { + return wq.query.GetTableName() + } + } + + if q.modelTableName.Query != "" { + return q.modelTableName.Query + } + + if len(q.tables) > 0 { + b, _ := q.tables[0].AppendQuery(q.db.gen, nil) + if len(b) < 64 { + return string(b) + } + } + + return "" +} + +func (q *baseQuery) setConn(db IConn) { + // Unwrap Bun wrappers to not call query hooks twice. 
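+ // The wrappers run the hooks in their own QueryContext/ExecContext
+ // methods, so queries issued here must go to the bare connection.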
+ switch db := db.(type) { + case *DB: + q.conn = db.DB + case Conn: + q.conn = db.Conn + case Tx: + q.conn = db.Tx + default: + q.conn = db + } +} + +func (q *baseQuery) setModel(modeli any) { + model, err := newSingleModel(q.db, modeli) + if err != nil { + q.setErr(err) + return + } + + q.model = model + if tm, ok := model.(TableModel); ok { + q.tableModel = tm + q.table = tm.Table() + } +} + +func (q *baseQuery) setErr(err error) { + if q.err == nil { + q.err = err + } +} + +func (q *baseQuery) getModel(dest []any) (Model, error) { + if len(dest) > 0 { + return newModel(q.db, dest) + } + if q.model != nil { + return q.model, nil + } + return nil, errNilModel +} + +func (q *baseQuery) beforeAppendModel(ctx context.Context, query Query) error { + if q.tableModel != nil { + return q.tableModel.BeforeAppendModel(ctx, query) + } + return nil +} + +func (q *baseQuery) hasFeature(feature feature.Feature) bool { + return q.db.HasFeature(feature) +} + +//------------------------------------------------------------------------------ + +func (q *baseQuery) checkSoftDelete() error { + if q.table == nil { + return errors.New("bun: can't use soft deletes without a table") + } + if q.table.SoftDeleteField == nil { + return fmt.Errorf("%s does not have a soft delete field", q.table) + } + if q.tableModel == nil { + return errors.New("bun: can't use soft deletes without a table model") + } + return nil +} + +// Deleted adds `WHERE deleted_at IS NOT NULL` clause for soft deleted models. +func (q *baseQuery) whereDeleted() { + if err := q.checkSoftDelete(); err != nil { + q.setErr(err) + return + } + q.flags = q.flags.Set(deletedFlag) + q.flags = q.flags.Remove(allWithDeletedFlag) +} + +// AllWithDeleted changes query to return all rows including soft deleted ones. +func (q *baseQuery) whereAllWithDeleted() { + if err := q.checkSoftDelete(); err != nil { + q.setErr(err) + return + } + q.flags = q.flags.Set(allWithDeletedFlag).Remove(deletedFlag) +} + +func (q *baseQuery) isSoftDelete() bool { + if q.table != nil { + return q.table.SoftDeleteField != nil && + !q.flags.Has(allWithDeletedFlag) && + (!q.flags.Has(forceDeleteFlag) || q.flags.Has(deletedFlag)) + } + return false +} + +//------------------------------------------------------------------------------ + +func NewWithQuery(name string, query Query) *WithQuery { + return &WithQuery{ + name: name, + query: query, + } +} + +func (q *WithQuery) Recursive() *WithQuery { + q.recursive = true + return q +} + +func (q *WithQuery) Materialized() *WithQuery { + q.materialized = true + return q +} + +func (q *WithQuery) NotMaterialized() *WithQuery { + q.notMaterialized = true + return q +} + +func (q *baseQuery) addWith(query *WithQuery) { + if query == nil { + return + } + q.with = append(q.with, *query) +} + +func (q *baseQuery) appendWith(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if len(q.with) == 0 { + return b, nil + } + + b = append(b, "WITH "...) + for i, with := range q.with { + if i > 0 { + b = append(b, ", "...) + } + + if with.recursive { + b = append(b, "RECURSIVE "...) 
+ } + + b, err = q.appendCTE(gen, b, with) + if err != nil { + return nil, err + } + } + b = append(b, ' ') + return b, nil +} + +func (q *baseQuery) appendCTE( + gen schema.QueryGen, b []byte, cte WithQuery, +) (_ []byte, err error) { + if !gen.Dialect().Features().Has(feature.WithValues) { + if values, ok := cte.query.(*ValuesQuery); ok { + return q.appendSelectFromValues(gen, b, cte, values) + } + } + + b = gen.AppendIdent(b, cte.name) + + if q, ok := cte.query.(schema.ColumnsAppender); ok { + b = append(b, " ("...) + b, err = q.AppendColumns(gen, b) + if err != nil { + return nil, err + } + b = append(b, ")"...) + } + + switch { + case cte.materialized && gen.Dialect().Name() == dialect.PG: + b = append(b, " AS MATERIALIZED ("...) + case cte.notMaterialized && gen.Dialect().Name() == dialect.PG: + b = append(b, " AS NOT MATERIALIZED ("...) + default: + b = append(b, " AS ("...) + } + + b, err = cte.query.AppendQuery(gen, b) + if err != nil { + return nil, err + } + + b = append(b, ")"...) + return b, nil +} + +func (q *baseQuery) appendSelectFromValues( + gen schema.QueryGen, b []byte, cte WithQuery, values *ValuesQuery, +) (_ []byte, err error) { + b = gen.AppendIdent(b, cte.name) + b = append(b, " AS (SELECT * FROM ("...) + + b, err = cte.query.AppendQuery(gen, b) + if err != nil { + return nil, err + } + + b = append(b, ") AS t"...) + if q, ok := cte.query.(schema.ColumnsAppender); ok { + b = append(b, " ("...) + b, err = q.AppendColumns(gen, b) + if err != nil { + return nil, err + } + b = append(b, ")"...) + } + b = append(b, ")"...) + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *baseQuery) addTable(table schema.QueryWithArgs) { + q.tables = append(q.tables, table) +} + +func (q *baseQuery) addColumn(column schema.QueryWithArgs) { + q.columns = append(q.columns, column) +} + +func (q *baseQuery) excludeColumn(columns []string) { + if q.table == nil { + q.setErr(errNilModel) + return + } + + if q.columns == nil { + for _, f := range q.table.Fields { + q.columns = append(q.columns, schema.UnsafeIdent(f.Name)) + } + } + + if len(columns) == 1 && columns[0] == "*" { + q.columns = make([]schema.QueryWithArgs, 0) + return + } + + for _, column := range columns { + if !q._excludeColumn(column) { + q.setErr(fmt.Errorf("bun: can't find column=%q", column)) + return + } + } +} + +func (q *baseQuery) _excludeColumn(column string) bool { + for i, col := range q.columns { + if col.Args == nil && col.Query == column { + q.columns = append(q.columns[:i], q.columns[i+1:]...) 
+ return true + } + } + return false +} + +//------------------------------------------------------------------------------ + +func (q *baseQuery) modelHasTableName() bool { + if !q.modelTableName.IsZero() { + return q.modelTableName.Query != "" + } + return q.table != nil +} + +func (q *baseQuery) hasTables() bool { + return q.modelHasTableName() || len(q.tables) > 0 +} + +func (q *baseQuery) appendTables( + gen schema.QueryGen, b []byte, +) (_ []byte, err error) { + return q._appendTables(gen, b, false) +} + +func (q *baseQuery) appendTablesWithAlias( + gen schema.QueryGen, b []byte, +) (_ []byte, err error) { + return q._appendTables(gen, b, true) +} + +func (q *baseQuery) _appendTables( + gen schema.QueryGen, b []byte, withAlias bool, +) (_ []byte, err error) { + startLen := len(b) + + if q.modelHasTableName() { + if !q.modelTableName.IsZero() { + b, err = q.modelTableName.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } else { + b = gen.AppendQuery(b, string(q.table.SQLNameForSelects)) + if withAlias && q.table.SQLAlias != q.table.SQLNameForSelects { + if q.db.dialect.Name() == dialect.Oracle { + b = append(b, ' ') + } else { + b = append(b, " AS "...) + } + b = append(b, q.table.SQLAlias...) + } + } + } + + for _, table := range q.tables { + if len(b) > startLen { + b = append(b, ", "...) + } + b, err = table.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (q *baseQuery) appendFirstTable(gen schema.QueryGen, b []byte) ([]byte, error) { + return q._appendFirstTable(gen, b, false) +} + +func (q *baseQuery) appendFirstTableWithAlias( + gen schema.QueryGen, b []byte, +) ([]byte, error) { + return q._appendFirstTable(gen, b, true) +} + +func (q *baseQuery) _appendFirstTable( + gen schema.QueryGen, b []byte, withAlias bool, +) ([]byte, error) { + if !q.modelTableName.IsZero() { + return q.modelTableName.AppendQuery(gen, b) + } + + if q.table != nil { + b = gen.AppendQuery(b, string(q.table.SQLName)) + if withAlias { + b = append(b, " AS "...) + b = append(b, q.table.SQLAlias...) + } + return b, nil + } + + if len(q.tables) > 0 { + return q.tables[0].AppendQuery(gen, b) + } + + return nil, errors.New("bun: query does not have a table") +} + +func (q *baseQuery) hasMultiTables() bool { + if q.modelHasTableName() { + return len(q.tables) >= 1 + } + return len(q.tables) >= 2 +} + +func (q *baseQuery) appendOtherTables(gen schema.QueryGen, b []byte) (_ []byte, err error) { + tables := q.tables + if !q.modelHasTableName() { + tables = tables[1:] + } + for i, table := range tables { + if i > 0 { + b = append(b, ", "...) + } + b, err = table.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *baseQuery) appendColumns(gen schema.QueryGen, b []byte) (_ []byte, err error) { + for i, f := range q.columns { + if i > 0 { + b = append(b, ", "...) 
+ } + b, err = f.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + return b, nil +} + +func (q *baseQuery) getFields() ([]*schema.Field, error) { + if len(q.columns) == 0 { + if q.table == nil { + return nil, errNilModel + } + return q.table.Fields, nil + } + return q._getFields(false) +} + +func (q *baseQuery) getDataFields() ([]*schema.Field, error) { + if len(q.columns) == 0 { + if q.table == nil { + return nil, errNilModel + } + return q.table.DataFields, nil + } + return q._getFields(true) +} + +func (q *baseQuery) _getFields(omitPK bool) ([]*schema.Field, error) { + fields := make([]*schema.Field, 0, len(q.columns)) + for _, col := range q.columns { + if col.Args != nil { + continue + } + + field, err := q.table.Field(col.Query) + if err != nil { + return nil, err + } + + if omitPK && field.IsPK { + continue + } + + fields = append(fields, field) + } + return fields, nil +} + +func (q *baseQuery) scan( + ctx context.Context, + iquery Query, + query string, + model Model, + hasDest bool, +) (sql.Result, error) { + ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model) + res, err := q._scan(ctx, iquery, query, model, hasDest) + q.db.afterQuery(ctx, event, res, err) + return res, err +} + +func (q *baseQuery) _scan( + ctx context.Context, + iquery Query, + query string, + model Model, + hasDest bool, +) (sql.Result, error) { + rows, err := q.resolveConn(ctx, iquery).QueryContext(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + numRow, err := model.ScanRows(ctx, rows) + if err != nil { + return nil, err + } + + if numRow == 0 && hasDest && isSingleRowModel(model) { + return nil, sql.ErrNoRows + } + return driver.RowsAffected(numRow), nil +} + +func (q *baseQuery) exec( + ctx context.Context, + iquery Query, + query string, +) (sql.Result, error) { + ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model) + res, err := q.resolveConn(ctx, iquery).ExecContext(ctx, query) + q.db.afterQuery(ctx, event, res, err) + return res, err +} + +//------------------------------------------------------------------------------ + +func (q *baseQuery) AppendNamedArg(gen schema.QueryGen, b []byte, name string) ([]byte, bool) { + if q.table == nil { + return b, false + } + + if m, ok := q.tableModel.(*structTableModel); ok { + if b, ok := m.AppendNamedArg(gen, b, name); ok { + return b, ok + } + } + + switch name { + case "TableName": + b = gen.AppendQuery(b, string(q.table.SQLName)) + return b, true + case "TableAlias": + b = gen.AppendQuery(b, string(q.table.SQLAlias)) + return b, true + case "PKs": + b = appendColumns(b, "", q.table.PKs) + return b, true + case "TablePKs": + b = appendColumns(b, q.table.SQLAlias, q.table.PKs) + return b, true + case "Columns": + b = appendColumns(b, "", q.table.Fields) + return b, true + case "TableColumns": + b = appendColumns(b, q.table.SQLAlias, q.table.Fields) + return b, true + } + + return b, false +} + +//------------------------------------------------------------------------------ + +func (q *baseQuery) Dialect() schema.Dialect { + return q.db.Dialect() +} + +func (q *baseQuery) NewValues(model any) *ValuesQuery { + return NewValuesQuery(q.db, model).Conn(q.conn) +} + +func (q *baseQuery) NewSelect() *SelectQuery { + return NewSelectQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewInsert() *InsertQuery { + return NewInsertQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewUpdate() *UpdateQuery { + return NewUpdateQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewDelete() 
*DeleteQuery { + return NewDeleteQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewRaw(query string, args ...any) *RawQuery { + return NewRawQuery(q.db, query, args...).Conn(q.conn) +} + +func (q *baseQuery) NewCreateTable() *CreateTableQuery { + return NewCreateTableQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewDropTable() *DropTableQuery { + return NewDropTableQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewCreateIndex() *CreateIndexQuery { + return NewCreateIndexQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewDropIndex() *DropIndexQuery { + return NewDropIndexQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewTruncateTable() *TruncateTableQuery { + return NewTruncateTableQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewAddColumn() *AddColumnQuery { + return NewAddColumnQuery(q.db).Conn(q.conn) +} + +func (q *baseQuery) NewDropColumn() *DropColumnQuery { + return NewDropColumnQuery(q.db).Conn(q.conn) +} + +//------------------------------------------------------------------------------ + +func appendColumns(b []byte, table schema.Safe, fields []*schema.Field) []byte { + for i, f := range fields { + if i > 0 { + b = append(b, ", "...) + } + + if len(table) > 0 { + b = append(b, table...) + b = append(b, '.') + } + b = append(b, f.SQLName...) + } + return b +} + +func formatterWithModel(gen schema.QueryGen, model schema.NamedArgAppender) schema.QueryGen { + if gen.IsNop() { + return gen + } + return gen.WithArg(model) +} + +//------------------------------------------------------------------------------ + +type whereBaseQuery struct { + baseQuery + + where []schema.QueryWithSep + whereFields []*schema.Field +} + +func (q *whereBaseQuery) addWhere(where schema.QueryWithSep) { + q.where = append(q.where, where) +} + +func (q *whereBaseQuery) addWhereGroup(sep string, where []schema.QueryWithSep) { + if len(where) == 0 { + return + } + + q.addWhere(schema.SafeQueryWithSep("", nil, sep)) + q.addWhere(schema.SafeQueryWithSep("", nil, "(")) + + where[0].Sep = "" + q.where = append(q.where, where...) + + q.addWhere(schema.SafeQueryWithSep("", nil, ")")) +} + +func (q *whereBaseQuery) addWhereCols(cols []string) { + if q.table == nil { + err := fmt.Errorf("bun: got %T, but WherePK requires a struct or slice-based model", q.model) + q.setErr(err) + return + } + if q.whereFields != nil { + err := errors.New("bun: WherePK can only be called once") + q.setErr(err) + return + } + + if cols == nil { + if err := q.table.CheckPKs(); err != nil { + q.setErr(err) + return + } + q.whereFields = q.table.PKs + return + } + + q.whereFields = make([]*schema.Field, len(cols)) + for i, col := range cols { + field, err := q.table.Field(col) + if err != nil { + q.setErr(err) + return + } + q.whereFields[i] = field + } +} + +func (q *whereBaseQuery) mustAppendWhere( + gen schema.QueryGen, b []byte, withAlias bool, +) ([]byte, error) { + if len(q.where) == 0 && q.whereFields == nil && !q.flags.Has(deletedFlag) { + err := errors.New("bun: Update and Delete queries require at least one Where") + return nil, err + } + return q.appendWhere(gen, b, withAlias) +} + +func (q *whereBaseQuery) appendWhere( + gen schema.QueryGen, b []byte, withAlias bool, +) (_ []byte, err error) { + if len(q.where) == 0 && q.whereFields == nil && !q.isSoftDelete() { + return b, nil + } + + b = append(b, " WHERE "...) 
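+ // Remember where the clause body starts so the soft-delete and
+ // WherePK conditions below know whether to prepend " AND ".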
+ startLen := len(b) + + if len(q.where) > 0 { + b, err = appendWhere(gen, b, q.where) + if err != nil { + return nil, err + } + } + + if q.isSoftDelete() { + if len(b) > startLen { + b = append(b, " AND "...) + } + + if withAlias { + b = append(b, q.tableModel.Table().SQLAlias...) + } else { + b = append(b, q.tableModel.Table().SQLName...) + } + b = append(b, '.') + + field := q.tableModel.Table().SoftDeleteField + b = append(b, field.SQLName...) + + if field.IsPtr || field.NullZero { + if q.flags.Has(deletedFlag) { + b = append(b, " IS NOT NULL"...) + } else { + b = append(b, " IS NULL"...) + } + } else { + if q.flags.Has(deletedFlag) { + b = append(b, " != "...) + } else { + b = append(b, " = "...) + } + b = gen.Dialect().AppendTime(b, time.Time{}) + } + } + + if q.whereFields != nil { + if len(b) > startLen { + b = append(b, " AND "...) + } + b, err = q.appendWhereFields(gen, b, q.whereFields, withAlias) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func appendWhere( + gen schema.QueryGen, b []byte, where []schema.QueryWithSep, +) (_ []byte, err error) { + for i, where := range where { + if i > 0 { + b = append(b, where.Sep...) + } + + if where.Query == "" { + continue + } + + b = append(b, '(') + b, err = where.AppendQuery(gen, b) + if err != nil { + return nil, err + } + b = append(b, ')') + } + return b, nil +} + +func (q *whereBaseQuery) appendWhereFields( + gen schema.QueryGen, b []byte, fields []*schema.Field, withAlias bool, +) (_ []byte, err error) { + if q.table == nil { + err := fmt.Errorf("bun: got %T, but WherePK requires struct or slice-based model", q.model) + return nil, err + } + + switch model := q.tableModel.(type) { + case *structTableModel: + return q.appendWhereStructFields(gen, b, model, fields, withAlias) + case *sliceTableModel: + return q.appendWhereSliceFields(gen, b, model, fields, withAlias) + default: + return nil, fmt.Errorf("bun: WhereColumn does not support %T", q.tableModel) + } +} + +func (q *whereBaseQuery) appendWhereStructFields( + gen schema.QueryGen, + b []byte, + model *structTableModel, + fields []*schema.Field, + withAlias bool, +) (_ []byte, err error) { + if !model.strct.IsValid() { + return nil, errNilModel + } + + isTemplate := gen.IsNop() + b = append(b, '(') + for i, f := range fields { + if i > 0 { + b = append(b, " AND "...) + } + if withAlias { + b = append(b, q.table.SQLAlias...) + b = append(b, '.') + } + b = append(b, f.SQLName...) + b = append(b, " = "...) + if isTemplate { + b = append(b, '?') + } else { + b = f.AppendValue(gen, b, model.strct) + } + } + b = append(b, ')') + return b, nil +} + +func (q *whereBaseQuery) appendWhereSliceFields( + gen schema.QueryGen, + b []byte, + model *sliceTableModel, + fields []*schema.Field, + withAlias bool, +) (_ []byte, err error) { + if len(fields) > 1 { + b = append(b, '(') + } + if withAlias { + b = appendColumns(b, q.table.SQLAlias, fields) + } else { + b = appendColumns(b, "", fields) + } + if len(fields) > 1 { + b = append(b, ')') + } + + b = append(b, " IN ("...) + + isTemplate := gen.IsNop() + slice := model.slice + sliceLen := slice.Len() + for i := 0; i < sliceLen; i++ { + if i > 0 { + if isTemplate { + break + } + b = append(b, ", "...) + } + + el := indirect(slice.Index(i)) + + if len(fields) > 1 { + b = append(b, '(') + } + for i, f := range fields { + if i > 0 { + b = append(b, ", "...) 
+ } + if isTemplate { + b = append(b, '?') + } else { + b = f.AppendValue(gen, b, el) + } + } + if len(fields) > 1 { + b = append(b, ')') + } + } + + b = append(b, ')') + + return b, nil +} + +//------------------------------------------------------------------------------ + +type returningQuery struct { + returning []schema.QueryWithArgs + returningFields []*schema.Field +} + +func (q *returningQuery) addReturning(ret schema.QueryWithArgs) { + q.returning = append(q.returning, ret) +} + +func (q *returningQuery) addReturningField(field *schema.Field) { + if len(q.returning) > 0 { + return + } + for _, f := range q.returningFields { + if f == field { + return + } + } + q.returningFields = append(q.returningFields, field) +} + +func (q *returningQuery) appendReturning( + gen schema.QueryGen, b []byte, +) (_ []byte, err error) { + return q._appendReturning(gen, b, "") +} + +func (q *returningQuery) appendOutput( + gen schema.QueryGen, b []byte, +) (_ []byte, err error) { + return q._appendReturning(gen, b, "INSERTED") +} + +func (q *returningQuery) _appendReturning( + gen schema.QueryGen, b []byte, table string, +) (_ []byte, err error) { + for i, f := range q.returning { + if i > 0 { + b = append(b, ", "...) + } + b, err = f.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + if len(q.returning) > 0 { + return b, nil + } + + b = appendColumns(b, schema.Safe(table), q.returningFields) + return b, nil +} + +func (q *returningQuery) hasReturning() bool { + if len(q.returning) == 1 { + if ret := q.returning[0]; len(ret.Args) == 0 { + switch ret.Query { + case "", "null", "NULL": + return false + } + } + } + return len(q.returning) > 0 || len(q.returningFields) > 0 +} + +//------------------------------------------------------------------------------ + +type customValueQuery struct { + modelValues map[string]schema.QueryWithArgs + extraValues []columnValue +} + +type columnValue struct { + column string + value schema.QueryWithArgs +} + +func (q *customValueQuery) addValue( + table *schema.Table, column string, value string, args []any, +) { + ok := false + if table != nil { + _, ok = table.FieldMap[column] + } + + if ok { + if q.modelValues == nil { + q.modelValues = make(map[string]schema.QueryWithArgs) + } + q.modelValues[column] = schema.SafeQuery(value, args) + } else { + q.extraValues = append(q.extraValues, columnValue{ + column: column, + value: schema.SafeQuery(value, args), + }) + } +} + +//------------------------------------------------------------------------------ + +type setQuery struct { + set []schema.QueryWithArgs + setValues *ValuesQuery + omitZero bool + customValueQuery +} + +func (q *setQuery) addSet(set schema.QueryWithArgs) { + q.set = append(q.set, set) +} + +func (q *setQuery) appendSet(gen schema.QueryGen, b []byte) (_ []byte, err error) { + pos := len(b) + + if q.setValues != nil { + b, err = q.setValues.appendSet(gen, b) + if err != nil { + return nil, err + } + } + + for _, query := range q.set { + if len(b) > pos { + b = append(b, ", "...) 
+ } + b, err = query.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (q *setQuery) appendSetStruct( + gen schema.QueryGen, b []byte, model *structTableModel, fields []*schema.Field, +) (_ []byte, err error) { + isTemplate := gen.IsNop() + pos := len(b) + for _, f := range fields { + if f.SkipUpdate() { + continue + } + + app, hasValue := q.modelValues[f.Name] + + if !hasValue && q.omitZero && f.HasZeroValue(model.strct) { + continue + } + + if len(b) != pos { + b = append(b, ", "...) + pos = len(b) + } + + b = append(b, f.SQLName...) + b = append(b, " = "...) + + if isTemplate { + b = append(b, '?') + continue + } + + if hasValue { + b, err = app.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } else { + b = f.AppendValue(gen, b, model.strct) + } + } + + for i, v := range q.extraValues { + if i > 0 || len(fields) > 0 { + b = append(b, ", "...) + } + + b = append(b, v.column...) + b = append(b, " = "...) + + b, err = v.value.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + return b, nil +} + +//------------------------------------------------------------------------------ + +type cascadeQuery struct { + cascade bool + restrict bool +} + +func (q cascadeQuery) appendCascade(gen schema.QueryGen, b []byte) []byte { + if !gen.HasFeature(feature.TableCascade) { + return b + } + if q.cascade { + b = append(b, " CASCADE"...) + } + if q.restrict { + b = append(b, " RESTRICT"...) + } + return b +} + +//------------------------------------------------------------------------------ + +type idxHintsQuery struct { + use *indexHints + ignore *indexHints + force *indexHints +} + +type indexHints struct { + names []schema.QueryWithArgs + forJoin []schema.QueryWithArgs + forOrderBy []schema.QueryWithArgs + forGroupBy []schema.QueryWithArgs +} + +func (ih *idxHintsQuery) lazyUse() *indexHints { + if ih.use == nil { + ih.use = new(indexHints) + } + return ih.use +} + +func (ih *idxHintsQuery) lazyIgnore() *indexHints { + if ih.ignore == nil { + ih.ignore = new(indexHints) + } + return ih.ignore +} + +func (ih *idxHintsQuery) lazyForce() *indexHints { + if ih.force == nil { + ih.force = new(indexHints) + } + return ih.force +} + +func (ih *idxHintsQuery) appendIndexes(hints []schema.QueryWithArgs, indexes ...string) []schema.QueryWithArgs { + for _, idx := range indexes { + hints = append(hints, schema.UnsafeIdent(idx)) + } + return hints +} + +func (ih *idxHintsQuery) addUseIndex(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyUse().names = ih.appendIndexes(ih.use.names, indexes...) +} + +func (ih *idxHintsQuery) addUseIndexForJoin(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyUse().forJoin = ih.appendIndexes(ih.use.forJoin, indexes...) +} + +func (ih *idxHintsQuery) addUseIndexForOrderBy(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyUse().forOrderBy = ih.appendIndexes(ih.use.forOrderBy, indexes...) +} + +func (ih *idxHintsQuery) addUseIndexForGroupBy(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyUse().forGroupBy = ih.appendIndexes(ih.use.forGroupBy, indexes...) +} + +func (ih *idxHintsQuery) addIgnoreIndex(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyIgnore().names = ih.appendIndexes(ih.ignore.names, indexes...) +} + +func (ih *idxHintsQuery) addIgnoreIndexForJoin(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyIgnore().forJoin = ih.appendIndexes(ih.ignore.forJoin, indexes...) 
+} + +func (ih *idxHintsQuery) addIgnoreIndexForOrderBy(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyIgnore().forOrderBy = ih.appendIndexes(ih.ignore.forOrderBy, indexes...) +} + +func (ih *idxHintsQuery) addIgnoreIndexForGroupBy(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyIgnore().forGroupBy = ih.appendIndexes(ih.ignore.forGroupBy, indexes...) +} + +func (ih *idxHintsQuery) addForceIndex(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyForce().names = ih.appendIndexes(ih.force.names, indexes...) +} + +func (ih *idxHintsQuery) addForceIndexForJoin(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyForce().forJoin = ih.appendIndexes(ih.force.forJoin, indexes...) +} + +func (ih *idxHintsQuery) addForceIndexForOrderBy(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyForce().forOrderBy = ih.appendIndexes(ih.force.forOrderBy, indexes...) +} + +func (ih *idxHintsQuery) addForceIndexForGroupBy(indexes ...string) { + if len(indexes) == 0 { + return + } + ih.lazyForce().forGroupBy = ih.appendIndexes(ih.force.forGroupBy, indexes...) +} + +func (ih *idxHintsQuery) appendIndexHints( + gen schema.QueryGen, b []byte, +) ([]byte, error) { + type IdxHint struct { + Name string + Values []schema.QueryWithArgs + } + + var hints []IdxHint + if ih.use != nil { + hints = append(hints, []IdxHint{ + { + Name: "USE INDEX", + Values: ih.use.names, + }, + { + Name: "USE INDEX FOR JOIN", + Values: ih.use.forJoin, + }, + { + Name: "USE INDEX FOR ORDER BY", + Values: ih.use.forOrderBy, + }, + { + Name: "USE INDEX FOR GROUP BY", + Values: ih.use.forGroupBy, + }, + }...) + } + + if ih.ignore != nil { + hints = append(hints, []IdxHint{ + { + Name: "IGNORE INDEX", + Values: ih.ignore.names, + }, + { + Name: "IGNORE INDEX FOR JOIN", + Values: ih.ignore.forJoin, + }, + { + Name: "IGNORE INDEX FOR ORDER BY", + Values: ih.ignore.forOrderBy, + }, + { + Name: "IGNORE INDEX FOR GROUP BY", + Values: ih.ignore.forGroupBy, + }, + }...) + } + + if ih.force != nil { + hints = append(hints, []IdxHint{ + { + Name: "FORCE INDEX", + Values: ih.force.names, + }, + { + Name: "FORCE INDEX FOR JOIN", + Values: ih.force.forJoin, + }, + { + Name: "FORCE INDEX FOR ORDER BY", + Values: ih.force.forOrderBy, + }, + { + Name: "FORCE INDEX FOR GROUP BY", + Values: ih.force.forGroupBy, + }, + }...) + } + + var err error + for _, h := range hints { + b, err = ih.bufIndexHint(h.Name, h.Values, gen, b) + if err != nil { + return nil, err + } + } + return b, nil +} + +func (ih *idxHintsQuery) bufIndexHint( + name string, + hints []schema.QueryWithArgs, + gen schema.QueryGen, b []byte, +) ([]byte, error) { + var err error + if len(hints) == 0 { + return b, nil + } + b = append(b, fmt.Sprintf(" %s (", name)...) + for i, f := range hints { + if i > 0 { + b = append(b, ", "...) + } + b, err = f.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + b = append(b, ")"...) + return b, nil +} + +//------------------------------------------------------------------------------ + +type orderLimitOffsetQuery struct { + order []schema.QueryWithArgs + + limit int32 + offset int32 +} + +func (q *orderLimitOffsetQuery) addOrder(orders ...string) { + for _, order := range orders { + if order == "" { + continue + } + + index := strings.IndexByte(order, ' ') + if index == -1 { + q.order = append(q.order, schema.UnsafeIdent(order)) + continue + } + + colName := order[:index] + sortDir := order[index+1:] + q.addOrderExpr("? 
?", Ident(colName), Order(strings.ToUpper(sortDir))) + } +} + +func (q *orderLimitOffsetQuery) addOrderBy(colName string, sortDir Order) { + q.addOrderExpr("? ?", Ident(colName), sortDir) +} + +func (q *orderLimitOffsetQuery) addOrderExpr(query string, args ...any) { + q.order = append(q.order, schema.SafeQuery(query, args)) +} + +func (q *orderLimitOffsetQuery) appendOrder(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if len(q.order) > 0 { + b = append(b, " ORDER BY "...) + + for i, f := range q.order { + if i > 0 { + b = append(b, ", "...) + } + b, err = f.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + return b, nil + } + + // MSSQL: allows Limit() without Order() as per https://stackoverflow.com/a/36156953 + if q.limit > 0 && gen.Dialect().Name() == dialect.MSSQL { + return append(b, " ORDER BY _temp_sort"...), nil + } + + return b, nil +} + +func (q *orderLimitOffsetQuery) setLimit(n int) { + q.limit = int32(n) +} + +func (q *orderLimitOffsetQuery) setOffset(n int) { + q.offset = int32(n) +} + +func (q *orderLimitOffsetQuery) appendLimitOffset(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if gen.Dialect().Features().Has(feature.OffsetFetch) { + if q.limit > 0 && q.offset > 0 { + b = append(b, " OFFSET "...) + b = strconv.AppendInt(b, int64(q.offset), 10) + b = append(b, " ROWS"...) + + b = append(b, " FETCH NEXT "...) + b = strconv.AppendInt(b, int64(q.limit), 10) + b = append(b, " ROWS ONLY"...) + } else if q.limit > 0 { + b = append(b, " OFFSET 0 ROWS"...) + + b = append(b, " FETCH NEXT "...) + b = strconv.AppendInt(b, int64(q.limit), 10) + b = append(b, " ROWS ONLY"...) + } else if q.offset > 0 { + b = append(b, " OFFSET "...) + b = strconv.AppendInt(b, int64(q.offset), 10) + b = append(b, " ROWS"...) + } + } else { + if q.limit > 0 { + b = append(b, " LIMIT "...) + b = strconv.AppendInt(b, int64(q.limit), 10) + } + if q.offset > 0 { + b = append(b, " OFFSET "...) + b = strconv.AppendInt(b, int64(q.offset), 10) + } + } + + return b, nil +} + +func IsReadOnlyQuery(query Query) bool { + sel, ok := query.(*SelectQuery) + if !ok { + return false + } + for _, el := range sel.with { + if !IsReadOnlyQuery(el.query) { + return false + } + } + return true +} diff --git a/vendor/github.com/uptrace/bun/query_column_add.go b/vendor/github.com/uptrace/bun/query_column_add.go new file mode 100644 index 0000000..9050c8e --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_column_add.go @@ -0,0 +1,150 @@ +package bun + +import ( + "context" + "database/sql" + "fmt" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type AddColumnQuery struct { + baseQuery + + ifNotExists bool + comment string +} + +var _ Query = (*AddColumnQuery)(nil) + +func NewAddColumnQuery(db *DB) *AddColumnQuery { + q := &AddColumnQuery{ + baseQuery: baseQuery{ + db: db, + }, + } + return q +} + +func (q *AddColumnQuery) Conn(db IConn) *AddColumnQuery { + q.setConn(db) + return q +} + +func (q *AddColumnQuery) Model(model any) *AddColumnQuery { + q.setModel(model) + return q +} + +func (q *AddColumnQuery) Err(err error) *AddColumnQuery { + q.setErr(err) + return q +} + +// Apply calls each function in fns, passing the AddColumnQuery as an argument. 
+func (q *AddColumnQuery) Apply(fns ...func(*AddColumnQuery) *AddColumnQuery) *AddColumnQuery { + for _, fn := range fns { + if fn != nil { + q = fn(q) + } + } + return q +} + +//------------------------------------------------------------------------------ + +func (q *AddColumnQuery) Table(tables ...string) *AddColumnQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *AddColumnQuery) TableExpr(query string, args ...any) *AddColumnQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *AddColumnQuery) ModelTableExpr(query string, args ...any) *AddColumnQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *AddColumnQuery) ColumnExpr(query string, args ...any) *AddColumnQuery { + q.addColumn(schema.SafeQuery(query, args)) + return q +} + +func (q *AddColumnQuery) IfNotExists() *AddColumnQuery { + q.ifNotExists = true + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *AddColumnQuery) Comment(comment string) *AddColumnQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *AddColumnQuery) Operation() string { + return "ADD COLUMN" +} + +func (q *AddColumnQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + if len(q.columns) != 1 { + return nil, fmt.Errorf("bun: AddColumnQuery requires exactly one column") + } + + b = append(b, "ALTER TABLE "...) + + b, err = q.appendFirstTable(gen, b) + if err != nil { + return nil, err + } + + b = append(b, " ADD "...) + + if q.ifNotExists { + b = append(b, "IF NOT EXISTS "...) 
+ } + + b, err = q.columns[0].AppendQuery(gen, b) + if err != nil { + return nil, err + } + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *AddColumnQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + if q.ifNotExists && !q.hasFeature(feature.AlterColumnExists) { + return nil, feature.NewNotSupportError(feature.AlterColumnExists) + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + return q.exec(ctx, q, query) +} diff --git a/vendor/github.com/uptrace/bun/query_column_drop.go b/vendor/github.com/uptrace/bun/query_column_drop.go new file mode 100644 index 0000000..3ac8a04 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_column_drop.go @@ -0,0 +1,148 @@ +package bun + +import ( + "context" + "database/sql" + "fmt" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type DropColumnQuery struct { + baseQuery + + comment string +} + +var _ Query = (*DropColumnQuery)(nil) + +func NewDropColumnQuery(db *DB) *DropColumnQuery { + q := &DropColumnQuery{ + baseQuery: baseQuery{ + db: db, + }, + } + return q +} + +func (q *DropColumnQuery) Conn(db IConn) *DropColumnQuery { + q.setConn(db) + return q +} + +func (q *DropColumnQuery) Model(model any) *DropColumnQuery { + q.setModel(model) + return q +} + +func (q *DropColumnQuery) Err(err error) *DropColumnQuery { + q.setErr(err) + return q +} + +// Apply calls each function in fns, passing the DropColumnQuery as an argument. +func (q *DropColumnQuery) Apply(fns ...func(*DropColumnQuery) *DropColumnQuery) *DropColumnQuery { + for _, fn := range fns { + if fn != nil { + q = fn(q) + } + } + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropColumnQuery) Table(tables ...string) *DropColumnQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *DropColumnQuery) TableExpr(query string, args ...any) *DropColumnQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *DropColumnQuery) ModelTableExpr(query string, args ...any) *DropColumnQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropColumnQuery) Column(columns ...string) *DropColumnQuery { + for _, column := range columns { + q.addColumn(schema.UnsafeIdent(column)) + } + return q +} + +func (q *DropColumnQuery) ColumnExpr(query string, args ...any) *DropColumnQuery { + q.addColumn(schema.SafeQuery(query, args)) + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *DropColumnQuery) Comment(comment string) *DropColumnQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropColumnQuery) Operation() string { + return "DROP COLUMN" +} + +func (q *DropColumnQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + if len(q.columns) != 1 { + return nil, fmt.Errorf("bun: DropColumnQuery requires exactly one column") + } + + b = append(b, "ALTER TABLE "...) 
+ + b, err = q.appendFirstTable(gen, b) + if err != nil { + return nil, err + } + + b = append(b, " DROP COLUMN "...) + + b, err = q.columns[0].AppendQuery(gen, b) + if err != nil { + return nil, err + } + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *DropColumnQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + res, err := q.exec(ctx, q, query) + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/vendor/github.com/uptrace/bun/query_delete.go b/vendor/github.com/uptrace/bun/query_delete.go new file mode 100644 index 0000000..bd1530f --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_delete.go @@ -0,0 +1,452 @@ +package bun + +import ( + "context" + "database/sql" + "errors" + "time" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type DeleteQuery struct { + whereBaseQuery + orderLimitOffsetQuery + returningQuery + + comment string +} + +var _ Query = (*DeleteQuery)(nil) + +func NewDeleteQuery(db *DB) *DeleteQuery { + q := &DeleteQuery{ + whereBaseQuery: whereBaseQuery{ + baseQuery: baseQuery{ + db: db, + }, + }, + } + return q +} + +func (q *DeleteQuery) Conn(db IConn) *DeleteQuery { + q.setConn(db) + return q +} + +func (q *DeleteQuery) Model(model any) *DeleteQuery { + q.setModel(model) + return q +} + +func (q *DeleteQuery) Err(err error) *DeleteQuery { + q.setErr(err) + return q +} + +// Apply calls each function in fns, passing the DeleteQuery as an argument. 
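+//
+// A minimal sketch, assuming a hypothetical multi-tenant filter; byTenant and
+// the Order model are illustrative, not part of the original source:
+//
+//	byTenant := func(tenantID int64) func(*bun.DeleteQuery) *bun.DeleteQuery {
+//		return func(q *bun.DeleteQuery) *bun.DeleteQuery {
+//			return q.Where("tenant_id = ?", tenantID)
+//		}
+//	}
+//
+//	_, err := db.NewDelete().
+//		Model((*Order)(nil)).
+//		Apply(byTenant(42)).
+//		Where("status = ?", "expired").
+//		Exec(ctx)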
+func (q *DeleteQuery) Apply(fns ...func(*DeleteQuery) *DeleteQuery) *DeleteQuery {
+ for _, fn := range fns {
+ if fn != nil {
+ q = fn(q)
+ }
+ }
+ return q
+}
+
+func (q *DeleteQuery) With(name string, query Query) *DeleteQuery {
+ q.addWith(NewWithQuery(name, query))
+ return q
+}
+
+func (q *DeleteQuery) WithRecursive(name string, query Query) *DeleteQuery {
+ q.addWith(NewWithQuery(name, query).Recursive())
+ return q
+}
+
+func (q *DeleteQuery) WithQuery(query *WithQuery) *DeleteQuery {
+ q.addWith(query)
+ return q
+}
+
+func (q *DeleteQuery) Table(tables ...string) *DeleteQuery {
+ for _, table := range tables {
+ q.addTable(schema.UnsafeIdent(table))
+ }
+ return q
+}
+
+func (q *DeleteQuery) TableExpr(query string, args ...any) *DeleteQuery {
+ q.addTable(schema.SafeQuery(query, args))
+ return q
+}
+
+func (q *DeleteQuery) ModelTableExpr(query string, args ...any) *DeleteQuery {
+ q.modelTableName = schema.SafeQuery(query, args)
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+func (q *DeleteQuery) WherePK(cols ...string) *DeleteQuery {
+ q.addWhereCols(cols)
+ return q
+}
+
+func (q *DeleteQuery) Where(query string, args ...any) *DeleteQuery {
+ q.addWhere(schema.SafeQueryWithSep(query, args, " AND "))
+ return q
+}
+
+func (q *DeleteQuery) WhereOr(query string, args ...any) *DeleteQuery {
+ q.addWhere(schema.SafeQueryWithSep(query, args, " OR "))
+ return q
+}
+
+func (q *DeleteQuery) WhereGroup(sep string, fn func(*DeleteQuery) *DeleteQuery) *DeleteQuery {
+ saved := q.where
+ q.where = nil
+
+ q = fn(q)
+
+ where := q.where
+ q.where = saved
+
+ q.addWhereGroup(sep, where)
+
+ return q
+}
+
+func (q *DeleteQuery) WhereDeleted() *DeleteQuery {
+ q.whereDeleted()
+ return q
+}
+
+func (q *DeleteQuery) WhereAllWithDeleted() *DeleteQuery {
+ q.whereAllWithDeleted()
+ return q
+}
+
+func (q *DeleteQuery) Order(orders ...string) *DeleteQuery {
+ if !q.hasFeature(feature.DeleteOrderLimit) {
+ q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
+ return q
+ }
+ q.addOrder(orders...)
+ return q
+}
+
+func (q *DeleteQuery) OrderExpr(query string, args ...any) *DeleteQuery {
+ if !q.hasFeature(feature.DeleteOrderLimit) {
+ q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
+ return q
+ }
+ q.addOrderExpr(query, args...)
+ return q
+}
+
+func (q *DeleteQuery) ForceDelete() *DeleteQuery {
+ q.flags = q.flags.Set(forceDeleteFlag)
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+func (q *DeleteQuery) Limit(n int) *DeleteQuery {
+ if !q.hasFeature(feature.DeleteOrderLimit) {
+ q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
+ return q
+ }
+ q.setLimit(n)
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+// Returning adds a RETURNING clause to the query.
+//
+// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`.
+func (q *DeleteQuery) Returning(query string, args ...any) *DeleteQuery {
+ if !q.hasFeature(feature.DeleteReturning) {
+ q.setErr(feature.NewNotSupportError(feature.DeleteReturning))
+ return q
+ }
+
+ q.addReturning(schema.SafeQuery(query, args))
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+// Comment adds a comment to the query, wrapped by /* ... */.
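+//
+// For example, Comment("cleanup-job") (a hypothetical label) prefixes the
+// generated SQL with /* cleanup-job */, which helps attribute queries in
+// server logs:
+//
+//	_, err := db.NewDelete().
+//		Model((*Order)(nil)).
+//		Where("status = ?", "expired").
+//		Comment("cleanup-job").
+//		Exec(ctx)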
+func (q *DeleteQuery) Comment(comment string) *DeleteQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *DeleteQuery) Operation() string { + return "DELETE" +} + +func (q *DeleteQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + gen = formatterWithModel(gen, q) + + if q.isSoftDelete() { + now := time.Now() + + if err := q.tableModel.updateSoftDeleteField(now); err != nil { + return nil, err + } + + upd := &UpdateQuery{ + whereBaseQuery: q.whereBaseQuery, + returningQuery: q.returningQuery, + } + upd.Set(q.softDeleteSet(gen, now)) + + return upd.AppendQuery(gen, b) + } + + withAlias := q.db.HasFeature(feature.DeleteTableAlias) + + b, err = q.appendWith(gen, b) + if err != nil { + return nil, err + } + + b = append(b, "DELETE FROM "...) + + if withAlias { + b, err = q.appendFirstTableWithAlias(gen, b) + } else { + b, err = q.appendFirstTable(gen, b) + } + if err != nil { + return nil, err + } + + if q.hasMultiTables() { + b = append(b, " USING "...) + b, err = q.appendOtherTables(gen, b) + if err != nil { + return nil, err + } + } + + if q.hasFeature(feature.Output) && q.hasReturning() { + b = append(b, " OUTPUT "...) + b, err = q.appendOutput(gen, b) + if err != nil { + return nil, err + } + } + + b, err = q.mustAppendWhere(gen, b, withAlias) + if err != nil { + return nil, err + } + + if q.hasMultiTables() && (len(q.order) > 0 || q.limit > 0) { + return nil, errors.New("bun: can't use ORDER or LIMIT with multiple tables") + } + + b, err = q.appendOrder(gen, b) + if err != nil { + return nil, err + } + + b, err = q.appendLimitOffset(gen, b) + if err != nil { + return nil, err + } + + if q.hasFeature(feature.DeleteReturning) && q.hasReturning() { + b = append(b, " RETURNING "...) + b, err = q.appendReturning(gen, b) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (q *DeleteQuery) isSoftDelete() bool { + return q.tableModel != nil && q.table.SoftDeleteField != nil && !q.flags.Has(forceDeleteFlag) +} + +func (q *DeleteQuery) softDeleteSet(gen schema.QueryGen, tm time.Time) string { + b := make([]byte, 0, 32) + if gen.HasFeature(feature.UpdateMultiTable) { + b = append(b, q.table.SQLAlias...) + b = append(b, '.') + } + b = append(b, q.table.SoftDeleteField.SQLName...) + b = append(b, " = "...) + b = gen.Append(b, tm) + return internal.String(b) +} + +//------------------------------------------------------------------------------ + +func (q *DeleteQuery) Scan(ctx context.Context, dest ...any) error { + _, err := q.scanOrExec(ctx, dest, true) + return err +} + +func (q *DeleteQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + return q.scanOrExec(ctx, dest, len(dest) > 0) +} + +func (q *DeleteQuery) scanOrExec( + ctx context.Context, dest []any, hasDest bool, +) (sql.Result, error) { + if q.err != nil { + return nil, q.err + } + + if q.table != nil { + if err := q.beforeDeleteHook(ctx); err != nil { + return nil, err + } + } + + // Run append model hooks before generating the query. + if err := q.beforeAppendModel(ctx, q); err != nil { + return nil, err + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + // Generate the query before checking hasReturning. 
+ queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.DeleteReturning|feature.Output)) + var model Model + + if useScan { + var err error + model, err = q.getModel(dest) + if err != nil { + return nil, err + } + } + + query := internal.String(queryBytes) + + var res sql.Result + + if useScan { + res, err = q.scan(ctx, q, query, model, hasDest) + if err != nil { + return nil, err + } + } else { + res, err = q.exec(ctx, q, query) + if err != nil { + return nil, err + } + } + + if q.table != nil { + if err := q.afterDeleteHook(ctx); err != nil { + return nil, err + } + } + + return res, nil +} + +func (q *DeleteQuery) beforeDeleteHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(BeforeDeleteHook); ok { + if err := hook.BeforeDelete(ctx, q); err != nil { + return err + } + } + return nil +} + +func (q *DeleteQuery) afterDeleteHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(AfterDeleteHook); ok { + if err := hook.AfterDelete(ctx, q); err != nil { + return err + } + } + return nil +} + +// String returns the generated SQL query string. The DeleteQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. +func (q *DeleteQuery) String() string { + buf, err := q.AppendQuery(q.db.QueryGen(), nil) + if err != nil { + panic(err) + } + return string(buf) +} + +//------------------------------------------------------------------------------ + +func (q *DeleteQuery) QueryBuilder() QueryBuilder { + return &deleteQueryBuilder{q} +} + +func (q *DeleteQuery) ApplyQueryBuilder(fn func(QueryBuilder) QueryBuilder) *DeleteQuery { + return fn(q.QueryBuilder()).Unwrap().(*DeleteQuery) +} + +type deleteQueryBuilder struct { + *DeleteQuery +} + +func (q *deleteQueryBuilder) WhereGroup( + sep string, fn func(QueryBuilder) QueryBuilder, +) QueryBuilder { + q.DeleteQuery = q.DeleteQuery.WhereGroup(sep, func(qs *DeleteQuery) *DeleteQuery { + return fn(q).(*deleteQueryBuilder).DeleteQuery + }) + return q +} + +func (q *deleteQueryBuilder) Where(query string, args ...any) QueryBuilder { + q.DeleteQuery.Where(query, args...) + return q +} + +func (q *deleteQueryBuilder) WhereOr(query string, args ...any) QueryBuilder { + q.DeleteQuery.WhereOr(query, args...) + return q +} + +func (q *deleteQueryBuilder) WhereDeleted() QueryBuilder { + q.DeleteQuery.WhereDeleted() + return q +} + +func (q *deleteQueryBuilder) WhereAllWithDeleted() QueryBuilder { + q.DeleteQuery.WhereAllWithDeleted() + return q +} + +func (q *deleteQueryBuilder) WherePK(cols ...string) QueryBuilder { + q.DeleteQuery.WherePK(cols...) 
+ return q +} + +func (q *deleteQueryBuilder) Unwrap() any { + return q.DeleteQuery +} diff --git a/vendor/github.com/uptrace/bun/query_index_create.go b/vendor/github.com/uptrace/bun/query_index_create.go new file mode 100644 index 0000000..d5b27c1 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_index_create.go @@ -0,0 +1,267 @@ +package bun + +import ( + "context" + "database/sql" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type CreateIndexQuery struct { + whereBaseQuery + + unique bool + fulltext bool + spatial bool + concurrently bool + ifNotExists bool + + index schema.QueryWithArgs + using schema.QueryWithArgs + include []schema.QueryWithArgs + comment string +} + +var _ Query = (*CreateIndexQuery)(nil) + +func NewCreateIndexQuery(db *DB) *CreateIndexQuery { + q := &CreateIndexQuery{ + whereBaseQuery: whereBaseQuery{ + baseQuery: baseQuery{ + db: db, + }, + }, + } + return q +} + +func (q *CreateIndexQuery) Conn(db IConn) *CreateIndexQuery { + q.setConn(db) + return q +} + +func (q *CreateIndexQuery) Model(model any) *CreateIndexQuery { + q.setModel(model) + return q +} + +func (q *CreateIndexQuery) Err(err error) *CreateIndexQuery { + q.setErr(err) + return q +} + +func (q *CreateIndexQuery) Unique() *CreateIndexQuery { + q.unique = true + return q +} + +func (q *CreateIndexQuery) Concurrently() *CreateIndexQuery { + q.concurrently = true + return q +} + +func (q *CreateIndexQuery) IfNotExists() *CreateIndexQuery { + q.ifNotExists = true + return q +} + +//------------------------------------------------------------------------------ + +func (q *CreateIndexQuery) Index(query string) *CreateIndexQuery { + q.index = schema.UnsafeIdent(query) + return q +} + +func (q *CreateIndexQuery) IndexExpr(query string, args ...any) *CreateIndexQuery { + q.index = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *CreateIndexQuery) Table(tables ...string) *CreateIndexQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *CreateIndexQuery) TableExpr(query string, args ...any) *CreateIndexQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *CreateIndexQuery) ModelTableExpr(query string, args ...any) *CreateIndexQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +func (q *CreateIndexQuery) Using(query string, args ...any) *CreateIndexQuery { + q.using = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *CreateIndexQuery) Column(columns ...string) *CreateIndexQuery { + for _, column := range columns { + q.addColumn(schema.UnsafeIdent(column)) + } + return q +} + +func (q *CreateIndexQuery) ColumnExpr(query string, args ...any) *CreateIndexQuery { + q.addColumn(schema.SafeQuery(query, args)) + return q +} + +func (q *CreateIndexQuery) ExcludeColumn(columns ...string) *CreateIndexQuery { + q.excludeColumn(columns) + return q +} + +//------------------------------------------------------------------------------ + +func (q *CreateIndexQuery) Include(columns ...string) *CreateIndexQuery { + for _, column := range columns { + q.include = append(q.include, schema.UnsafeIdent(column)) + } + return q +} + +func (q *CreateIndexQuery) IncludeExpr(query string, args ...any) *CreateIndexQuery { + q.include = append(q.include, schema.SafeQuery(query, args)) + return q +} + 
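+// A minimal sketch of a unique covering index, assuming a hypothetical User
+// model and PostgreSQL (INCLUDE needs PostgreSQL 11+; names are illustrative):
+//
+//	_, err := db.NewCreateIndex().
+//		Model((*User)(nil)).
+//		Index("users_email_idx").
+//		Unique().
+//		Column("email").
+//		Include("name").
+//		Exec(ctx)
+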
+//------------------------------------------------------------------------------ + +func (q *CreateIndexQuery) Where(query string, args ...any) *CreateIndexQuery { + q.addWhere(schema.SafeQueryWithSep(query, args, " AND ")) + return q +} + +func (q *CreateIndexQuery) WhereOr(query string, args ...any) *CreateIndexQuery { + q.addWhere(schema.SafeQueryWithSep(query, args, " OR ")) + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *CreateIndexQuery) Comment(comment string) *CreateIndexQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *CreateIndexQuery) Operation() string { + return "CREATE INDEX" +} + +func (q *CreateIndexQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + b = append(b, "CREATE "...) + + if q.unique { + b = append(b, "UNIQUE "...) + } + if q.fulltext { + b = append(b, "FULLTEXT "...) + } + if q.spatial { + b = append(b, "SPATIAL "...) + } + + b = append(b, "INDEX "...) + + if q.concurrently { + b = append(b, "CONCURRENTLY "...) + } + if q.ifNotExists { + b = append(b, "IF NOT EXISTS "...) + } + + b, err = q.index.AppendQuery(gen, b) + if err != nil { + return nil, err + } + + b = append(b, " ON "...) + b, err = q.appendFirstTable(gen, b) + if err != nil { + return nil, err + } + + if !q.using.IsZero() { + b = append(b, " USING "...) + b, err = q.using.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + b = append(b, " ("...) + for i, col := range q.columns { + if i > 0 { + b = append(b, ", "...) + } + b, err = col.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + b = append(b, ')') + + if len(q.include) > 0 { + b = append(b, " INCLUDE ("...) + for i, col := range q.include { + if i > 0 { + b = append(b, ", "...) + } + b, err = col.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + b = append(b, ')') + } + + if len(q.where) > 0 { + b = append(b, " WHERE "...) 
+ b, err = appendWhere(gen, b, q.where) + if err != nil { + return nil, err + } + } + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *CreateIndexQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + res, err := q.exec(ctx, q, query) + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/vendor/github.com/uptrace/bun/query_index_drop.go b/vendor/github.com/uptrace/bun/query_index_drop.go new file mode 100644 index 0000000..f71cf1f --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_index_drop.go @@ -0,0 +1,134 @@ +package bun + +import ( + "context" + "database/sql" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type DropIndexQuery struct { + baseQuery + cascadeQuery + + concurrently bool + ifExists bool + + index schema.QueryWithArgs + comment string +} + +var _ Query = (*DropIndexQuery)(nil) + +func NewDropIndexQuery(db *DB) *DropIndexQuery { + q := &DropIndexQuery{ + baseQuery: baseQuery{ + db: db, + }, + } + return q +} + +func (q *DropIndexQuery) Conn(db IConn) *DropIndexQuery { + q.setConn(db) + return q +} + +func (q *DropIndexQuery) Model(model any) *DropIndexQuery { + q.setModel(model) + return q +} + +func (q *DropIndexQuery) Err(err error) *DropIndexQuery { + q.setErr(err) + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropIndexQuery) Concurrently() *DropIndexQuery { + q.concurrently = true + return q +} + +func (q *DropIndexQuery) IfExists() *DropIndexQuery { + q.ifExists = true + return q +} + +func (q *DropIndexQuery) Cascade() *DropIndexQuery { + q.cascade = true + return q +} + +func (q *DropIndexQuery) Restrict() *DropIndexQuery { + q.restrict = true + return q +} + +func (q *DropIndexQuery) Index(query string, args ...any) *DropIndexQuery { + q.index = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *DropIndexQuery) Comment(comment string) *DropIndexQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropIndexQuery) Operation() string { + return "DROP INDEX" +} + +func (q *DropIndexQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + b = append(b, "DROP INDEX "...) + + if q.concurrently { + b = append(b, "CONCURRENTLY "...) + } + if q.ifExists { + b = append(b, "IF EXISTS "...) 
+ } + + b, err = q.index.AppendQuery(gen, b) + if err != nil { + return nil, err + } + + b = q.appendCascade(gen, b) + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *DropIndexQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + res, err := q.exec(ctx, q, query) + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/vendor/github.com/uptrace/bun/query_insert.go b/vendor/github.com/uptrace/bun/query_insert.go new file mode 100644 index 0000000..15f3aec --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_insert.go @@ -0,0 +1,706 @@ +package bun + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "strings" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type InsertQuery struct { + whereBaseQuery + returningQuery + customValueQuery + + on schema.QueryWithArgs + setQuery + + ignore bool + replace bool + comment string +} + +var _ Query = (*InsertQuery)(nil) + +func NewInsertQuery(db *DB) *InsertQuery { + q := &InsertQuery{ + whereBaseQuery: whereBaseQuery{ + baseQuery: baseQuery{ + db: db, + }, + }, + } + return q +} + +func (q *InsertQuery) Conn(db IConn) *InsertQuery { + q.setConn(db) + return q +} + +func (q *InsertQuery) Model(model any) *InsertQuery { + q.setModel(model) + return q +} + +func (q *InsertQuery) Err(err error) *InsertQuery { + q.setErr(err) + return q +} + +// Apply calls each function in fns, passing the InsertQuery as an argument. +func (q *InsertQuery) Apply(fns ...func(*InsertQuery) *InsertQuery) *InsertQuery { + for _, fn := range fns { + if fn != nil { + q = fn(q) + } + } + return q +} + +func (q *InsertQuery) With(name string, query Query) *InsertQuery { + q.addWith(NewWithQuery(name, query)) + return q +} + +func (q *InsertQuery) WithRecursive(name string, query Query) *InsertQuery { + q.addWith(NewWithQuery(name, query).Recursive()) + return q +} + +func (q *InsertQuery) WithQuery(query *WithQuery) *InsertQuery { + q.addWith(query) + return q +} + + +//------------------------------------------------------------------------------ + +func (q *InsertQuery) Table(tables ...string) *InsertQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *InsertQuery) TableExpr(query string, args ...any) *InsertQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *InsertQuery) ModelTableExpr(query string, args ...any) *InsertQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *InsertQuery) Column(columns ...string) *InsertQuery { + for _, column := range columns { + q.addColumn(schema.UnsafeIdent(column)) + } + return q +} + +func (q *InsertQuery) ColumnExpr(query string, args ...any) *InsertQuery { + q.addColumn(schema.SafeQuery(query, args)) + return q +} + +func (q *InsertQuery) ExcludeColumn(columns ...string) *InsertQuery { + q.excludeColumn(columns) + return q +} + +// Value overwrites model value for the column. 
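+//
+// A minimal sketch, assuming a hypothetical created_at column; the expression
+// is rendered in place of the model's field value:
+//
+//	_, err := db.NewInsert().
+//		Model(&user).
+//		Value("created_at", "NOW()").
+//		Exec(ctx)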
+func (q *InsertQuery) Value(column string, expr string, args ...any) *InsertQuery {
+ if q.table == nil {
+ q.setErr(errNilModel)
+ return q
+ }
+ q.addValue(q.table, column, expr, args)
+ return q
+}
+
+func (q *InsertQuery) Where(query string, args ...any) *InsertQuery {
+ q.addWhere(schema.SafeQueryWithSep(query, args, " AND "))
+ return q
+}
+
+func (q *InsertQuery) WhereOr(query string, args ...any) *InsertQuery {
+ q.addWhere(schema.SafeQueryWithSep(query, args, " OR "))
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+// Returning adds a RETURNING clause to the query.
+//
+// To suppress the auto-generated RETURNING clause, use `Returning("")`.
+func (q *InsertQuery) Returning(query string, args ...any) *InsertQuery {
+ q.addReturning(schema.SafeQuery(query, args))
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+// Ignore generates different queries depending on the DBMS:
+// - On MySQL, it generates `INSERT IGNORE INTO`.
+// - On PostgreSQL, it generates `ON CONFLICT DO NOTHING`.
+func (q *InsertQuery) Ignore() *InsertQuery {
+ if q.db.gen.HasFeature(feature.InsertOnConflict) {
+ return q.On("CONFLICT DO NOTHING")
+ }
+ if q.db.gen.HasFeature(feature.InsertIgnore) {
+ q.ignore = true
+ }
+ return q
+}
+
+// Replace generates a `REPLACE INTO` query (MySQL and MariaDB).
+func (q *InsertQuery) Replace() *InsertQuery {
+ q.replace = true
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+// Comment adds a comment to the query, wrapped by /* ... */.
+func (q *InsertQuery) Comment(comment string) *InsertQuery {
+ q.comment = comment
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+func (q *InsertQuery) Operation() string {
+ return "INSERT"
+}
+
+func (q *InsertQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ b = appendComment(b, q.comment)
+
+ gen = formatterWithModel(gen, q)
+
+ b, err = q.appendWith(gen, b)
+ if err != nil {
+ return nil, err
+ }
+
+ if q.replace {
+ b = append(b, "REPLACE "...)
+ } else {
+ b = append(b, "INSERT "...)
+ if q.ignore {
+ b = append(b, "IGNORE "...)
+ }
+ }
+ b = append(b, "INTO "...)
+
+ if q.db.HasFeature(feature.InsertTableAlias) && !q.on.IsZero() {
+ b, err = q.appendFirstTableWithAlias(gen, b)
+ } else {
+ b, err = q.appendFirstTable(gen, b)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ b, err = q.appendColumnsValues(gen, b, false)
+ if err != nil {
+ return nil, err
+ }
+
+ b, err = q.appendOn(gen, b)
+ if err != nil {
+ return nil, err
+ }
+
+ if q.hasFeature(feature.InsertReturning) && q.hasReturning() {
+ b = append(b, " RETURNING "...)
+ b, err = q.appendReturning(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return b, nil
+}
+
+func (q *InsertQuery) appendColumnsValues(
+ gen schema.QueryGen, b []byte, skipOutput bool,
+) (_ []byte, err error) {
+ if q.hasMultiTables() {
+ if q.columns != nil {
+ b = append(b, " ("...)
+ b, err = q.appendColumns(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, ")"...)
+ }
+
+ if q.hasFeature(feature.Output) && q.hasReturning() {
+ b = append(b, " OUTPUT "...)
+ b, err = q.appendOutput(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ b = append(b, " SELECT "...)
+
+ if q.columns != nil {
+ b, err = q.appendColumns(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ b = append(b, "*"...)
+ }
+
+ b = append(b, " FROM "...)
+ b, err = q.appendOtherTables(gen, b)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+ }
+
+ if m, ok := q.model.(*mapModel); ok {
+ return m.appendColumnsValues(gen, b), nil
+ }
+ if _, ok := q.model.(*mapSliceModel); ok {
+ return nil, fmt.Errorf("bun: Insert(*[]map[string]any) is not supported")
+ }
+
+ if q.model == nil {
+ return nil, errNilModel
+ }
+
+ // Build fields to populate RETURNING clause.
+ fields, err := q.getFields()
+ if err != nil {
+ return nil, err
+ }
+
+ b = append(b, " ("...)
+ b = q.appendFields(gen, b, fields)
+ b = append(b, ")"...)
+
+ if q.hasFeature(feature.Output) && q.hasReturning() && !skipOutput {
+ b = append(b, " OUTPUT "...)
+ b, err = q.appendOutput(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ b = append(b, " VALUES ("...)
+
+ switch model := q.tableModel.(type) {
+ case *structTableModel:
+ b, err = q.appendStructValues(gen, b, fields, model.strct)
+ if err != nil {
+ return nil, err
+ }
+ case *sliceTableModel:
+ b, err = q.appendSliceValues(gen, b, fields, model.slice)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("bun: Insert does not support %T", q.tableModel)
+ }
+
+ b = append(b, ')')
+
+ return b, nil
+}
+
+func (q *InsertQuery) appendStructValues(
+ gen schema.QueryGen, b []byte, fields []*schema.Field, strct reflect.Value,
+) (_ []byte, err error) {
+ isTemplate := gen.IsNop()
+ for i, f := range fields {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+
+ app, ok := q.modelValues[f.Name]
+ if ok {
+ b, err = app.AppendQuery(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ q.addReturningField(f)
+ continue
+ }
+
+ switch {
+ case isTemplate:
+ b = append(b, '?')
+ case q.marshalsToDefault(f, strct):
+ if q.db.HasFeature(feature.DefaultPlaceholder) {
+ b = append(b, "DEFAULT"...)
+ } else if f.SQLDefault != "" {
+ b = append(b, f.SQLDefault...)
+ } else {
+ b = append(b, "NULL"...)
+ }
+ q.addReturningField(f)
+ default:
+ b = f.AppendValue(gen, b, strct)
+ }
+ }
+
+ for i, v := range q.extraValues {
+ if i > 0 || len(fields) > 0 {
+ b = append(b, ", "...)
+ }
+
+ b, err = v.value.AppendQuery(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return b, nil
+}
+
+func (q *InsertQuery) appendSliceValues(
+ gen schema.QueryGen, b []byte, fields []*schema.Field, slice reflect.Value,
+) (_ []byte, err error) {
+ if gen.IsNop() {
+ return q.appendStructValues(gen, b, fields, reflect.Value{})
+ }
+
+ sliceLen := slice.Len()
+ for i := 0; i < sliceLen; i++ {
+ if i > 0 {
+ b = append(b, "), ("...)
+ } + el := indirect(slice.Index(i)) + b, err = q.appendStructValues(gen, b, fields, el) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (q *InsertQuery) getFields() ([]*schema.Field, error) { + hasIdentity := q.db.HasFeature(feature.Identity) + + if len(q.columns) > 0 || q.db.HasFeature(feature.DefaultPlaceholder) && !hasIdentity { + return q.baseQuery.getFields() + } + + var strct reflect.Value + + switch model := q.tableModel.(type) { + case *structTableModel: + strct = model.strct + case *sliceTableModel: + if model.sliceLen == 0 { + return nil, fmt.Errorf("bun: Insert(empty %T)", model.slice.Type()) + } + strct = indirect(model.slice.Index(0)) + default: + return nil, errNilModel + } + + fields := make([]*schema.Field, 0, len(q.table.Fields)) + + for _, f := range q.table.Fields { + if hasIdentity && f.AutoIncrement { + q.addReturningField(f) + continue + } + if f.NotNull && q.marshalsToDefault(f, strct) { + q.addReturningField(f) + continue + } + fields = append(fields, f) + } + + return fields, nil +} + +// marshalsToDefault checks if the value will be marshaled as DEFAULT or NULL (if DEFAULT placeholder is not supported) +// when appending it to the VALUES clause in place of the given field. +func (q InsertQuery) marshalsToDefault(f *schema.Field, v reflect.Value) bool { + return (f.IsPtr && f.HasNilValue(v)) || + (f.HasZeroValue(v) && (f.NullZero || f.SQLDefault != "")) +} + +func (q *InsertQuery) appendFields( + gen schema.QueryGen, b []byte, fields []*schema.Field, +) []byte { + b = appendColumns(b, "", fields) + for i, v := range q.extraValues { + if i > 0 || len(fields) > 0 { + b = append(b, ", "...) + } + b = gen.AppendIdent(b, v.column) + } + return b +} + +//------------------------------------------------------------------------------ + +func (q *InsertQuery) On(s string, args ...any) *InsertQuery { + q.on = schema.SafeQuery(s, args) + return q +} + +func (q *InsertQuery) Set(query string, args ...any) *InsertQuery { + q.addSet(schema.SafeQuery(query, args)) + return q +} + +func (q *InsertQuery) SetValues(values *ValuesQuery) *InsertQuery { + q.setValues = values + return q +} + +func (q *InsertQuery) appendOn(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.on.IsZero() { + return b, nil + } + + b = append(b, " ON "...) + b, err = q.on.AppendQuery(gen, b) + if err != nil { + return nil, err + } + + if len(q.set) > 0 || q.setValues != nil { + if gen.HasFeature(feature.InsertOnDuplicateKey) { + b = append(b, ' ') + } else { + b = append(b, " SET "...) + } + + b, err = q.appendSet(gen, b) + if err != nil { + return nil, err + } + } else if q.onConflictDoUpdate() { + fields, err := q.getDataFields() + if err != nil { + return nil, err + } + b = q.appendSetExcluded(b, fields) + } else if q.onDuplicateKeyUpdate() { + fields, err := q.getDataFields() + if err != nil { + return nil, err + } + b = q.appendSetValues(b, fields) + } + + if len(q.where) > 0 { + b = append(b, " WHERE "...) + + b, err = appendWhere(gen, b, q.where) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (q *InsertQuery) onConflictDoUpdate() bool { + return strings.HasSuffix(strings.ToUpper(q.on.Query), " DO UPDATE") +} + +func (q *InsertQuery) onDuplicateKeyUpdate() bool { + return strings.ToUpper(q.on.Query) == "DUPLICATE KEY UPDATE" +} + +func (q *InsertQuery) appendSetExcluded(b []byte, fields []*schema.Field) []byte { + b = append(b, " SET "...) + for i, f := range fields { + if i > 0 { + b = append(b, ", "...) + } + b = append(b, f.SQLName...) 
+ b = append(b, " = EXCLUDED."...) + b = append(b, f.SQLName...) + } + return b +} + +func (q *InsertQuery) appendSetValues(b []byte, fields []*schema.Field) []byte { + b = append(b, " "...) + for i, f := range fields { + if i > 0 { + b = append(b, ", "...) + } + b = append(b, f.SQLName...) + b = append(b, " = VALUES("...) + b = append(b, f.SQLName...) + b = append(b, ")"...) + } + return b +} + +//------------------------------------------------------------------------------ + +func (q *InsertQuery) Scan(ctx context.Context, dest ...any) error { + _, err := q.scanOrExec(ctx, dest, true) + return err +} + +func (q *InsertQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + return q.scanOrExec(ctx, dest, len(dest) > 0) +} + +func (q *InsertQuery) scanOrExec( + ctx context.Context, dest []any, hasDest bool, +) (sql.Result, error) { + if q.err != nil { + return nil, q.err + } + + if q.table != nil { + if err := q.beforeInsertHook(ctx); err != nil { + return nil, err + } + } + + // Run append model hooks before generating the query. + if err := q.beforeAppendModel(ctx, q); err != nil { + return nil, err + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + // Generate the query before checking hasReturning. + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.InsertReturning|feature.Output)) + var model Model + + if useScan { + var err error + model, err = q.getModel(dest) + if err != nil { + return nil, err + } + } + + query := internal.String(queryBytes) + var res sql.Result + + if useScan { + res, err = q.scan(ctx, q, query, model, hasDest) + if err != nil { + return nil, err + } + } else { + res, err = q.exec(ctx, q, query) + if err != nil { + return nil, err + } + + if err := q.tryLastInsertID(res, dest); err != nil { + return nil, err + } + } + + if q.table != nil { + if err := q.afterInsertHook(ctx); err != nil { + return nil, err + } + } + + return res, nil +} + +func (q *InsertQuery) beforeInsertHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(BeforeInsertHook); ok { + if err := hook.BeforeInsert(ctx, q); err != nil { + return err + } + } + return nil +} + +func (q *InsertQuery) afterInsertHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(AfterInsertHook); ok { + if err := hook.AfterInsert(ctx, q); err != nil { + return err + } + } + return nil +} + +func (q *InsertQuery) tryLastInsertID(res sql.Result, dest []any) error { + if q.db.HasFeature(feature.Returning) || + q.db.HasFeature(feature.Output) || + q.table == nil || + len(q.table.PKs) != 1 || + !q.table.PKs[0].AutoIncrement { + return nil + } + + id, err := res.LastInsertId() + if err != nil { + return err + } + if id == 0 { + return nil + } + + model, err := q.getModel(dest) + if err != nil { + return err + } + + pk := q.table.PKs[0] + switch model := model.(type) { + case *structTableModel: + if err := pk.ScanValue(model.strct, id); err != nil { + return err + } + case *sliceTableModel: + sliceLen := model.slice.Len() + for i := 0; i < sliceLen; i++ { + strct := indirect(model.slice.Index(i)) + if err := pk.ScanValue(strct, id); err != nil { + return err + } + id++ + } + } + + return nil +} + +// String returns the generated SQL query string. The InsertQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. 
+func (q *InsertQuery) String() string { + buf, err := q.AppendQuery(q.db.QueryGen(), nil) + if err != nil { + panic(err) + } + return string(buf) +} diff --git a/vendor/github.com/uptrace/bun/query_merge.go b/vendor/github.com/uptrace/bun/query_merge.go new file mode 100644 index 0000000..0b07e01 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_merge.go @@ -0,0 +1,351 @@ +package bun + +import ( + "context" + "database/sql" + "errors" + + "github.com/uptrace/bun/dialect" + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type MergeQuery struct { + baseQuery + returningQuery + + using schema.QueryWithArgs + on schema.QueryWithArgs + when []schema.QueryAppender + comment string +} + +var _ Query = (*MergeQuery)(nil) + +func NewMergeQuery(db *DB) *MergeQuery { + q := &MergeQuery{ + baseQuery: baseQuery{ + db: db, + }, + } + if q.db.dialect.Name() != dialect.MSSQL && q.db.dialect.Name() != dialect.PG { + q.setErr(errors.New("bun: merge not supported for current dialect")) + } + return q +} + +func (q *MergeQuery) Conn(db IConn) *MergeQuery { + q.setConn(db) + return q +} + +func (q *MergeQuery) Model(model any) *MergeQuery { + q.setModel(model) + return q +} + +func (q *MergeQuery) Err(err error) *MergeQuery { + q.setErr(err) + return q +} + +// Apply calls each function in fns, passing the MergeQuery as an argument. +func (q *MergeQuery) Apply(fns ...func(*MergeQuery) *MergeQuery) *MergeQuery { + for _, fn := range fns { + if fn != nil { + q = fn(q) + } + } + return q +} + +func (q *MergeQuery) With(name string, query Query) *MergeQuery { + q.addWith(NewWithQuery(name, query)) + return q +} + +func (q *MergeQuery) WithRecursive(name string, query Query) *MergeQuery { + q.addWith(NewWithQuery(name, query).Recursive()) + return q +} + +func (q *MergeQuery) WithQuery(query *WithQuery) *MergeQuery { + q.addWith(query) + return q +} + +// ------------------------------------------------------------------------------ + +func (q *MergeQuery) Table(tables ...string) *MergeQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *MergeQuery) TableExpr(query string, args ...any) *MergeQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *MergeQuery) ModelTableExpr(query string, args ...any) *MergeQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +// Returning adds a RETURNING clause to the query. +// +// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`. +// Supported for PostgreSQL 17+ and MSSQL (via OUTPUT clause) +func (q *MergeQuery) Returning(query string, args ...any) *MergeQuery { + q.addReturning(schema.SafeQuery(query, args)) + return q +} + +//------------------------------------------------------------------------------ + +func (q *MergeQuery) Using(s string, args ...any) *MergeQuery { + q.using = schema.SafeQuery(s, args) + return q +} + +func (q *MergeQuery) On(s string, args ...any) *MergeQuery { + q.on = schema.SafeQuery(s, args) + return q +} + +// WhenInsert for when insert clause. 
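+//
+// A minimal upsert-style sketch, assuming a hypothetical User model and a
+// src slice of incoming rows (all names are illustrative):
+//
+//	_, err := db.NewMerge().
+//		Model((*User)(nil)).
+//		With("_data", db.NewValues(&src)).
+//		Using("_data").
+//		On("?TableAlias.email = _data.email").
+//		WhenUpdate("MATCHED", func(q *bun.UpdateQuery) *bun.UpdateQuery {
+//			return q.Set("name = _data.name")
+//		}).
+//		WhenInsert("NOT MATCHED", func(q *bun.InsertQuery) *bun.InsertQuery {
+//			return q.Value("email", "_data.email").Value("name", "_data.name")
+//		}).
+//		Exec(ctx)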
+func (q *MergeQuery) WhenInsert(expr string, fn func(q *InsertQuery) *InsertQuery) *MergeQuery {
+ sq := NewInsertQuery(q.db)
+ // Apply the model as the default for the subquery, since appendColumnsValues requires it.
+ if q.model != nil {
+ sq = sq.Model(q.model)
+ }
+ sq = sq.Apply(fn)
+ q.when = append(q.when, &whenInsert{expr: expr, query: sq})
+ return q
+}
+
+// WhenUpdate for when update clause.
+func (q *MergeQuery) WhenUpdate(expr string, fn func(q *UpdateQuery) *UpdateQuery) *MergeQuery {
+ sq := NewUpdateQuery(q.db)
+ // Apply the model as the default for the subquery.
+ if q.model != nil {
+ sq = sq.Model(q.model)
+ }
+ sq = sq.Apply(fn)
+ q.when = append(q.when, &whenUpdate{expr: expr, query: sq})
+ return q
+}
+
+// WhenDelete for when delete clause.
+func (q *MergeQuery) WhenDelete(expr string) *MergeQuery {
+ q.when = append(q.when, &whenDelete{expr: expr})
+ return q
+}
+
+// When for raw expression clause.
+func (q *MergeQuery) When(expr string, args ...any) *MergeQuery {
+ q.when = append(q.when, schema.SafeQuery(expr, args))
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+// Comment adds a comment to the query, wrapped by /* ... */.
+func (q *MergeQuery) Comment(comment string) *MergeQuery {
+ q.comment = comment
+ return q
+}
+
+//------------------------------------------------------------------------------
+
+func (q *MergeQuery) Operation() string {
+ return "MERGE"
+}
+
+func (q *MergeQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ b = appendComment(b, q.comment)
+
+ gen = formatterWithModel(gen, q)
+
+ b, err = q.appendWith(gen, b)
+ if err != nil {
+ return nil, err
+ }
+
+ b = append(b, "MERGE "...)
+ if q.db.dialect.Name() == dialect.PG {
+ b = append(b, "INTO "...)
+ }
+
+ b, err = q.appendFirstTableWithAlias(gen, b)
+ if err != nil {
+ return nil, err
+ }
+
+ b = append(b, " USING "...)
+ b, err = q.using.AppendQuery(gen, b)
+ if err != nil {
+ return nil, err
+ }
+
+ b = append(b, " ON "...)
+ b, err = q.on.AppendQuery(gen, b)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, w := range q.when {
+ b = append(b, " WHEN "...)
+ b, err = w.AppendQuery(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if q.hasFeature(feature.Output) && q.hasReturning() {
+ b = append(b, " OUTPUT "...)
+ b, err = q.appendOutput(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if q.hasFeature(feature.MergeReturning) && q.hasReturning() {
+ b = append(b, " RETURNING "...)
+ b, err = q.appendReturning(gen, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // A MERGE statement must be terminated by a semi-colon (;).
+ b = append(b, ";"...)
+
+ return b, nil
+}
+
+//------------------------------------------------------------------------------
+
+func (q *MergeQuery) Scan(ctx context.Context, dest ...any) error {
+ _, err := q.scanOrExec(ctx, dest, true)
+ return err
+}
+
+func (q *MergeQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
+ return q.scanOrExec(ctx, dest, len(dest) > 0)
+}
+
+func (q *MergeQuery) scanOrExec(
+ ctx context.Context, dest []any, hasDest bool,
+) (sql.Result, error) {
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // Run append model hooks before generating the query.
+ if err := q.beforeAppendModel(ctx, q); err != nil {
+ return nil, err
+ }
+
+ // if a comment is propagated via the context, use it
+ setCommentFromContext(ctx, q)
+
+ // Generate the query before checking hasReturning.
+ queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.InsertReturning|feature.MergeReturning|feature.Output)) + var model Model + + if useScan { + var err error + model, err = q.getModel(dest) + if err != nil { + return nil, err + } + } + + query := internal.String(queryBytes) + var res sql.Result + + if useScan { + res, err = q.scan(ctx, q, query, model, true) + if err != nil { + return nil, err + } + } else { + res, err = q.exec(ctx, q, query) + if err != nil { + return nil, err + } + } + + return res, nil +} + +// String returns the generated SQL query string. The MergeQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. +func (q *MergeQuery) String() string { + buf, err := q.AppendQuery(q.db.QueryGen(), nil) + if err != nil { + panic(err) + } + return string(buf) +} + +//------------------------------------------------------------------------------ + +type whenInsert struct { + expr string + query *InsertQuery +} + +func (w *whenInsert) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + b = append(b, w.expr...) + if w.query != nil { + b = append(b, " THEN INSERT"...) + b, err = w.query.appendColumnsValues(gen, b, true) + if err != nil { + return nil, err + } + } + return b, nil +} + +type whenUpdate struct { + expr string + query *UpdateQuery +} + +func (w *whenUpdate) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + b = append(b, w.expr...) + if w.query != nil { + b = append(b, " THEN UPDATE SET "...) + b, err = w.query.appendSet(gen, b) + if err != nil { + return nil, err + } + } + return b, nil +} + +type whenDelete struct { + expr string +} + +func (w *whenDelete) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + b = append(b, w.expr...) + b = append(b, " THEN DELETE"...) + return b, nil +} diff --git a/vendor/github.com/uptrace/bun/query_raw.go b/vendor/github.com/uptrace/bun/query_raw.go new file mode 100644 index 0000000..5af84c5 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_raw.go @@ -0,0 +1,107 @@ +package bun + +import ( + "context" + "database/sql" + + "github.com/uptrace/bun/schema" +) + +type RawQuery struct { + baseQuery + + query string + args []any + comment string +} + +func NewRawQuery(db *DB, query string, args ...any) *RawQuery { + return &RawQuery{ + baseQuery: baseQuery{ + db: db, + }, + query: query, + args: args, + } +} + +func (q *RawQuery) Conn(db IConn) *RawQuery { + q.setConn(db) + return q +} + +func (q *RawQuery) Err(err error) *RawQuery { + q.setErr(err) + return q +} + +func (q *RawQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + return q.scanOrExec(ctx, dest, len(dest) > 0) +} + +func (q *RawQuery) Scan(ctx context.Context, dest ...any) error { + _, err := q.scanOrExec(ctx, dest, true) + return err +} + +// Comment adds a comment to the query, wrapped by /* ... */. 
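+//
+// A minimal sketch (query text and label are illustrative):
+//
+//	var count int
+//	err := db.NewRaw("SELECT count(*) FROM users").
+//		Comment("nightly-report").
+//		Scan(ctx, &count)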
+func (q *RawQuery) Comment(comment string) *RawQuery { + q.comment = comment + return q +} + +func (q *RawQuery) scanOrExec( + ctx context.Context, dest []any, hasDest bool, +) (sql.Result, error) { + if q.err != nil { + return nil, q.err + } + + var model Model + var err error + + if hasDest { + model, err = q.getModel(dest) + if err != nil { + return nil, err + } + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + query := q.db.format(q.query, q.args) + var res sql.Result + + if hasDest { + res, err = q.scan(ctx, q, query, model, hasDest) + } else { + res, err = q.exec(ctx, q, query) + } + + if err != nil { + return nil, err + } + + return res, nil +} + +func (q *RawQuery) AppendQuery(gen schema.QueryGen, b []byte) ([]byte, error) { + b = appendComment(b, q.comment) + + return gen.AppendQuery(b, q.query, q.args...), nil +} + +func (q *RawQuery) Operation() string { + return "SELECT" +} + +// String returns the generated SQL query string. The RawQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. +func (q *RawQuery) String() string { + buf, err := q.AppendQuery(q.db.QueryGen(), nil) + if err != nil { + panic(err) + } + return string(buf) +} diff --git a/vendor/github.com/uptrace/bun/query_select.go b/vendor/github.com/uptrace/bun/query_select.go new file mode 100644 index 0000000..23e67ed --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_select.go @@ -0,0 +1,1376 @@ +package bun + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "sync" + + "github.com/uptrace/bun/dialect" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type union struct { + expr string + query *SelectQuery +} + +type SelectQuery struct { + whereBaseQuery + idxHintsQuery + orderLimitOffsetQuery + + distinctOn []schema.QueryWithArgs + joins []joinQuery + group []schema.QueryWithArgs + having []schema.QueryWithArgs + selFor schema.QueryWithArgs + + union []union + comment string +} + +var _ Query = (*SelectQuery)(nil) + +func NewSelectQuery(db *DB) *SelectQuery { + return &SelectQuery{ + whereBaseQuery: whereBaseQuery{ + baseQuery: baseQuery{ + db: db, + }, + }, + } +} + +func (q *SelectQuery) Conn(db IConn) *SelectQuery { + q.setConn(db) + return q +} + +func (q *SelectQuery) Model(model any) *SelectQuery { + q.setModel(model) + return q +} + +func (q *SelectQuery) Err(err error) *SelectQuery { + q.setErr(err) + return q +} + +// Apply calls each function in fns, passing the SelectQuery as an argument. 
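+// Nil functions are skipped. Apply is handy for composing reusable query options;
+// a minimal sketch (names are illustrative):
+//
+//	func active(q *bun.SelectQuery) *bun.SelectQuery {
+//		return q.Where("active = TRUE")
+//	}
+//
+//	q := db.NewSelect().Model(&users).Apply(active)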
+func (q *SelectQuery) Apply(fns ...func(*SelectQuery) *SelectQuery) *SelectQuery { + for _, fn := range fns { + if fn != nil { + q = fn(q) + } + } + return q +} + +func (q *SelectQuery) With(name string, query Query) *SelectQuery { + q.addWith(NewWithQuery(name, query)) + return q +} + +func (q *SelectQuery) WithRecursive(name string, query Query) *SelectQuery { + q.addWith(NewWithQuery(name, query).Recursive()) + return q +} + +func (q *SelectQuery) WithQuery(query *WithQuery) *SelectQuery { + q.addWith(query) + return q +} + +func (q *SelectQuery) Distinct() *SelectQuery { + q.distinctOn = make([]schema.QueryWithArgs, 0) + return q +} + +func (q *SelectQuery) DistinctOn(query string, args ...any) *SelectQuery { + q.distinctOn = append(q.distinctOn, schema.SafeQuery(query, args)) + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) Table(tables ...string) *SelectQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *SelectQuery) TableExpr(query string, args ...any) *SelectQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *SelectQuery) ModelTableExpr(query string, args ...any) *SelectQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) Column(columns ...string) *SelectQuery { + for _, column := range columns { + q.addColumn(schema.UnsafeIdent(column)) + } + return q +} + +func (q *SelectQuery) ColumnExpr(query string, args ...any) *SelectQuery { + q.addColumn(schema.SafeQuery(query, args)) + return q +} + +func (q *SelectQuery) ExcludeColumn(columns ...string) *SelectQuery { + q.excludeColumn(columns) + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) WherePK(cols ...string) *SelectQuery { + q.addWhereCols(cols) + return q +} + +func (q *SelectQuery) Where(query string, args ...any) *SelectQuery { + q.addWhere(schema.SafeQueryWithSep(query, args, " AND ")) + return q +} + +func (q *SelectQuery) WhereOr(query string, args ...any) *SelectQuery { + q.addWhere(schema.SafeQueryWithSep(query, args, " OR ")) + return q +} + +func (q *SelectQuery) WhereGroup(sep string, fn func(*SelectQuery) *SelectQuery) *SelectQuery { + saved := q.where + q.where = nil + + q = fn(q) + + where := q.where + q.where = saved + + q.addWhereGroup(sep, where) + + return q +} + +func (q *SelectQuery) WhereDeleted() *SelectQuery { + q.whereDeleted() + return q +} + +func (q *SelectQuery) WhereAllWithDeleted() *SelectQuery { + q.whereAllWithDeleted() + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) UseIndex(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addUseIndex(indexes...) + } + return q +} + +func (q *SelectQuery) UseIndexForJoin(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addUseIndexForJoin(indexes...) + } + return q +} + +func (q *SelectQuery) UseIndexForOrderBy(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addUseIndexForOrderBy(indexes...) + } + return q +} + +func (q *SelectQuery) UseIndexForGroupBy(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addUseIndexForGroupBy(indexes...) 
+ } + return q +} + +func (q *SelectQuery) IgnoreIndex(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addIgnoreIndex(indexes...) + } + return q +} + +func (q *SelectQuery) IgnoreIndexForJoin(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addIgnoreIndexForJoin(indexes...) + } + return q +} + +func (q *SelectQuery) IgnoreIndexForOrderBy(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addIgnoreIndexForOrderBy(indexes...) + } + return q +} + +func (q *SelectQuery) IgnoreIndexForGroupBy(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addIgnoreIndexForGroupBy(indexes...) + } + return q +} + +func (q *SelectQuery) ForceIndex(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addForceIndex(indexes...) + } + return q +} + +func (q *SelectQuery) ForceIndexForJoin(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addForceIndexForJoin(indexes...) + } + return q +} + +func (q *SelectQuery) ForceIndexForOrderBy(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addForceIndexForOrderBy(indexes...) + } + return q +} + +func (q *SelectQuery) ForceIndexForGroupBy(indexes ...string) *SelectQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addForceIndexForGroupBy(indexes...) + } + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) Group(columns ...string) *SelectQuery { + for _, column := range columns { + q.group = append(q.group, schema.UnsafeIdent(column)) + } + return q +} + +func (q *SelectQuery) GroupExpr(group string, args ...any) *SelectQuery { + q.group = append(q.group, schema.SafeQuery(group, args)) + return q +} + +func (q *SelectQuery) Having(having string, args ...any) *SelectQuery { + q.having = append(q.having, schema.SafeQuery(having, args)) + return q +} + +func (q *SelectQuery) Order(orders ...string) *SelectQuery { + q.addOrder(orders...) + return q +} + +func (q *SelectQuery) OrderBy(colName string, sortDir Order) *SelectQuery { + q.addOrderBy(colName, sortDir) + return q +} + +func (q *SelectQuery) OrderExpr(query string, args ...any) *SelectQuery { + q.addOrderExpr(query, args...) 
+ return q +} + +func (q *SelectQuery) Limit(n int) *SelectQuery { + q.setLimit(n) + return q +} + +func (q *SelectQuery) Offset(n int) *SelectQuery { + q.setOffset(n) + return q +} + +func (q *SelectQuery) For(s string, args ...any) *SelectQuery { + q.selFor = schema.SafeQuery(s, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) Union(other *SelectQuery) *SelectQuery { + return q.addUnion(" UNION ", other) +} + +func (q *SelectQuery) UnionAll(other *SelectQuery) *SelectQuery { + return q.addUnion(" UNION ALL ", other) +} + +func (q *SelectQuery) Intersect(other *SelectQuery) *SelectQuery { + return q.addUnion(" INTERSECT ", other) +} + +func (q *SelectQuery) IntersectAll(other *SelectQuery) *SelectQuery { + return q.addUnion(" INTERSECT ALL ", other) +} + +func (q *SelectQuery) Except(other *SelectQuery) *SelectQuery { + return q.addUnion(" EXCEPT ", other) +} + +func (q *SelectQuery) ExceptAll(other *SelectQuery) *SelectQuery { + return q.addUnion(" EXCEPT ALL ", other) +} + +func (q *SelectQuery) addUnion(expr string, other *SelectQuery) *SelectQuery { + q.union = append(q.union, union{ + expr: expr, + query: other, + }) + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) Join(join string, args ...any) *SelectQuery { + q.joins = append(q.joins, joinQuery{ + join: schema.SafeQuery(join, args), + }) + return q +} + +func (q *SelectQuery) JoinOn(cond string, args ...any) *SelectQuery { + return q.joinOn(cond, args, " AND ") +} + +func (q *SelectQuery) JoinOnOr(cond string, args ...any) *SelectQuery { + return q.joinOn(cond, args, " OR ") +} + +func (q *SelectQuery) joinOn(cond string, args []any, sep string) *SelectQuery { + if len(q.joins) == 0 { + q.setErr(errors.New("bun: query has no joins")) + return q + } + j := &q.joins[len(q.joins)-1] + j.on = append(j.on, schema.SafeQueryWithSep(cond, args, sep)) + return q +} + +//------------------------------------------------------------------------------ + +// Relation adds a relation to the query. +func (q *SelectQuery) Relation(name string, apply ...func(*SelectQuery) *SelectQuery) *SelectQuery { + if len(apply) > 1 { + panic("only one apply function is supported") + } + + if q.tableModel == nil { + q.setErr(errNilModel) + return q + } + + join := q.tableModel.join(name) + if join == nil { + q.setErr(fmt.Errorf("%s does not have relation=%q", q.table, name)) + return q + } + + q.applyToRelation(join, apply...) + + return q +} + +type RelationOpts struct { + // Apply applies additional options to the relation. + Apply func(*SelectQuery) *SelectQuery + // AdditionalJoinOnConditions adds additional conditions to the JOIN ON clause. + AdditionalJoinOnConditions []schema.QueryWithArgs +} + +// RelationWithOpts adds a relation to the query with additional options. 
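+// A minimal sketch (the relation and column names are illustrative):
+//
+//	q := db.NewSelect().Model(&user).RelationWithOpts("Profile", bun.RelationOpts{
+//		Apply: func(q *bun.SelectQuery) *bun.SelectQuery {
+//			return q.ExcludeColumn("biography")
+//		},
+//	})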
+func (q *SelectQuery) RelationWithOpts(name string, opts RelationOpts) *SelectQuery { + if q.tableModel == nil { + q.setErr(errNilModel) + return q + } + + join := q.tableModel.join(name) + if join == nil { + q.setErr(fmt.Errorf("%s does not have relation=%q", q.table, name)) + return q + } + + if opts.Apply != nil { + q.applyToRelation(join, opts.Apply) + } + + if len(opts.AdditionalJoinOnConditions) > 0 { + join.additionalJoinOnConditions = opts.AdditionalJoinOnConditions + } + + return q +} + +func (q *SelectQuery) applyToRelation(join *relationJoin, apply ...func(*SelectQuery) *SelectQuery) { + var apply1, apply2 func(*SelectQuery) *SelectQuery + + if len(join.Relation.Condition) > 0 { + apply1 = func(q *SelectQuery) *SelectQuery { + for _, opt := range join.Relation.Condition { + q.addWhere(schema.SafeQueryWithSep(opt, nil, " AND ")) + } + + return q + } + } + + if len(apply) == 1 { + apply2 = apply[0] + } + + join.apply = func(q *SelectQuery) *SelectQuery { + if apply1 != nil { + q = apply1(q) + } + if apply2 != nil { + q = apply2(q) + } + + return q + } +} + +func (q *SelectQuery) forEachInlineRelJoin(fn func(*relationJoin) error) error { + if q.tableModel == nil { + return nil + } + return q._forEachInlineRelJoin(fn, q.tableModel.getJoins()) +} + +func (q *SelectQuery) _forEachInlineRelJoin(fn func(*relationJoin) error, joins []relationJoin) error { + for i := range joins { + j := &joins[i] + switch j.Relation.Type { + case schema.HasOneRelation, schema.BelongsToRelation: + if err := fn(j); err != nil { + return err + } + if err := q._forEachInlineRelJoin(fn, j.JoinModel.getJoins()); err != nil { + return err + } + } + } + return nil +} + +func (q *SelectQuery) selectJoins(ctx context.Context, joins []relationJoin) error { + for i := range joins { + j := &joins[i] + + var err error + + switch j.Relation.Type { + case schema.HasOneRelation, schema.BelongsToRelation: + err = q.selectJoins(ctx, j.JoinModel.getJoins()) + case schema.HasManyRelation: + err = j.selectMany(ctx, q.db.NewSelect().Conn(q.conn)) + case schema.ManyToManyRelation: + err = j.selectM2M(ctx, q.db.NewSelect().Conn(q.conn)) + default: + panic("not reached") + } + + if err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *SelectQuery) Comment(comment string) *SelectQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) Operation() string { + return "SELECT" +} + +func (q *SelectQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + b = appendComment(b, q.comment) + + return q.appendQuery(gen, b, false) +} + +func (q *SelectQuery) appendQuery( + gen schema.QueryGen, b []byte, count bool, +) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + gen = formatterWithModel(gen, q) + + cteCount := count && (len(q.group) > 0 || q.distinctOn != nil) + if cteCount { + b = append(b, "WITH _count_wrapper AS ("...) + } + + if len(q.union) > 0 { + b = append(b, '(') + } + + b, err = q.appendWith(gen, b) + if err != nil { + return nil, err + } + + if err := q.forEachInlineRelJoin(func(j *relationJoin) error { + j.applyTo(q) + return nil + }); err != nil { + return nil, err + } + + b = append(b, "SELECT "...) + + if len(q.distinctOn) > 0 { + b = append(b, "DISTINCT ON ("...) 
+ for i, app := range q.distinctOn { + if i > 0 { + b = append(b, ", "...) + } + b, err = app.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + b = append(b, ") "...) + } else if q.distinctOn != nil { + b = append(b, "DISTINCT "...) + } + + if count && !cteCount { + b = append(b, "count(*)"...) + } else { + // MSSQL: allows Limit() without Order() as per https://stackoverflow.com/a/36156953 + if q.limit > 0 && len(q.order) == 0 && gen.Dialect().Name() == dialect.MSSQL { + b = append(b, "0 AS _temp_sort, "...) + } + + b, err = q.appendColumns(gen, b) + if err != nil { + return nil, err + } + } + + if q.hasTables() { + b, err = q.appendTables(gen, b) + if err != nil { + return nil, err + } + } + + b, err = q.appendIndexHints(gen, b) + if err != nil { + return nil, err + } + + if err := q.forEachInlineRelJoin(func(j *relationJoin) error { + b = append(b, ' ') + b, err = j.appendHasOneJoin(gen, b, q) + return err + }); err != nil { + return nil, err + } + + for _, join := range q.joins { + b, err = join.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + b, err = q.appendWhere(gen, b, true) + if err != nil { + return nil, err + } + + if len(q.group) > 0 { + b = append(b, " GROUP BY "...) + for i, f := range q.group { + if i > 0 { + b = append(b, ", "...) + } + b, err = f.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + } + + if len(q.having) > 0 { + b = append(b, " HAVING "...) + for i, f := range q.having { + if i > 0 { + b = append(b, " AND "...) + } + b = append(b, '(') + b, err = f.AppendQuery(gen, b) + if err != nil { + return nil, err + } + b = append(b, ')') + } + } + + if !count { + b, err = q.appendOrder(gen, b) + if err != nil { + return nil, err + } + + b, err = q.appendLimitOffset(gen, b) + if err != nil { + return nil, err + } + + if !q.selFor.IsZero() { + b = append(b, " FOR "...) + b, err = q.selFor.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + } + + if len(q.union) > 0 { + b = append(b, ')') + + for _, u := range q.union { + b = append(b, u.expr...) + b = append(b, '(') + b, err = u.query.AppendQuery(gen, b) + if err != nil { + return nil, err + } + b = append(b, ')') + } + } + + if cteCount { + b = append(b, ") SELECT count(*) FROM _count_wrapper"...) + } + + return b, nil +} + +func (q *SelectQuery) appendColumns(gen schema.QueryGen, b []byte) (_ []byte, err error) { + start := len(b) + + switch { + case q.columns != nil: + for i, col := range q.columns { + if i > 0 { + b = append(b, ", "...) + } + + if col.Args == nil && q.table != nil { + if field, ok := q.table.FieldMap[col.Query]; ok { + b = append(b, q.table.SQLAlias...) + b = append(b, '.') + b = append(b, field.SQLName...) + continue + } + } + + b, err = col.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + case q.table != nil: + if len(q.table.Fields) > 10 && gen.IsNop() { + b = append(b, q.table.SQLAlias...) + b = append(b, '.') + b = gen.Dialect().AppendString(b, fmt.Sprintf("%d columns", len(q.table.Fields))) + } else { + b = appendColumns(b, q.table.SQLAlias, q.table.Fields) + } + default: + b = append(b, '*') + } + + if err := q.forEachInlineRelJoin(func(join *relationJoin) error { + if len(b) != start { + b = append(b, ", "...) 
+ start = len(b) + } + + b, err = q.appendInlineRelColumns(gen, b, join) + if err != nil { + return err + } + + return nil + }); err != nil { + return nil, err + } + + b = bytes.TrimSuffix(b, []byte(", ")) + + return b, nil +} + +func (q *SelectQuery) appendInlineRelColumns( + gen schema.QueryGen, b []byte, join *relationJoin, +) (_ []byte, err error) { + if join.columns != nil { + table := join.JoinModel.Table() + for i, col := range join.columns { + if i > 0 { + b = append(b, ", "...) + } + + if col.Args == nil { + if field, ok := table.FieldMap[col.Query]; ok { + b = join.appendAlias(gen, b) + b = append(b, '.') + b = append(b, field.SQLName...) + b = append(b, " AS "...) + b = join.appendAliasColumn(gen, b, field.Name) + continue + } + } + + b, err = col.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + return b, nil + } + + for i, field := range join.JoinModel.Table().Fields { + if i > 0 { + b = append(b, ", "...) + } + b = join.appendAlias(gen, b) + b = append(b, '.') + b = append(b, field.SQLName...) + b = append(b, " AS "...) + b = join.appendAliasColumn(gen, b, field.Name) + } + return b, nil +} + +func (q *SelectQuery) appendTables(gen schema.QueryGen, b []byte) (_ []byte, err error) { + b = append(b, " FROM "...) + return q.appendTablesWithAlias(gen, b) +} + +//------------------------------------------------------------------------------ + +func (q *SelectQuery) Rows(ctx context.Context) (*sql.Rows, error) { + if q.err != nil { + return nil, q.err + } + + if err := q.beforeAppendModel(ctx, q); err != nil { + return nil, err + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + ctx, event := q.db.beforeQuery(ctx, q, query, nil, query, q.model) + rows, err := q.resolveConn(ctx, q).QueryContext(ctx, query) + q.db.afterQuery(ctx, event, nil, err) + return rows, err +} + +func (q *SelectQuery) Exec(ctx context.Context, dest ...any) (res sql.Result, err error) { + if q.err != nil { + return nil, q.err + } + if err := q.beforeAppendModel(ctx, q); err != nil { + return nil, err + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + if len(dest) > 0 { + model, err := q.getModel(dest) + if err != nil { + return nil, err + } + + res, err = q.scan(ctx, q, query, model, true) + if err != nil { + return nil, err + } + } else { + res, err = q.exec(ctx, q, query) + if err != nil { + return nil, err + } + } + + return res, nil +} + +func (q *SelectQuery) Scan(ctx context.Context, dest ...any) error { + _, err := q.scanResult(ctx, dest...) 
+	return err
+}
+
+func (q *SelectQuery) scanResult(ctx context.Context, dest ...any) (sql.Result, error) {
+	if q.err != nil {
+		return nil, q.err
+	}
+
+	model, err := q.getModel(dest)
+	if err != nil {
+		return nil, err
+	}
+	if len(dest) > 0 && q.tableModel != nil && len(q.tableModel.getJoins()) > 0 {
+		for _, j := range q.tableModel.getJoins() {
+			switch j.Relation.Type {
+			case schema.HasManyRelation, schema.ManyToManyRelation:
+				return nil, fmt.Errorf("bun: when querying has-many or many-to-many relationships, use Model instead of the dest parameter in Scan")
+			}
+		}
+	}
+
+	if q.table != nil {
+		if err := q.beforeSelectHook(ctx); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := q.beforeAppendModel(ctx, q); err != nil {
+		return nil, err
+	}
+
+	// if a comment is propagated via the context, use it
+	setCommentFromContext(ctx, q)
+
+	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
+	if err != nil {
+		return nil, err
+	}
+
+	query := internal.String(queryBytes)
+
+	res, err := q.scan(ctx, q, query, model, true)
+	if err != nil {
+		return nil, err
+	}
+
+	if n, _ := res.RowsAffected(); n > 0 {
+		if tableModel, ok := model.(TableModel); ok {
+			if err := q.selectJoins(ctx, tableModel.getJoins()); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if q.table != nil {
+		if err := q.afterSelectHook(ctx); err != nil {
+			return nil, err
+		}
+	}
+
+	return res, nil
+}
+
+func (q *SelectQuery) beforeSelectHook(ctx context.Context) error {
+	if hook, ok := q.table.ZeroIface.(BeforeSelectHook); ok {
+		if err := hook.BeforeSelect(ctx, q); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (q *SelectQuery) afterSelectHook(ctx context.Context) error {
+	if hook, ok := q.table.ZeroIface.(AfterSelectHook); ok {
+		if err := hook.AfterSelect(ctx, q); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (q *SelectQuery) Count(ctx context.Context) (int, error) {
+	if q.err != nil {
+		return 0, q.err
+	}
+
+	// if a comment is propagated via the context, use it
+	setCommentFromContext(ctx, q)
+
+	qq := countQuery{q}
+
+	queryBytes, err := qq.AppendQuery(q.db.gen, nil)
+	if err != nil {
+		return 0, err
+	}
+
+	query := internal.String(queryBytes)
+	ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model)
+
+	var num int
+	err = q.resolveConn(ctx, q).QueryRowContext(ctx, query).Scan(&num)
+
+	q.db.afterQuery(ctx, event, nil, err)
+
+	return num, err
+}
+
+func (q *SelectQuery) ScanAndCount(ctx context.Context, dest ...any) (int, error) {
+	if q.offset == 0 && q.limit == 0 {
+		// If there is no limit and offset, a single query can both scan and count.
+		if res, err := q.scanResult(ctx, dest...); err != nil {
+			return 0, err
+		} else if n, err := res.RowsAffected(); err != nil {
+			return 0, err
+		} else {
+			return int(n), nil
+		}
+	}
+	if q.conn == nil {
+		return q.scanAndCountConcurrently(ctx, dest...)
+	}
+	return q.scanAndCountSeq(ctx, dest...)
+}
+
+func (q *SelectQuery) scanAndCountConcurrently(
+	ctx context.Context, dest ...any,
+) (int, error) {
+	var count int
+	var wg sync.WaitGroup
+	var mu sync.Mutex
+	var firstErr error
+
+	// FIXME: clone should not be needed, because the query is not modified here
+	// and should not be implicitly modified by the Bun lib.
+	countQuery := q.Clone()
+
+	// Don't scan results if the user explicitly set Limit(-1).
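+	// A negative limit acts as a sentinel here: the scan goroutine is skipped
+	// and only the count query runs.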
+ if q.limit >= 0 { + wg.Add(1) + go func() { + defer wg.Done() + + if err := q.Scan(ctx, dest...); err != nil { + mu.Lock() + if firstErr == nil { + firstErr = err + } + mu.Unlock() + } + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + + var err error + count, err = countQuery.Count(ctx) + if err != nil { + mu.Lock() + if firstErr == nil { + firstErr = err + } + mu.Unlock() + } + }() + + wg.Wait() + return count, firstErr +} + +func (q *SelectQuery) scanAndCountSeq(ctx context.Context, dest ...any) (int, error) { + var firstErr error + + // Don't scan results if the user explicitly set Limit(-1). + if q.limit >= 0 { + firstErr = q.Scan(ctx, dest...) + } + + count, err := q.Count(ctx) + if err != nil && firstErr == nil { + firstErr = err + } + + return count, firstErr +} + +func (q *SelectQuery) Exists(ctx context.Context) (bool, error) { + if q.err != nil { + return false, q.err + } + + if q.hasFeature(feature.SelectExists) { + return q.selectExists(ctx) + } + return q.whereExists(ctx) +} + +func (q *SelectQuery) selectExists(ctx context.Context) (bool, error) { + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + qq := selectExistsQuery{q} + + queryBytes, err := qq.AppendQuery(q.db.gen, nil) + if err != nil { + return false, err + } + + query := internal.String(queryBytes) + ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model) + + var exists bool + err = q.resolveConn(ctx, q).QueryRowContext(ctx, query).Scan(&exists) + + q.db.afterQuery(ctx, event, nil, err) + + return exists, err +} + +func (q *SelectQuery) whereExists(ctx context.Context) (bool, error) { + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + qq := whereExistsQuery{q} + + queryBytes, err := qq.AppendQuery(q.db.gen, nil) + if err != nil { + return false, err + } + + query := internal.String(queryBytes) + res, err := q.exec(ctx, qq, query) + if err != nil { + return false, err + } + + n, err := res.RowsAffected() + if err != nil { + return false, err + } + + return n == 1, nil +} + +// String returns the generated SQL query string. The SelectQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. 
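+//
+// String panics if the query cannot be generated; use AppendQuery directly when
+// an error value is preferred over a panic.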
+func (q *SelectQuery) String() string {
+	buf, err := q.AppendQuery(q.db.QueryGen(), nil)
+	if err != nil {
+		panic(err)
+	}
+	return string(buf)
+}
+
+func (q *SelectQuery) Clone() *SelectQuery {
+	if q == nil {
+		return nil
+	}
+
+	cloneArgs := func(args []schema.QueryWithArgs) []schema.QueryWithArgs {
+		if args == nil {
+			return nil
+		}
+		clone := make([]schema.QueryWithArgs, len(args))
+		copy(clone, args)
+		return clone
+	}
+	cloneHints := func(hints *indexHints) *indexHints {
+		if hints == nil {
+			return nil
+		}
+		return &indexHints{
+			names:      cloneArgs(hints.names),
+			forJoin:    cloneArgs(hints.forJoin),
+			forOrderBy: cloneArgs(hints.forOrderBy),
+			forGroupBy: cloneArgs(hints.forGroupBy),
+		}
+	}
+
+	var tableModel TableModel
+	if q.tableModel != nil {
+		tableModel = q.tableModel.clone()
+	}
+	clone := &SelectQuery{
+		whereBaseQuery: whereBaseQuery{
+			baseQuery: baseQuery{
+				db:             q.db,
+				table:          q.table,
+				model:          q.model,
+				tableModel:     tableModel,
+				with:           make([]WithQuery, len(q.with)),
+				tables:         cloneArgs(q.tables),
+				columns:        cloneArgs(q.columns),
+				modelTableName: q.modelTableName,
+			},
+			where: make([]schema.QueryWithSep, len(q.where)),
+		},
+
+		idxHintsQuery: idxHintsQuery{
+			use:    cloneHints(q.idxHintsQuery.use),
+			ignore: cloneHints(q.idxHintsQuery.ignore),
+			force:  cloneHints(q.idxHintsQuery.force),
+		},
+
+		orderLimitOffsetQuery: orderLimitOffsetQuery{
+			order:  cloneArgs(q.order),
+			limit:  q.limit,
+			offset: q.offset,
+		},
+
+		distinctOn: cloneArgs(q.distinctOn),
+		joins:      make([]joinQuery, len(q.joins)),
+		group:      cloneArgs(q.group),
+		having:     cloneArgs(q.having),
+		union:      make([]union, len(q.union)),
+		comment:    q.comment,
+	}
+
+	for i, w := range q.with {
+		clone.with[i] = WithQuery{
+			name:      w.name,
+			recursive: w.recursive,
+			query:     w.query, // TODO: the inner query may need to be cloned as well
+		}
+	}
+
+	if !q.modelTableName.IsZero() {
+		clone.modelTableName = schema.SafeQuery(
+			q.modelTableName.Query,
+			append([]any(nil), q.modelTableName.Args...),
+		)
+	}
+
+	for i, w := range q.where {
+		clone.where[i] = schema.SafeQueryWithSep(
+			w.Query,
+			append([]any(nil), w.Args...),
+			w.Sep,
+		)
+	}
+
+	for i, j := range q.joins {
+		clone.joins[i] = joinQuery{
+			join: schema.SafeQuery(j.join.Query, append([]any(nil), j.join.Args...)),
+			on:   make([]schema.QueryWithSep, len(j.on)),
+		}
+		for k, on := range j.on {
+			clone.joins[i].on[k] = schema.SafeQueryWithSep(
+				on.Query,
+				append([]any(nil), on.Args...),
+				on.Sep,
+			)
+		}
+	}
+
+	for i, u := range q.union {
+		clone.union[i] = union{
+			expr:  u.expr,
+			query: u.query.Clone(),
+		}
+	}
+
+	if !q.selFor.IsZero() {
+		clone.selFor = schema.SafeQuery(
+			q.selFor.Query,
+			append([]any(nil), q.selFor.Args...),
+		)
+	}
+
+	return clone
+}
+
+//------------------------------------------------------------------------------
+
+func (q *SelectQuery) QueryBuilder() QueryBuilder {
+	return &selectQueryBuilder{q}
+}
+
+func (q *SelectQuery) ApplyQueryBuilder(fn func(QueryBuilder) QueryBuilder) *SelectQuery {
+	return fn(q.QueryBuilder()).Unwrap().(*SelectQuery)
+}
+
+type selectQueryBuilder struct {
+	*SelectQuery
+}
+
+func (q *selectQueryBuilder) WhereGroup(
+	sep string, fn func(QueryBuilder) QueryBuilder,
+) QueryBuilder {
+	q.SelectQuery = q.SelectQuery.WhereGroup(sep, func(qs *SelectQuery) *SelectQuery {
+		return fn(q).(*selectQueryBuilder).SelectQuery
+	})
+	return q
+}
+
+func (q *selectQueryBuilder) Where(query string, args ...any) QueryBuilder {
+	q.SelectQuery.Where(query, args...)
+ return q +} + +func (q *selectQueryBuilder) WhereOr(query string, args ...any) QueryBuilder { + q.SelectQuery.WhereOr(query, args...) + return q +} + +func (q *selectQueryBuilder) WhereDeleted() QueryBuilder { + q.SelectQuery.WhereDeleted() + return q +} + +func (q *selectQueryBuilder) WhereAllWithDeleted() QueryBuilder { + q.SelectQuery.WhereAllWithDeleted() + return q +} + +func (q *selectQueryBuilder) WherePK(cols ...string) QueryBuilder { + q.SelectQuery.WherePK(cols...) + return q +} + +func (q *selectQueryBuilder) Unwrap() any { + return q.SelectQuery +} + +//------------------------------------------------------------------------------ + +type joinQuery struct { + join schema.QueryWithArgs + on []schema.QueryWithSep +} + +func (j *joinQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + b = append(b, ' ') + + b, err = j.join.AppendQuery(gen, b) + if err != nil { + return nil, err + } + + if len(j.on) > 0 { + b = append(b, " ON "...) + for i, on := range j.on { + if i > 0 { + b = append(b, on.Sep...) + } + + b = append(b, '(') + b, err = on.AppendQuery(gen, b) + if err != nil { + return nil, err + } + b = append(b, ')') + } + } + + return b, nil +} + +//------------------------------------------------------------------------------ + +type countQuery struct { + *SelectQuery +} + +func (q countQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + return q.appendQuery(gen, b, true) +} + +//------------------------------------------------------------------------------ + +type selectExistsQuery struct { + *SelectQuery +} + +func (q selectExistsQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = append(b, "SELECT EXISTS ("...) + + b, err = q.appendQuery(gen, b, false) + if err != nil { + return nil, err + } + + b = append(b, ")"...) + + return b, nil +} + +//------------------------------------------------------------------------------ + +type whereExistsQuery struct { + *SelectQuery +} + +func (q whereExistsQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = append(b, "SELECT 1 WHERE EXISTS ("...) + + b, err = q.appendQuery(gen, b, false) + if err != nil { + return nil, err + } + + b = append(b, ")"...) + + return b, nil +} diff --git a/vendor/github.com/uptrace/bun/query_table_create.go b/vendor/github.com/uptrace/bun/query_table_create.go new file mode 100644 index 0000000..6414c94 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_table_create.go @@ -0,0 +1,427 @@ +package bun + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "slices" + "strconv" + "strings" + + "github.com/uptrace/bun/dialect" + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/dialect/sqltype" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type CreateTableQuery struct { + baseQuery + + temp bool + ifNotExists bool + fksFromRel bool // Create foreign keys captured in table's relations. + + // varchar changes the default length for VARCHAR columns. + // Because some dialects require that length is always specified for VARCHAR type, + // we will use the exact user-defined type if length is set explicitly, as in `bun:",type:varchar(5)"`, + // but assume the new default length when it's omitted, e.g. `bun:",type:varchar"`. 
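+	// For example, with a default length of 100, a `bun:",type:varchar"` column
+	// is rendered as VARCHAR(100), while `bun:",type:varchar(5)"` stays VARCHAR(5).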
+ varchar int + + fks []schema.QueryWithArgs + partitionBy schema.QueryWithArgs + tablespace schema.QueryWithArgs + comment string +} + +var _ Query = (*CreateTableQuery)(nil) + +func NewCreateTableQuery(db *DB) *CreateTableQuery { + q := &CreateTableQuery{ + baseQuery: baseQuery{ + db: db, + }, + varchar: db.Dialect().DefaultVarcharLen(), + } + return q +} + +func (q *CreateTableQuery) Conn(db IConn) *CreateTableQuery { + q.setConn(db) + return q +} + +func (q *CreateTableQuery) Model(model any) *CreateTableQuery { + q.setModel(model) + return q +} + +func (q *CreateTableQuery) Err(err error) *CreateTableQuery { + q.setErr(err) + return q +} + +// ------------------------------------------------------------------------------ + +func (q *CreateTableQuery) Table(tables ...string) *CreateTableQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *CreateTableQuery) TableExpr(query string, args ...any) *CreateTableQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *CreateTableQuery) ModelTableExpr(query string, args ...any) *CreateTableQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +func (q *CreateTableQuery) ColumnExpr(query string, args ...any) *CreateTableQuery { + q.addColumn(schema.SafeQuery(query, args)) + return q +} + +// ------------------------------------------------------------------------------ + +func (q *CreateTableQuery) Temp() *CreateTableQuery { + q.temp = true + return q +} + +func (q *CreateTableQuery) IfNotExists() *CreateTableQuery { + q.ifNotExists = true + return q +} + +// Varchar sets default length for VARCHAR columns. +func (q *CreateTableQuery) Varchar(n int) *CreateTableQuery { + if n <= 0 { + q.setErr(fmt.Errorf("bun: illegal VARCHAR length: %d", n)) + return q + } + q.varchar = n + return q +} + +func (q *CreateTableQuery) ForeignKey(query string, args ...any) *CreateTableQuery { + q.fks = append(q.fks, schema.SafeQuery(query, args)) + return q +} + +func (q *CreateTableQuery) PartitionBy(query string, args ...any) *CreateTableQuery { + q.partitionBy = schema.SafeQuery(query, args) + return q +} + +func (q *CreateTableQuery) TableSpace(tablespace string) *CreateTableQuery { + q.tablespace = schema.UnsafeIdent(tablespace) + return q +} + +// WithForeignKeys adds a FOREIGN KEY clause for each of the model's existing relations. +func (q *CreateTableQuery) WithForeignKeys() *CreateTableQuery { + q.fksFromRel = true + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *CreateTableQuery) Comment(comment string) *CreateTableQuery { + q.comment = comment + return q +} + +// ------------------------------------------------------------------------------ + +func (q *CreateTableQuery) Operation() string { + return "CREATE TABLE" +} + +func (q *CreateTableQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + if q.table == nil { + return nil, errNilModel + } + + b = append(b, "CREATE "...) + if q.temp { + b = append(b, "TEMP "...) + } + b = append(b, "TABLE "...) + if q.ifNotExists && gen.HasFeature(feature.TableNotExists) { + b = append(b, "IF NOT EXISTS "...) + } + b, err = q.appendFirstTable(gen, b) + if err != nil { + return nil, err + } + + b = append(b, " ("...) + + for i, field := range q.table.Fields { + if i > 0 { + b = append(b, ", "...) 
+		}
+
+		b = append(b, field.SQLName...)
+		b = append(b, " "...)
+		b = q.appendSQLType(b, field)
+		if field.NotNull && q.db.dialect.Name() != dialect.Oracle {
+			b = append(b, " NOT NULL"...)
+		}
+
+		if (field.Identity && gen.HasFeature(feature.GeneratedIdentity)) ||
+			(field.AutoIncrement && (gen.HasFeature(feature.AutoIncrement) || gen.HasFeature(feature.Identity))) {
+			b = q.db.dialect.AppendSequence(b, q.table, field)
+		}
+
+		if field.SQLDefault != "" {
+			b = append(b, " DEFAULT "...)
+			b = append(b, field.SQLDefault...)
+		}
+	}
+
+	for i, col := range q.columns {
+		// Only prepend the comma on subsequent iterations, or if fields/columns were
+		// appended before this. This way, if we are only appending custom column
+		// expressions, we will not produce a syntax error from a leading comma.
+		if i > 0 || len(q.table.Fields) > 0 {
+			b = append(b, ", "...)
+		}
+		b, err = col.AppendQuery(gen, b)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// In SQLite, AUTOINCREMENT is only valid for INTEGER PRIMARY KEY columns, so it might be that
+	// a primary key constraint has already been created in the dialect.AppendSequence() call above.
+	// See sqldialect.Dialect.AppendSequence() for more details.
+	if len(q.table.PKs) > 0 && !bytes.Contains(b, []byte("PRIMARY KEY")) {
+		b = q.appendPKConstraint(b, q.table.PKs)
+	}
+	b = q.appendUniqueConstraints(gen, b)
+
+	if q.fksFromRel {
+		b, err = q.appendFKConstraintsRel(gen, b)
+		if err != nil {
+			return nil, err
+		}
+	}
+	b, err = q.appendFKConstraints(gen, b)
+	if err != nil {
+		return nil, err
+	}
+
+	b = append(b, ")"...)
+
+	if !q.partitionBy.IsZero() {
+		b = append(b, " PARTITION BY "...)
+		b, err = q.partitionBy.AppendQuery(gen, b)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if !q.tablespace.IsZero() {
+		b = append(b, " TABLESPACE "...)
+		b, err = q.tablespace.AppendQuery(gen, b)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return b, nil
+}
+
+func (q *CreateTableQuery) appendSQLType(b []byte, field *schema.Field) []byte {
+	// Most of the time these two will match, but for the cases where DiscoveredSQLType is dialect-specific,
+	// e.g. pgdialect would change sqltype.SmallInt to pgTypeSmallSerial for columns that have `bun:",autoincrement"`.
+	if !strings.EqualFold(field.CreateTableSQLType, field.DiscoveredSQLType) {
+		return append(b, field.CreateTableSQLType...)
+	}
+
+	// For all common SQL types except VARCHAR, both UserDefinedSQLType and DiscoveredSQLType specify the correct type,
+	// and we needn't modify it. For VARCHAR columns, we check whether a valid length has been set via .Varchar(int).
+	if !strings.EqualFold(field.CreateTableSQLType, sqltype.VarChar) || q.varchar <= 0 {
+		return append(b, field.CreateTableSQLType...)
+	}
+
+	if q.db.dialect.Name() == dialect.Oracle {
+		b = append(b, "VARCHAR2"...)
+	} else {
+		b = append(b, sqltype.VarChar...)
+	}
+	b = append(b, "("...)
+	b = strconv.AppendInt(b, int64(q.varchar), 10)
+	b = append(b, ")"...)
+	return b
+}
+
+func (q *CreateTableQuery) appendUniqueConstraints(gen schema.QueryGen, b []byte) []byte {
+	unique := q.table.Unique
+
+	keys := make([]string, 0, len(unique))
+	for key := range unique {
+		keys = append(keys, key)
+	}
+	slices.Sort(keys)
+
+	for _, key := range keys {
+		if key == "" {
+			for _, field := range unique[key] {
+				b = q.appendUniqueConstraint(gen, b, key, field)
+			}
+			continue
+		}
+		b = q.appendUniqueConstraint(gen, b, key, unique[key]...)
+ } + + return b +} + +func (q *CreateTableQuery) appendUniqueConstraint( + gen schema.QueryGen, b []byte, name string, fields ...*schema.Field, +) []byte { + if name != "" { + b = append(b, ", CONSTRAINT "...) + b = gen.AppendIdent(b, name) + } else { + b = append(b, ","...) + } + b = append(b, " UNIQUE ("...) + b = appendColumns(b, "", fields) + b = append(b, ")"...) + return b +} + +// appendFKConstraintsRel appends a FOREIGN KEY clause for each of the model's existing relations. +func (q *CreateTableQuery) appendFKConstraintsRel(gen schema.QueryGen, b []byte) (_ []byte, err error) { + relations := q.tableModel.Table().Relations + + keys := make([]string, 0, len(relations)) + for key := range relations { + keys = append(keys, key) + } + slices.Sort(keys) + + for _, key := range keys { + if rel := relations[key]; rel.References() { + query := "(?) REFERENCES ? (?)" + args := []any{ + Safe(appendColumns(nil, "", rel.BasePKs)), + rel.JoinTable.SQLName, + Safe(appendColumns(nil, "", rel.JoinPKs)), + } + if len(rel.OnUpdate) > 0 { + query += " ?" + args = append(args, Safe(rel.OnUpdate)) + } + if len(rel.OnDelete) > 0 { + query += " ?" + args = append(args, Safe(rel.OnDelete)) + } + b, err = q.appendFK(gen, b, schema.QueryWithArgs{ + Query: query, + Args: args, + }) + if err != nil { + return nil, err + } + } + } + return b, nil +} + +func (q *CreateTableQuery) appendFK(gen schema.QueryGen, b []byte, fk schema.QueryWithArgs) (_ []byte, err error) { + b = append(b, ", FOREIGN KEY "...) + return fk.AppendQuery(gen, b) +} + +func (q *CreateTableQuery) appendFKConstraints( + gen schema.QueryGen, b []byte, +) (_ []byte, err error) { + for _, fk := range q.fks { + if b, err = q.appendFK(gen, b, fk); err != nil { + return nil, err + } + } + return b, nil +} + +func (q *CreateTableQuery) appendPKConstraint(b []byte, pks []*schema.Field) []byte { + b = append(b, ", PRIMARY KEY ("...) + b = appendColumns(b, "", pks) + b = append(b, ")"...) + return b +} + +// ------------------------------------------------------------------------------ + +func (q *CreateTableQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + if err := q.beforeCreateTableHook(ctx); err != nil { + return nil, err + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + res, err := q.exec(ctx, q, query) + if err != nil { + return nil, err + } + + if q.table != nil { + if err := q.afterCreateTableHook(ctx); err != nil { + return nil, err + } + } + + return res, nil +} + +func (q *CreateTableQuery) beforeCreateTableHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(BeforeCreateTableHook); ok { + if err := hook.BeforeCreateTable(ctx, q); err != nil { + return err + } + } + return nil +} + +func (q *CreateTableQuery) afterCreateTableHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(AfterCreateTableHook); ok { + if err := hook.AfterCreateTable(ctx, q); err != nil { + return err + } + } + return nil +} + +// String returns the generated SQL query string. The CreateTableQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. 
+func (q *CreateTableQuery) String() string { + buf, err := q.AppendQuery(q.db.QueryGen(), nil) + if err != nil { + panic(err) + } + return string(buf) +} diff --git a/vendor/github.com/uptrace/bun/query_table_drop.go b/vendor/github.com/uptrace/bun/query_table_drop.go new file mode 100644 index 0000000..75f66e4 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_table_drop.go @@ -0,0 +1,176 @@ +package bun + +import ( + "context" + "database/sql" + + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type DropTableQuery struct { + baseQuery + cascadeQuery + + ifExists bool + comment string +} + +var _ Query = (*DropTableQuery)(nil) + +func NewDropTableQuery(db *DB) *DropTableQuery { + q := &DropTableQuery{ + baseQuery: baseQuery{ + db: db, + }, + } + return q +} + +func (q *DropTableQuery) Conn(db IConn) *DropTableQuery { + q.setConn(db) + return q +} + +func (q *DropTableQuery) Model(model any) *DropTableQuery { + q.setModel(model) + return q +} + +func (q *DropTableQuery) Err(err error) *DropTableQuery { + q.setErr(err) + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropTableQuery) Table(tables ...string) *DropTableQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *DropTableQuery) TableExpr(query string, args ...any) *DropTableQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *DropTableQuery) ModelTableExpr(query string, args ...any) *DropTableQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropTableQuery) IfExists() *DropTableQuery { + q.ifExists = true + return q +} + +func (q *DropTableQuery) Cascade() *DropTableQuery { + q.cascade = true + return q +} + +func (q *DropTableQuery) Restrict() *DropTableQuery { + q.restrict = true + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *DropTableQuery) Comment(comment string) *DropTableQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *DropTableQuery) Operation() string { + return "DROP TABLE" +} + +func (q *DropTableQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + b = append(b, "DROP TABLE "...) + if q.ifExists { + b = append(b, "IF EXISTS "...) 
+ } + + b, err = q.appendTables(gen, b) + if err != nil { + return nil, err + } + + b = q.appendCascade(gen, b) + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *DropTableQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + if q.table != nil { + if err := q.beforeDropTableHook(ctx); err != nil { + return nil, err + } + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + res, err := q.exec(ctx, q, query) + if err != nil { + return nil, err + } + + if q.table != nil { + if err := q.afterDropTableHook(ctx); err != nil { + return nil, err + } + } + + return res, nil +} + +func (q *DropTableQuery) beforeDropTableHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(BeforeDropTableHook); ok { + if err := hook.BeforeDropTable(ctx, q); err != nil { + return err + } + } + return nil +} + +func (q *DropTableQuery) afterDropTableHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(AfterDropTableHook); ok { + if err := hook.AfterDropTable(ctx, q); err != nil { + return err + } + } + return nil +} + +// String returns the generated SQL query string. The DropTableQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. +func (q *DropTableQuery) String() string { + buf, err := q.AppendQuery(q.db.QueryGen(), nil) + if err != nil { + panic(err) + } + return string(buf) +} diff --git a/vendor/github.com/uptrace/bun/query_table_truncate.go b/vendor/github.com/uptrace/bun/query_table_truncate.go new file mode 100644 index 0000000..d5c4255 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_table_truncate.go @@ -0,0 +1,155 @@ +package bun + +import ( + "context" + "database/sql" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type TruncateTableQuery struct { + baseQuery + cascadeQuery + + continueIdentity bool + comment string +} + +var _ Query = (*TruncateTableQuery)(nil) + +func NewTruncateTableQuery(db *DB) *TruncateTableQuery { + q := &TruncateTableQuery{ + baseQuery: baseQuery{ + db: db, + }, + } + return q +} + +func (q *TruncateTableQuery) Conn(db IConn) *TruncateTableQuery { + q.setConn(db) + return q +} + +func (q *TruncateTableQuery) Model(model any) *TruncateTableQuery { + q.setModel(model) + return q +} + +func (q *TruncateTableQuery) Err(err error) *TruncateTableQuery { + q.setErr(err) + return q +} + +//------------------------------------------------------------------------------ + +func (q *TruncateTableQuery) Table(tables ...string) *TruncateTableQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *TruncateTableQuery) TableExpr(query string, args ...any) *TruncateTableQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *TruncateTableQuery) ModelTableExpr(query string, args ...any) *TruncateTableQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *TruncateTableQuery) ContinueIdentity() *TruncateTableQuery { + q.continueIdentity = true + return q +} + +func (q *TruncateTableQuery) Cascade() *TruncateTableQuery { + q.cascade = true + return q +} + 
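+// A minimal usage sketch (illustrative; assumes a configured *bun.DB, a ctx,
+// and a hypothetical User model):
+//
+//	_, err := db.NewTruncateTable().
+//		Model((*User)(nil)).
+//		Cascade().
+//		Exec(ctx)
+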
+func (q *TruncateTableQuery) Restrict() *TruncateTableQuery { + q.restrict = true + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *TruncateTableQuery) Comment(comment string) *TruncateTableQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *TruncateTableQuery) Operation() string { + return "TRUNCATE TABLE" +} + +func (q *TruncateTableQuery) AppendQuery( + gen schema.QueryGen, b []byte, +) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + if !gen.HasFeature(feature.TableTruncate) { + b = append(b, "DELETE FROM "...) + + b, err = q.appendTables(gen, b) + if err != nil { + return nil, err + } + + return b, nil + } + + b = append(b, "TRUNCATE TABLE "...) + + b, err = q.appendTables(gen, b) + if err != nil { + return nil, err + } + + if q.db.HasFeature(feature.TableIdentity) { + if q.continueIdentity { + b = append(b, " CONTINUE IDENTITY"...) + } else { + b = append(b, " RESTART IDENTITY"...) + } + } + + b = q.appendCascade(gen, b) + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *TruncateTableQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + query := internal.String(queryBytes) + + res, err := q.exec(ctx, q, query) + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/vendor/github.com/uptrace/bun/query_update.go b/vendor/github.com/uptrace/bun/query_update.go new file mode 100644 index 0000000..c75bd4c --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_update.go @@ -0,0 +1,677 @@ +package bun + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/uptrace/bun/dialect" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/schema" +) + +type UpdateQuery struct { + whereBaseQuery + orderLimitOffsetQuery + returningQuery + setQuery + idxHintsQuery + + joins []joinQuery + comment string +} + +var _ Query = (*UpdateQuery)(nil) + +func NewUpdateQuery(db *DB) *UpdateQuery { + q := &UpdateQuery{ + whereBaseQuery: whereBaseQuery{ + baseQuery: baseQuery{ + db: db, + }, + }, + } + return q +} + +func (q *UpdateQuery) Conn(db IConn) *UpdateQuery { + q.setConn(db) + return q +} + +func (q *UpdateQuery) Model(model any) *UpdateQuery { + q.setModel(model) + return q +} + +func (q *UpdateQuery) Err(err error) *UpdateQuery { + q.setErr(err) + return q +} + +// Apply calls each function in fns, passing the UpdateQuery as an argument. 
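+// Nil functions are skipped; see SelectQuery.Apply for an example of composing
+// reusable query options this way.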
+func (q *UpdateQuery) Apply(fns ...func(*UpdateQuery) *UpdateQuery) *UpdateQuery { + for _, fn := range fns { + if fn != nil { + q = fn(q) + } + } + return q +} + +func (q *UpdateQuery) With(name string, query Query) *UpdateQuery { + q.addWith(NewWithQuery(name, query)) + return q +} + +func (q *UpdateQuery) WithRecursive(name string, query Query) *UpdateQuery { + q.addWith(NewWithQuery(name, query).Recursive()) + return q +} + +func (q *UpdateQuery) WithQuery(query *WithQuery) *UpdateQuery { + q.addWith(query) + return q +} + +// ------------------------------------------------------------------------------ + +func (q *UpdateQuery) Table(tables ...string) *UpdateQuery { + for _, table := range tables { + q.addTable(schema.UnsafeIdent(table)) + } + return q +} + +func (q *UpdateQuery) TableExpr(query string, args ...any) *UpdateQuery { + q.addTable(schema.SafeQuery(query, args)) + return q +} + +func (q *UpdateQuery) ModelTableExpr(query string, args ...any) *UpdateQuery { + q.modelTableName = schema.SafeQuery(query, args) + return q +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) Column(columns ...string) *UpdateQuery { + for _, column := range columns { + q.addColumn(schema.UnsafeIdent(column)) + } + return q +} + +func (q *UpdateQuery) ExcludeColumn(columns ...string) *UpdateQuery { + q.excludeColumn(columns) + return q +} + +func (q *UpdateQuery) Set(query string, args ...any) *UpdateQuery { + q.addSet(schema.SafeQuery(query, args)) + return q +} + +func (q *UpdateQuery) SetColumn(column string, query string, args ...any) *UpdateQuery { + if q.db.HasFeature(feature.UpdateMultiTable) { + column = q.table.Alias + "." + column + } + q.addSet(schema.SafeQuery(column+" = "+query, args)) + return q +} + +// Value overwrites model value for the column. 
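+// For example (illustrative), to set a column from an SQL expression instead of
+// the model field value:
+//
+//	q.Value("updated_at", "now()")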
+func (q *UpdateQuery) Value(column string, query string, args ...any) *UpdateQuery { + if q.table == nil { + q.setErr(errNilModel) + return q + } + q.addValue(q.table, column, query, args) + return q +} + +func (q *UpdateQuery) OmitZero() *UpdateQuery { + q.omitZero = true + return q +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) Join(join string, args ...any) *UpdateQuery { + q.joins = append(q.joins, joinQuery{ + join: schema.SafeQuery(join, args), + }) + return q +} + +func (q *UpdateQuery) JoinOn(cond string, args ...any) *UpdateQuery { + return q.joinOn(cond, args, " AND ") +} + +func (q *UpdateQuery) JoinOnOr(cond string, args ...any) *UpdateQuery { + return q.joinOn(cond, args, " OR ") +} + +func (q *UpdateQuery) joinOn(cond string, args []any, sep string) *UpdateQuery { + if len(q.joins) == 0 { + q.setErr(errors.New("bun: query has no joins")) + return q + } + j := &q.joins[len(q.joins)-1] + j.on = append(j.on, schema.SafeQueryWithSep(cond, args, sep)) + return q +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) WherePK(cols ...string) *UpdateQuery { + q.addWhereCols(cols) + return q +} + +func (q *UpdateQuery) Where(query string, args ...any) *UpdateQuery { + q.addWhere(schema.SafeQueryWithSep(query, args, " AND ")) + return q +} + +func (q *UpdateQuery) WhereOr(query string, args ...any) *UpdateQuery { + q.addWhere(schema.SafeQueryWithSep(query, args, " OR ")) + return q +} + +func (q *UpdateQuery) WhereGroup(sep string, fn func(*UpdateQuery) *UpdateQuery) *UpdateQuery { + saved := q.where + q.where = nil + + q = fn(q) + + where := q.where + q.where = saved + + q.addWhereGroup(sep, where) + + return q +} + +func (q *UpdateQuery) WhereDeleted() *UpdateQuery { + q.whereDeleted() + return q +} + +func (q *UpdateQuery) WhereAllWithDeleted() *UpdateQuery { + q.whereAllWithDeleted() + return q +} + +// ------------------------------------------------------------------------------ +func (q *UpdateQuery) Order(orders ...string) *UpdateQuery { + if !q.hasFeature(feature.UpdateOrderLimit) { + q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit)) + return q + } + q.addOrder(orders...) + return q +} + +func (q *UpdateQuery) OrderExpr(query string, args ...any) *UpdateQuery { + if !q.hasFeature(feature.UpdateOrderLimit) { + q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit)) + return q + } + q.addOrderExpr(query, args...) + return q +} + +func (q *UpdateQuery) Limit(n int) *UpdateQuery { + if !q.hasFeature(feature.UpdateOrderLimit) { + q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit)) + return q + } + q.setLimit(n) + return q +} + +//------------------------------------------------------------------------------ + +// Returning adds a RETURNING clause to the query. +// +// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`. +func (q *UpdateQuery) Returning(query string, args ...any) *UpdateQuery { + q.addReturning(schema.SafeQuery(query, args)) + return q +} + +//------------------------------------------------------------------------------ + +// Comment adds a comment to the query, wrapped by /* ... */. 
+func (q *UpdateQuery) Comment(comment string) *UpdateQuery { + q.comment = comment + return q +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) Operation() string { + return "UPDATE" +} + +func (q *UpdateQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + + b = appendComment(b, q.comment) + + gen = formatterWithModel(gen, q) + + b, err = q.appendWith(gen, b) + if err != nil { + return nil, err + } + + b = append(b, "UPDATE "...) + + if gen.HasFeature(feature.UpdateMultiTable) { + b, err = q.appendTablesWithAlias(gen, b) + } else if gen.HasFeature(feature.UpdateTableAlias) { + b, err = q.appendFirstTableWithAlias(gen, b) + } else { + b, err = q.appendFirstTable(gen, b) + } + if err != nil { + return nil, err + } + + b, err = q.appendIndexHints(gen, b) + if err != nil { + return nil, err + } + + b, err = q.mustAppendSet(gen, b) + if err != nil { + return nil, err + } + + if !gen.HasFeature(feature.UpdateMultiTable) { + b, err = q.appendOtherTables(gen, b) + if err != nil { + return nil, err + } + } + + for _, j := range q.joins { + b, err = j.AppendQuery(gen, b) + if err != nil { + return nil, err + } + } + + if q.hasFeature(feature.Output) && q.hasReturning() { + b = append(b, " OUTPUT "...) + b, err = q.appendOutput(gen, b) + if err != nil { + return nil, err + } + } + + b, err = q.mustAppendWhere(gen, b, q.hasTableAlias(gen)) + if err != nil { + return nil, err + } + + b, err = q.appendOrder(gen, b) + if err != nil { + return nil, err + } + + b, err = q.appendLimitOffset(gen, b) + if err != nil { + return nil, err + } + + if q.hasFeature(feature.Returning) && q.hasReturning() { + b = append(b, " RETURNING "...) + b, err = q.appendReturning(gen, b) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (q *UpdateQuery) mustAppendSet(gen schema.QueryGen, b []byte) (_ []byte, err error) { + b = append(b, " SET "...) + pos := len(b) + + switch model := q.model.(type) { + case *structTableModel: + if !model.strct.IsValid() { // Model((*Foo)(nil)) + break + } + if len(q.set) > 0 && q.columns == nil { + break + } + + fields, err := q.getDataFields() + if err != nil { + return nil, err + } + + b, err = q.appendSetStruct(gen, b, model, fields) + if err != nil { + return nil, err + } + + case *sliceTableModel: + if len(q.set) > 0 { // bulk-update + return q.appendSet(gen, b) + } + return nil, errors.New("bun: to bulk Update, use CTE and VALUES") + + case *mapModel: + b = model.appendSet(gen, b) + + case nil: + // continue below + + default: + return nil, fmt.Errorf("bun: Update does not support %T", q.model) + } + + if len(q.set) > 0 { + if len(b) > pos { + b = append(b, ", "...) + } + return q.appendSet(gen, b) + } + + if len(b) == pos { + return nil, errors.New("bun: empty SET clause is not allowed in the UPDATE query") + } + return b, nil +} + +func (q *UpdateQuery) appendOtherTables(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if !q.hasMultiTables() { + return b, nil + } + + b = append(b, " FROM "...) 
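+	// On dialects without feature.UpdateMultiTable (PostgreSQL-style), the
+	// remaining tables are emitted as a FROM clause:
+	// UPDATE ... SET ... FROM other_table WHERE ...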
+ + b, err = q.whereBaseQuery.appendOtherTables(gen, b) + if err != nil { + return nil, err + } + + return b, nil +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) Bulk() *UpdateQuery { + model, ok := q.model.(*sliceTableModel) + if !ok { + q.setErr(fmt.Errorf("bun: Bulk requires a slice, got %T", q.model)) + return q + } + + set, err := q.updateSliceSet(q.db.gen, model) + if err != nil { + q.setErr(err) + return q + } + + values := q.db.NewValues(model) + values.customValueQuery = q.customValueQuery + + return q.With("_data", values). + Model(model). + TableExpr("_data"). + Set(set). + Where(q.updateSliceWhere(q.db.gen, model)) +} + +func (q *UpdateQuery) updateSliceSet( + gen schema.QueryGen, model *sliceTableModel, +) (string, error) { + fields, err := q.getDataFields() + if err != nil { + return "", err + } + + var b []byte + pos := len(b) + for _, field := range fields { + if field.SkipUpdate() { + continue + } + if len(b) != pos { + b = append(b, ", "...) + pos = len(b) + } + if gen.HasFeature(feature.UpdateMultiTable) { + b = append(b, model.table.SQLAlias...) + b = append(b, '.') + } + b = append(b, field.SQLName...) + b = append(b, " = _data."...) + b = append(b, field.SQLName...) + } + return internal.String(b), nil +} + +func (q *UpdateQuery) updateSliceWhere(gen schema.QueryGen, model *sliceTableModel) string { + var b []byte + for i, pk := range model.table.PKs { + if i > 0 { + b = append(b, " AND "...) + } + if q.hasTableAlias(gen) { + b = append(b, model.table.SQLAlias...) + } else { + b = append(b, model.table.SQLName...) + } + b = append(b, '.') + b = append(b, pk.SQLName...) + b = append(b, " = _data."...) + b = append(b, pk.SQLName...) + } + return internal.String(b) +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) Scan(ctx context.Context, dest ...any) error { + _, err := q.scanOrExec(ctx, dest, true) + return err +} + +func (q *UpdateQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) { + return q.scanOrExec(ctx, dest, len(dest) > 0) +} + +func (q *UpdateQuery) scanOrExec( + ctx context.Context, dest []any, hasDest bool, +) (sql.Result, error) { + if q.err != nil { + return nil, q.err + } + + if q.table != nil { + if err := q.beforeUpdateHook(ctx); err != nil { + return nil, err + } + } + + // Run append model hooks before generating the query. + if err := q.beforeAppendModel(ctx, q); err != nil { + return nil, err + } + + // if a comment is propagated via the context, use it + setCommentFromContext(ctx, q) + + // Generate the query before checking hasReturning. 
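+	// The SQL bytes are generated once here and reused by both the scan and
+	// the exec paths below.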
+ queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes()) + if err != nil { + return nil, err + } + + useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.Returning|feature.Output)) + var model Model + + if useScan { + var err error + model, err = q.getModel(dest) + if err != nil { + return nil, err + } + } + + query := internal.String(queryBytes) + + var res sql.Result + + if useScan { + res, err = q.scan(ctx, q, query, model, hasDest) + if err != nil { + return nil, err + } + } else { + res, err = q.exec(ctx, q, query) + if err != nil { + return nil, err + } + } + + if q.table != nil { + if err := q.afterUpdateHook(ctx); err != nil { + return nil, err + } + } + + return res, nil +} + +func (q *UpdateQuery) beforeUpdateHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(BeforeUpdateHook); ok { + if err := hook.BeforeUpdate(ctx, q); err != nil { + return err + } + } + return nil +} + +func (q *UpdateQuery) afterUpdateHook(ctx context.Context) error { + if hook, ok := q.table.ZeroIface.(AfterUpdateHook); ok { + if err := hook.AfterUpdate(ctx, q); err != nil { + return err + } + } + return nil +} + +// FQN returns a fully qualified column name, for example, table_name.column_name or +// table_alias.column_alias. +func (q *UpdateQuery) FQN(column string) Ident { + if q.table == nil { + panic("UpdateQuery.FQN requires a model") + } + if q.hasTableAlias(q.db.gen) { + return Ident(q.table.Alias + "." + column) + } + return Ident(q.table.Name + "." + column) +} + +func (q *UpdateQuery) hasTableAlias(gen schema.QueryGen) bool { + return gen.HasFeature(feature.UpdateMultiTable | feature.UpdateTableAlias) +} + +// String returns the generated SQL query string. The UpdateQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. +func (q *UpdateQuery) String() string { + buf, err := q.AppendQuery(q.db.QueryGen(), nil) + if err != nil { + panic(err) + } + return string(buf) +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) QueryBuilder() QueryBuilder { + return &updateQueryBuilder{q} +} + +func (q *UpdateQuery) ApplyQueryBuilder(fn func(QueryBuilder) QueryBuilder) *UpdateQuery { + return fn(q.QueryBuilder()).Unwrap().(*UpdateQuery) +} + +type updateQueryBuilder struct { + *UpdateQuery +} + +func (q *updateQueryBuilder) WhereGroup( + sep string, fn func(QueryBuilder) QueryBuilder, +) QueryBuilder { + q.UpdateQuery = q.UpdateQuery.WhereGroup(sep, func(qs *UpdateQuery) *UpdateQuery { + return fn(q).(*updateQueryBuilder).UpdateQuery + }) + return q +} + +func (q *updateQueryBuilder) Where(query string, args ...any) QueryBuilder { + q.UpdateQuery.Where(query, args...) + return q +} + +func (q *updateQueryBuilder) WhereOr(query string, args ...any) QueryBuilder { + q.UpdateQuery.WhereOr(query, args...) + return q +} + +func (q *updateQueryBuilder) WhereDeleted() QueryBuilder { + q.UpdateQuery.WhereDeleted() + return q +} + +func (q *updateQueryBuilder) WhereAllWithDeleted() QueryBuilder { + q.UpdateQuery.WhereAllWithDeleted() + return q +} + +func (q *updateQueryBuilder) WherePK(cols ...string) QueryBuilder { + q.UpdateQuery.WherePK(cols...) 
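+	// Each wrapper method mutates the embedded *UpdateQuery and returns the
+	// builder itself so calls chain through the QueryBuilder interface.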
+ return q +} + +func (q *updateQueryBuilder) Unwrap() any { + return q.UpdateQuery +} + +//------------------------------------------------------------------------------ + +func (q *UpdateQuery) UseIndex(indexes ...string) *UpdateQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addUseIndex(indexes...) + } + return q +} + +func (q *UpdateQuery) IgnoreIndex(indexes ...string) *UpdateQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addIgnoreIndex(indexes...) + } + return q +} + +func (q *UpdateQuery) ForceIndex(indexes ...string) *UpdateQuery { + if q.db.dialect.Name() == dialect.MySQL { + q.addForceIndex(indexes...) + } + return q +} diff --git a/vendor/github.com/uptrace/bun/query_values.go b/vendor/github.com/uptrace/bun/query_values.go new file mode 100644 index 0000000..6626947 --- /dev/null +++ b/vendor/github.com/uptrace/bun/query_values.go @@ -0,0 +1,250 @@ +package bun + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/schema" +) + +type ValuesQuery struct { + baseQuery + setQuery + + withOrder bool + comment string +} + +var ( + _ Query = (*ValuesQuery)(nil) + _ schema.NamedArgAppender = (*ValuesQuery)(nil) +) + +func NewValuesQuery(db *DB, model any) *ValuesQuery { + q := &ValuesQuery{ + baseQuery: baseQuery{ + db: db, + }, + } + q.setModel(model) + return q +} + +func (q *ValuesQuery) Conn(db IConn) *ValuesQuery { + q.setConn(db) + return q +} + +func (q *ValuesQuery) Err(err error) *ValuesQuery { + q.setErr(err) + return q +} + +func (q *ValuesQuery) Column(columns ...string) *ValuesQuery { + for _, column := range columns { + q.addColumn(schema.UnsafeIdent(column)) + } + return q +} + +// Value overwrites model value for the column. +func (q *ValuesQuery) Value(column string, expr string, args ...any) *ValuesQuery { + if q.table == nil { + q.setErr(errNilModel) + return q + } + q.addValue(q.table, column, expr, args) + return q +} + +func (q *ValuesQuery) OmitZero() *ValuesQuery { + q.omitZero = true + return q +} + +func (q *ValuesQuery) WithOrder() *ValuesQuery { + q.withOrder = true + return q +} + +// Comment adds a comment to the query, wrapped by /* ... */. +func (q *ValuesQuery) Comment(comment string) *ValuesQuery { + q.comment = comment + return q +} + +func (q *ValuesQuery) AppendNamedArg(gen schema.QueryGen, b []byte, name string) ([]byte, bool) { + switch name { + case "Columns": + bb, err := q.AppendColumns(gen, b) + if err != nil { + q.setErr(err) + return b, true + } + return bb, true + } + return b, false +} + +// AppendColumns appends the table columns. It is used by CTE. +func (q *ValuesQuery) AppendColumns(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + if q.model == nil { + return nil, errNilModel + } + + if q.tableModel != nil { + fields, err := q.getFields() + if err != nil { + return nil, err + } + + b = appendColumns(b, "", fields) + + if q.withOrder { + b = append(b, ", _order"...) 
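+			// _order is the synthetic column produced by WithOrder; AppendQuery
+			// emits the matching row index for every VALUES row.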
+ } + + return b, nil + } + + switch model := q.model.(type) { + case *mapSliceModel: + return model.appendColumns(gen, b) + } + + return nil, fmt.Errorf("bun: Values does not support %T", q.model) +} + +func (q *ValuesQuery) Operation() string { + return "VALUES" +} + +func (q *ValuesQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) { + if q.err != nil { + return nil, q.err + } + if q.model == nil { + return nil, errNilModel + } + + b = appendComment(b, q.comment) + + gen = formatterWithModel(gen, q) + + b = append(b, "VALUES "...) + if q.db.HasFeature(feature.ValuesRow) { + b = append(b, "ROW("...) + } else { + b = append(b, '(') + } + + switch model := q.model.(type) { + case *structTableModel: + fields, err := q.getFields() + if err != nil { + return nil, err + } + + b, err = q.appendValues(gen, b, fields, model.strct) + if err != nil { + return nil, err + } + + if q.withOrder { + b = append(b, ", "...) + b = strconv.AppendInt(b, 0, 10) + } + + case *sliceTableModel: + fields, err := q.getFields() + if err != nil { + return nil, err + } + + sliceLen := model.slice.Len() + for i := range sliceLen { + if i > 0 { + b = append(b, "), "...) + if q.db.HasFeature(feature.ValuesRow) { + b = append(b, "ROW("...) + } else { + b = append(b, '(') + } + } + + b, err = q.appendValues(gen, b, fields, model.slice.Index(i)) + if err != nil { + return nil, err + } + + if q.withOrder { + b = append(b, ", "...) + b = strconv.AppendInt(b, int64(i), 10) + } + } + + case *mapSliceModel: + b, err = model.appendValues(gen, b) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("bun: Values does not support %T", model) + } + + b = append(b, ')') + return b, nil +} + +func (q *ValuesQuery) appendValues( + gen schema.QueryGen, b []byte, fields []*schema.Field, strct reflect.Value, +) (_ []byte, err error) { + isTemplate := gen.IsNop() + for i, f := range fields { + if i > 0 { + b = append(b, ", "...) + } + + app, ok := q.modelValues[f.Name] + if ok { + b, err = app.AppendQuery(gen, b) + if err != nil { + return nil, err + } + continue + } + + if isTemplate { + b = append(b, '?') + } else { + b = f.AppendValue(gen, b, indirect(strct)) + } + + if gen.HasFeature(feature.DoubleColonCast) { + b = append(b, "::"...) + b = append(b, f.UserSQLType...) 
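+			// on dialects with feature.DoubleColonCast (PostgreSQL-style) each
+			// cell renders as value::type, so the server casts VALUES cells to
+			// the column's declared SQL type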
+		}
+	}
+	return b, nil
+}
+
+func (q *ValuesQuery) appendSet(gen schema.QueryGen, b []byte) (_ []byte, err error) {
+	switch model := q.model.(type) {
+	case *mapModel:
+		return model.appendSet(gen, b), nil
+	case *structTableModel:
+		fields, err := q.getDataFields()
+		if err != nil {
+			return nil, err
+		}
+		return q.appendSetStruct(gen, b, model, fields)
+	default:
+		return nil, fmt.Errorf("bun: SetValues(unsupported %T)", model)
+	}
+}
diff --git a/vendor/github.com/uptrace/bun/relation_join.go b/vendor/github.com/uptrace/bun/relation_join.go
new file mode 100644
index 0000000..07a60cc
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/relation_join.go
@@ -0,0 +1,453 @@
+package bun
+
+import (
+	"context"
+	"reflect"
+	"time"
+
+	"github.com/uptrace/bun/dialect/feature"
+	"github.com/uptrace/bun/internal"
+	"github.com/uptrace/bun/schema"
+)
+
+type relationJoin struct {
+	Parent    *relationJoin
+	BaseModel TableModel
+	JoinModel TableModel
+	Relation  *schema.Relation
+
+	additionalJoinOnConditions []schema.QueryWithArgs
+
+	apply   func(*SelectQuery) *SelectQuery
+	columns []schema.QueryWithArgs
+}
+
+func (j *relationJoin) applyTo(q *SelectQuery) {
+	if j.apply == nil {
+		return
+	}
+
+	var table *schema.Table
+	var columns []schema.QueryWithArgs
+
+	// Save state.
+	table, q.table = q.table, j.JoinModel.Table()
+	columns, q.columns = q.columns, nil
+
+	q = j.apply(q)
+
+	// Restore state.
+	q.table = table
+	j.columns, q.columns = q.columns, columns
+}
+
+func (j *relationJoin) Select(ctx context.Context, q *SelectQuery) error {
+	switch j.Relation.Type {
+	case schema.HasManyRelation:
+		return j.selectMany(ctx, q)
+	case schema.ManyToManyRelation:
+		return j.selectM2M(ctx, q)
+	}
+	panic("not reached")
+}
+
+func (j *relationJoin) selectMany(ctx context.Context, q *SelectQuery) error {
+	q = j.manyQuery(q)
+	if q == nil {
+		return nil
+	}
+	return q.Scan(ctx)
+}
+
+func (j *relationJoin) manyQuery(q *SelectQuery) *SelectQuery {
+	hasManyModel := newHasManyModel(j)
+	if hasManyModel == nil {
+		return nil
+	}
+
+	q = q.Model(hasManyModel)
+
+	var where []byte
+
+	if q.db.HasFeature(feature.CompositeIn) {
+		return j.manyQueryCompositeIn(where, q)
+	}
+	return j.manyQueryMulti(where, q)
+}
+
+func (j *relationJoin) manyQueryCompositeIn(where []byte, q *SelectQuery) *SelectQuery {
+	if len(j.Relation.JoinPKs) > 1 {
+		where = append(where, '(')
+	}
+	where = appendColumns(where, j.JoinModel.Table().SQLAlias, j.Relation.JoinPKs)
+	if len(j.Relation.JoinPKs) > 1 {
+		where = append(where, ')')
+	}
+	where = append(where, " IN ("...)
+	where = appendChildValues(
+		q.db.QueryGen(),
+		where,
+		j.JoinModel.rootValue(),
+		j.JoinModel.parentIndex(),
+		j.Relation.BasePKs,
+	)
+	where = append(where, ")"...)
+	if len(j.additionalJoinOnConditions) > 0 {
+		where = append(where, " AND "...)
+		where = appendAdditionalJoinOnConditions(q.db.QueryGen(), where, j.additionalJoinOnConditions)
+	}
+
+	q = q.Where(internal.String(where))
+
+	if j.Relation.PolymorphicField != nil {
+		q = q.Where("? 
= ?", j.Relation.PolymorphicField.SQLName, j.Relation.PolymorphicValue) + } + + j.applyTo(q) + q = q.Apply(j.hasManyColumns) + + return q +} + +func (j *relationJoin) manyQueryMulti(where []byte, q *SelectQuery) *SelectQuery { + where = appendMultiValues( + q.db.QueryGen(), + where, + j.JoinModel.rootValue(), + j.JoinModel.parentIndex(), + j.Relation.BasePKs, + j.Relation.JoinPKs, + j.JoinModel.Table().SQLAlias, + ) + + q = q.Where(internal.String(where)) + + if len(j.additionalJoinOnConditions) > 0 { + q = q.Where(internal.String(appendAdditionalJoinOnConditions(q.db.QueryGen(), []byte{}, j.additionalJoinOnConditions))) + } + + if j.Relation.PolymorphicField != nil { + q = q.Where("? = ?", j.Relation.PolymorphicField.SQLName, j.Relation.PolymorphicValue) + } + + j.applyTo(q) + q = q.Apply(j.hasManyColumns) + + return q +} + +func (j *relationJoin) hasManyColumns(q *SelectQuery) *SelectQuery { + b := make([]byte, 0, 32) + + joinTable := j.JoinModel.Table() + if len(j.columns) > 0 { + for i, col := range j.columns { + if i > 0 { + b = append(b, ", "...) + } + + if col.Args == nil { + if field, ok := joinTable.FieldMap[col.Query]; ok { + b = append(b, joinTable.SQLAlias...) + b = append(b, '.') + b = append(b, field.SQLName...) + continue + } + } + + var err error + b, err = col.AppendQuery(q.db.gen, b) + if err != nil { + q.setErr(err) + return q + } + + } + } else { + b = appendColumns(b, joinTable.SQLAlias, joinTable.Fields) + } + + q = q.ColumnExpr(internal.String(b)) + + return q +} + +func (j *relationJoin) selectM2M(ctx context.Context, q *SelectQuery) error { + q = j.m2mQuery(q) + if q == nil { + return nil + } + return q.Scan(ctx) +} + +func (j *relationJoin) m2mQuery(q *SelectQuery) *SelectQuery { + gen := q.db.gen + + m2mModel := newM2MModel(j) + if m2mModel == nil { + return nil + } + q = q.Model(m2mModel) + + index := j.JoinModel.parentIndex() + + if j.Relation.M2MTable != nil { + // We only need base pks to park joined models to the base model. + fields := j.Relation.M2MBasePKs + + b := make([]byte, 0, len(fields)) + b = appendColumns(b, j.Relation.M2MTable.SQLAlias, fields) + + q = q.ColumnExpr(internal.String(b)) + } + + //nolint + var join []byte + join = append(join, "JOIN "...) + join = gen.AppendQuery(join, string(j.Relation.M2MTable.SQLName)) + join = append(join, " AS "...) + join = append(join, j.Relation.M2MTable.SQLAlias...) + join = append(join, " ON ("...) + for i, col := range j.Relation.M2MBasePKs { + if i > 0 { + join = append(join, ", "...) + } + join = append(join, j.Relation.M2MTable.SQLAlias...) + join = append(join, '.') + join = append(join, col.SQLName...) + } + join = append(join, ") IN ("...) + join = appendChildValues(gen, join, j.BaseModel.rootValue(), index, j.Relation.BasePKs) + join = append(join, ")"...) + + if len(j.additionalJoinOnConditions) > 0 { + join = append(join, " AND "...) + join = appendAdditionalJoinOnConditions(gen, join, j.additionalJoinOnConditions) + } + + q = q.Join(internal.String(join)) + + joinTable := j.JoinModel.Table() + for i, m2mJoinField := range j.Relation.M2MJoinPKs { + joinField := j.Relation.JoinPKs[i] + q = q.Where("?.? 
= ?.?", + joinTable.SQLAlias, joinField.SQLName, + j.Relation.M2MTable.SQLAlias, m2mJoinField.SQLName) + } + + j.applyTo(q) + q = q.Apply(j.hasManyColumns) + + return q +} + +func (j *relationJoin) hasParent() bool { + if j.Parent != nil { + switch j.Parent.Relation.Type { + case schema.HasOneRelation, schema.BelongsToRelation: + return true + } + } + return false +} + +func (j *relationJoin) appendAlias(gen schema.QueryGen, b []byte) []byte { + quote := gen.IdentQuote() + + b = append(b, quote) + b = appendAlias(b, j) + b = append(b, quote) + return b +} + +func (j *relationJoin) appendAliasColumn(gen schema.QueryGen, b []byte, column string) []byte { + quote := gen.IdentQuote() + + b = append(b, quote) + b = appendAlias(b, j) + b = append(b, "__"...) + b = append(b, column...) + b = append(b, quote) + return b +} + +func (j *relationJoin) appendBaseAlias(gen schema.QueryGen, b []byte) []byte { + quote := gen.IdentQuote() + + if j.hasParent() { + b = append(b, quote) + b = appendAlias(b, j.Parent) + b = append(b, quote) + return b + } + return append(b, j.BaseModel.Table().SQLAlias...) +} + +func (j *relationJoin) appendSoftDelete( + gen schema.QueryGen, b []byte, flags internal.Flag, +) []byte { + b = append(b, '.') + + field := j.JoinModel.Table().SoftDeleteField + b = append(b, field.SQLName...) + + if field.IsPtr || field.NullZero { + if flags.Has(deletedFlag) { + b = append(b, " IS NOT NULL"...) + } else { + b = append(b, " IS NULL"...) + } + } else { + if flags.Has(deletedFlag) { + b = append(b, " != "...) + } else { + b = append(b, " = "...) + } + b = gen.Dialect().AppendTime(b, time.Time{}) + } + + return b +} + +func appendAlias(b []byte, j *relationJoin) []byte { + if j.hasParent() { + b = appendAlias(b, j.Parent) + b = append(b, "__"...) + } + b = append(b, j.Relation.Field.Name...) + return b +} + +func (j *relationJoin) appendHasOneJoin( + gen schema.QueryGen, b []byte, q *SelectQuery, +) (_ []byte, err error) { + isSoftDelete := j.JoinModel.Table().SoftDeleteField != nil && !q.flags.Has(allWithDeletedFlag) + + b = append(b, "LEFT JOIN "...) + b = gen.AppendQuery(b, string(j.JoinModel.Table().SQLNameForSelects)) + b = append(b, " AS "...) + b = j.appendAlias(gen, b) + + b = append(b, " ON "...) + + b = append(b, '(') + for i, baseField := range j.Relation.BasePKs { + if i > 0 { + b = append(b, " AND "...) + } + b = j.appendAlias(gen, b) + b = append(b, '.') + b = append(b, j.Relation.JoinPKs[i].SQLName...) + b = append(b, " = "...) + b = j.appendBaseAlias(gen, b) + b = append(b, '.') + b = append(b, baseField.SQLName...) + } + b = append(b, ')') + + if isSoftDelete { + b = append(b, " AND "...) + b = j.appendAlias(gen, b) + b = j.appendSoftDelete(gen, b, q.flags) + } + + if len(j.additionalJoinOnConditions) > 0 { + b = append(b, " AND "...) + b = appendAdditionalJoinOnConditions(gen, b, j.additionalJoinOnConditions) + } + + return b, nil +} + +func appendChildValues( + gen schema.QueryGen, b []byte, v reflect.Value, index []int, fields []*schema.Field, +) []byte { + seen := make(map[string]struct{}) + walk(v, index, func(v reflect.Value) { + start := len(b) + + if len(fields) > 1 { + b = append(b, '(') + } + for i, f := range fields { + if i > 0 { + b = append(b, ", "...) + } + b = f.AppendValue(gen, b, v) + } + if len(fields) > 1 { + b = append(b, ')') + } + b = append(b, ", "...) 
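+		// The trailing ", " and any duplicate tuple are rolled back below:
+		// duplicates rewind b to start, and the final separator is trimmed
+		// after the walk completes.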
+
+		if _, ok := seen[string(b[start:])]; ok {
+			b = b[:start]
+		} else {
+			seen[string(b[start:])] = struct{}{}
+		}
+	})
+	if len(seen) > 0 {
+		b = b[:len(b)-2] // trim ", "
+	}
+	return b
+}
+
+// appendMultiValues is an alternative to appendChildValues that doesn't use the sql keyword IN
+// but instead uses old style ((k1=v1) AND (k2=v2)) OR (...) conditions.
+func appendMultiValues(
+	gen schema.QueryGen, b []byte, v reflect.Value, index []int, baseFields, joinFields []*schema.Field, joinTable schema.Safe,
+) []byte {
+	// This is based on a mix of appendChildValues and query_base.appendColumns
+
+	// These should never mismatch in length, but it is nice to know if they do
+	if len(joinFields) != len(baseFields) {
+		panic("not reached")
+	}
+
+	// walk the relations
+	b = append(b, '(')
+	seen := make(map[string]struct{})
+	walk(v, index, func(v reflect.Value) {
+		start := len(b)
+		for i, f := range baseFields {
+			if i > 0 {
+				b = append(b, " AND "...)
+			}
+			if len(baseFields) > 1 {
+				b = append(b, '(')
+			}
+			// Field name
+			b = append(b, joinTable...)
+			b = append(b, '.')
+			b = append(b, []byte(joinFields[i].SQLName)...)
+
+			// Equals value
+			b = append(b, '=')
+			b = f.AppendValue(gen, b, v)
+			if len(baseFields) > 1 {
+				b = append(b, ')')
+			}
+		}
+
+		b = append(b, ") OR ("...)
+
+		if _, ok := seen[string(b[start:])]; ok {
+			b = b[:start]
+		} else {
+			seen[string(b[start:])] = struct{}{}
+		}
+	})
+	if len(seen) > 0 {
+		b = b[:len(b)-6] // trim ") OR ("
+	}
+	b = append(b, ')')
+	return b
+}
+
+func appendAdditionalJoinOnConditions(
+	gen schema.QueryGen, b []byte, conditions []schema.QueryWithArgs,
+) []byte {
+	for i, cond := range conditions {
+		if i > 0 {
+			b = append(b, " AND "...)
+		}
+		b = gen.AppendQuery(b, cond.Query, cond.Args...)
+	}
+	return b
+}
diff --git a/vendor/github.com/uptrace/bun/schema/append.go b/vendor/github.com/uptrace/bun/schema/append.go
new file mode 100644
index 0000000..f7a1f87
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/schema/append.go
@@ -0,0 +1,81 @@
+package schema
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/uptrace/bun/dialect"
+)
+
+func In(slice any) QueryAppender {
+	v := reflect.ValueOf(slice)
+	if v.Kind() != reflect.Slice {
+		return &inValues{
+			err: fmt.Errorf("bun: In(non-slice %T)", slice),
+		}
+	}
+	return &inValues{
+		slice: v,
+	}
+}
+
+type inValues struct {
+	slice reflect.Value
+	err   error
+}
+
+var _ QueryAppender = (*inValues)(nil)
+
+func (in *inValues) AppendQuery(gen QueryGen, b []byte) (_ []byte, err error) {
+	if in.err != nil {
+		return nil, in.err
+	}
+	return appendIn(gen, b, in.slice), nil
+}
+
+func appendIn(gen QueryGen, b []byte, slice reflect.Value) []byte {
+	sliceLen := slice.Len()
+
+	if sliceLen == 0 {
+		return dialect.AppendNull(b)
+	}
+
+	for i := 0; i < sliceLen; i++ {
+		if i > 0 {
+			b = append(b, ", "...)
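+			// elements are comma-separated; nested slices recurse below as
+			// parenthesized groups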
+ } + + elem := slice.Index(i) + if elem.Kind() == reflect.Interface { + elem = elem.Elem() + } + + if elem.Kind() == reflect.Slice && elem.Type() != bytesType { + b = append(b, '(') + b = appendIn(gen, b, elem) + b = append(b, ')') + } else { + b = gen.AppendValue(b, elem) + } + } + return b +} + +//------------------------------------------------------------------------------ + +func NullZero(value any) QueryAppender { + return nullZero{ + value: value, + } +} + +type nullZero struct { + value any +} + +func (nz nullZero) AppendQuery(gen QueryGen, b []byte) (_ []byte, err error) { + if isZero(nz.value) { + return dialect.AppendNull(b), nil + } + return gen.AppendValue(b, reflect.ValueOf(nz.value)), nil +} diff --git a/vendor/github.com/uptrace/bun/schema/append_value.go b/vendor/github.com/uptrace/bun/schema/append_value.go new file mode 100644 index 0000000..fb3166d --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/append_value.go @@ -0,0 +1,316 @@ +package schema + +import ( + "database/sql/driver" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" + + "github.com/puzpuzpuz/xsync/v3" + "github.com/uptrace/bun/dialect" + "github.com/uptrace/bun/dialect/sqltype" + "github.com/uptrace/bun/extra/bunjson" + "github.com/uptrace/bun/internal" + "github.com/vmihailenco/msgpack/v5" +) + +type ( + AppenderFunc func(gen QueryGen, b []byte, v reflect.Value) []byte + CustomAppender func(typ reflect.Type) AppenderFunc +) + +var appenders = []AppenderFunc{ + reflect.Bool: AppendBoolValue, + reflect.Int: AppendIntValue, + reflect.Int8: AppendIntValue, + reflect.Int16: AppendIntValue, + reflect.Int32: AppendIntValue, + reflect.Int64: AppendIntValue, + reflect.Uint: AppendUintValue, + reflect.Uint8: AppendUintValue, + reflect.Uint16: AppendUintValue, + reflect.Uint32: appendUint32Value, + reflect.Uint64: appendUint64Value, + reflect.Uintptr: nil, + reflect.Float32: AppendFloat32Value, + reflect.Float64: AppendFloat64Value, + reflect.Complex64: nil, + reflect.Complex128: nil, + reflect.Array: AppendJSONValue, + reflect.Chan: nil, + reflect.Func: nil, + reflect.Interface: nil, + reflect.Map: AppendJSONValue, + reflect.Ptr: nil, + reflect.Slice: AppendJSONValue, + reflect.String: AppendStringValue, + reflect.Struct: AppendJSONValue, + reflect.UnsafePointer: nil, +} + +var appenderCache = xsync.NewMapOf[reflect.Type, AppenderFunc]() + +func FieldAppender(dialect Dialect, field *Field) AppenderFunc { + if field.Tag.HasOption("msgpack") { + return appendMsgpack + } + + fieldType := field.StructField.Type + + switch strings.ToUpper(field.UserSQLType) { + case sqltype.JSON, sqltype.JSONB: + if fieldType.Implements(driverValuerType) { + return appendDriverValue + } + + if fieldType.Kind() != reflect.Ptr { + if reflect.PointerTo(fieldType).Implements(driverValuerType) { + return addrAppender(appendDriverValue) + } + } + + return AppendJSONValue + } + + return Appender(dialect, fieldType) +} + +func Appender(dialect Dialect, typ reflect.Type) AppenderFunc { + if v, ok := appenderCache.Load(typ); ok { + return v + } + + fn := appender(dialect, typ) + + if v, ok := appenderCache.LoadOrStore(typ, fn); ok { + return v + } + return fn +} + +func appender(dialect Dialect, typ reflect.Type) AppenderFunc { + switch typ { + case bytesType: + return appendBytesValue + case timeType: + return appendTimeValue + case timePtrType: + return PtrAppender(appendTimeValue) + case ipNetType: + return appendIPNetValue + case ipType, netipPrefixType, netipAddrType: + return appendStringer + case jsonRawMessageType: + 
return appendJSONRawMessageValue + } + + kind := typ.Kind() + + if typ.Implements(queryAppenderType) { + if kind == reflect.Ptr { + return nilAwareAppender(appendQueryAppenderValue) + } + return appendQueryAppenderValue + } + if typ.Implements(driverValuerType) { + if kind == reflect.Ptr { + return nilAwareAppender(appendDriverValue) + } + return appendDriverValue + } + + if kind != reflect.Ptr { + ptr := reflect.PointerTo(typ) + if ptr.Implements(queryAppenderType) { + return addrAppender(appendQueryAppenderValue) + } + if ptr.Implements(driverValuerType) { + return addrAppender(appendDriverValue) + } + } + + switch kind { + case reflect.Interface: + return ifaceAppenderFunc + case reflect.Ptr: + if typ.Implements(jsonMarshalerType) { + return nilAwareAppender(AppendJSONValue) + } + if fn := Appender(dialect, typ.Elem()); fn != nil { + return PtrAppender(fn) + } + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return appendBytesValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return appendArrayBytesValue + } + } + + return appenders[typ.Kind()] +} + +func ifaceAppenderFunc(gen QueryGen, b []byte, v reflect.Value) []byte { + if v.IsNil() { + return dialect.AppendNull(b) + } + elem := v.Elem() + appender := Appender(gen.Dialect(), elem.Type()) + return appender(gen, b, elem) +} + +func nilAwareAppender(fn AppenderFunc) AppenderFunc { + return func(gen QueryGen, b []byte, v reflect.Value) []byte { + if v.IsNil() { + return dialect.AppendNull(b) + } + return fn(gen, b, v) + } +} + +func PtrAppender(fn AppenderFunc) AppenderFunc { + return func(gen QueryGen, b []byte, v reflect.Value) []byte { + if v.IsNil() { + return dialect.AppendNull(b) + } + return fn(gen, b, v.Elem()) + } +} + +func AppendBoolValue(gen QueryGen, b []byte, v reflect.Value) []byte { + return gen.Dialect().AppendBool(b, v.Bool()) +} + +func AppendIntValue(gen QueryGen, b []byte, v reflect.Value) []byte { + return strconv.AppendInt(b, v.Int(), 10) +} + +func AppendUintValue(gen QueryGen, b []byte, v reflect.Value) []byte { + return strconv.AppendUint(b, v.Uint(), 10) +} + +func appendUint32Value(gen QueryGen, b []byte, v reflect.Value) []byte { + return gen.Dialect().AppendUint32(b, uint32(v.Uint())) +} + +func appendUint64Value(gen QueryGen, b []byte, v reflect.Value) []byte { + return gen.Dialect().AppendUint64(b, v.Uint()) +} + +func AppendFloat32Value(gen QueryGen, b []byte, v reflect.Value) []byte { + return dialect.AppendFloat32(b, float32(v.Float())) +} + +func AppendFloat64Value(gen QueryGen, b []byte, v reflect.Value) []byte { + return dialect.AppendFloat64(b, float64(v.Float())) +} + +func appendBytesValue(gen QueryGen, b []byte, v reflect.Value) []byte { + return gen.Dialect().AppendBytes(b, v.Bytes()) +} + +func appendArrayBytesValue(gen QueryGen, b []byte, v reflect.Value) []byte { + if v.CanAddr() { + return gen.Dialect().AppendBytes(b, v.Slice(0, v.Len()).Bytes()) + } + + tmp := make([]byte, v.Len()) + reflect.Copy(reflect.ValueOf(tmp), v) + b = gen.Dialect().AppendBytes(b, tmp) + return b +} + +func AppendStringValue(gen QueryGen, b []byte, v reflect.Value) []byte { + return gen.Dialect().AppendString(b, v.String()) +} + +func AppendJSONValue(gen QueryGen, b []byte, v reflect.Value) []byte { + bb, err := bunjson.Marshal(v.Interface()) + if err != nil { + return dialect.AppendError(b, err) + } + + if len(bb) > 0 && bb[len(bb)-1] == '\n' { + bb = bb[:len(bb)-1] + } + + return gen.Dialect().AppendJSON(b, bb) +} + +func appendTimeValue(gen QueryGen, b []byte, v 
reflect.Value) []byte { + tm := v.Interface().(time.Time) + return gen.Dialect().AppendTime(b, tm) +} + +func appendIPNetValue(gen QueryGen, b []byte, v reflect.Value) []byte { + ipnet := v.Interface().(net.IPNet) + return gen.Dialect().AppendString(b, ipnet.String()) +} + +func appendStringer(gen QueryGen, b []byte, v reflect.Value) []byte { + return gen.Dialect().AppendString(b, v.Interface().(fmt.Stringer).String()) +} + +func appendJSONRawMessageValue(gen QueryGen, b []byte, v reflect.Value) []byte { + bytes := v.Bytes() + if bytes == nil { + return dialect.AppendNull(b) + } + return gen.Dialect().AppendString(b, internal.String(bytes)) +} + +func appendQueryAppenderValue(gen QueryGen, b []byte, v reflect.Value) []byte { + return AppendQueryAppender(gen, b, v.Interface().(QueryAppender)) +} + +func appendDriverValue(gen QueryGen, b []byte, v reflect.Value) []byte { + value, err := v.Interface().(driver.Valuer).Value() + if err != nil { + return dialect.AppendError(b, err) + } + if _, ok := value.(driver.Valuer); ok { + return dialect.AppendError(b, fmt.Errorf("driver.Valuer returns unsupported type %T", value)) + } + return gen.Append(b, value) +} + +func addrAppender(fn AppenderFunc) AppenderFunc { + return func(gen QueryGen, b []byte, v reflect.Value) []byte { + if !v.CanAddr() { + err := fmt.Errorf("bun: Append(nonaddressable %T)", v.Interface()) + return dialect.AppendError(b, err) + } + return fn(gen, b, v.Addr()) + } +} + +func appendMsgpack(gen QueryGen, b []byte, v reflect.Value) []byte { + hexEnc := internal.NewHexEncoder(b) + + enc := msgpack.GetEncoder() + defer msgpack.PutEncoder(enc) + + enc.Reset(hexEnc) + if err := enc.EncodeValue(v); err != nil { + return dialect.AppendError(b, err) + } + + if err := hexEnc.Close(); err != nil { + return dialect.AppendError(b, err) + } + + return hexEnc.Bytes() +} + +func AppendQueryAppender(gen QueryGen, b []byte, app QueryAppender) []byte { + bb, err := app.AppendQuery(gen, b) + if err != nil { + return dialect.AppendError(b, err) + } + return bb +} diff --git a/vendor/github.com/uptrace/bun/schema/dialect.go b/vendor/github.com/uptrace/bun/schema/dialect.go new file mode 100644 index 0000000..bb40af6 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/dialect.go @@ -0,0 +1,194 @@ +package schema + +import ( + "database/sql" + "encoding/hex" + "strconv" + "time" + "unicode/utf8" + + "github.com/uptrace/bun/dialect" + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal/parser" +) + +type Dialect interface { + Init(db *sql.DB) + + Name() dialect.Name + Features() feature.Feature + + Tables() *Tables + OnTable(table *Table) + + IdentQuote() byte + + AppendUint32(b []byte, n uint32) []byte + AppendUint64(b []byte, n uint64) []byte + AppendTime(b []byte, tm time.Time) []byte + AppendString(b []byte, s string) []byte + AppendBytes(b []byte, bs []byte) []byte + AppendJSON(b, jsonb []byte) []byte + AppendBool(b []byte, v bool) []byte + + // AppendSequence adds the appropriate instruction for the driver to create a sequence + // from which (autoincremented) values for the column will be generated. + AppendSequence(b []byte, t *Table, f *Field) []byte + + // DefaultVarcharLen should be returned for dialects in which specifying VARCHAR length + // is mandatory in queries that modify the schema (CREATE TABLE / ADD COLUMN, etc). + // Dialects that do not have such requirement may return 0, which should be interpreted so by the caller. 
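+	// For instance, the MySQL dialect returns 255 here, while dialects such as
+	// PostgreSQL, which accept a bare VARCHAR, return 0.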
+	DefaultVarcharLen() int
+
+	// DefaultSchema should return the name of the default database schema.
+	DefaultSchema() string
+}
+
+//------------------------------------------------------------------------------
+
+type BaseDialect struct{}
+
+func (BaseDialect) AppendUint32(b []byte, n uint32) []byte {
+	return strconv.AppendUint(b, uint64(n), 10)
+}
+
+func (BaseDialect) AppendUint64(b []byte, n uint64) []byte {
+	return strconv.AppendUint(b, n, 10)
+}
+
+func (BaseDialect) AppendTime(b []byte, tm time.Time) []byte {
+	b = append(b, '\'')
+	b = tm.UTC().AppendFormat(b, "2006-01-02 15:04:05.999999-07:00")
+	b = append(b, '\'')
+	return b
+}
+
+func (BaseDialect) AppendString(b []byte, s string) []byte {
+	b = append(b, '\'')
+	for _, r := range s {
+		if r == '\000' {
+			continue
+		}
+
+		if r == '\'' {
+			b = append(b, '\'', '\'')
+			continue
+		}
+
+		if r < utf8.RuneSelf {
+			b = append(b, byte(r))
+			continue
+		}
+
+		l := len(b)
+		if cap(b)-l < utf8.UTFMax {
+			b = append(b, make([]byte, utf8.UTFMax)...)
+		}
+		n := utf8.EncodeRune(b[l:l+utf8.UTFMax], r)
+		b = b[:l+n]
+	}
+	b = append(b, '\'')
+	return b
+}
+
+func (BaseDialect) AppendBytes(b, bs []byte) []byte {
+	if bs == nil {
+		return dialect.AppendNull(b)
+	}
+
+	b = append(b, `'\x`...)
+
+	s := len(b)
+	b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
+	hex.Encode(b[s:], bs)
+
+	b = append(b, '\'')
+
+	return b
+}
+
+func (BaseDialect) AppendJSON(b, jsonb []byte) []byte {
+	b = append(b, '\'')
+
+	p := parser.New(jsonb)
+	for p.Valid() {
+		c := p.Read()
+		switch c {
+		case '"':
+			b = append(b, '"')
+		case '\'':
+			b = append(b, "''"...)
+		case '\000':
+			continue
+		case '\\':
+			if p.CutPrefix([]byte("u0000")) {
+				b = append(b, `\\u0000`...)
+			} else {
+				b = append(b, '\\')
+				if p.Valid() {
+					b = append(b, p.Read())
+				}
+			}
+		default:
+			b = append(b, c)
+		}
+	}
+
+	b = append(b, '\'')
+
+	return b
+}
+
+func (BaseDialect) AppendBool(b []byte, v bool) []byte {
+	return dialect.AppendBool(b, v)
+}
+
+//------------------------------------------------------------------------------
+
+type nopDialect struct {
+	BaseDialect
+
+	tables   *Tables
+	features feature.Feature
+}
+
+func newNopDialect() *nopDialect {
+	d := new(nopDialect)
+	d.tables = NewTables(d)
+	d.features = feature.Returning
+	return d
+}
+
+func (d *nopDialect) Init(*sql.DB) {}
+
+func (d *nopDialect) Name() dialect.Name {
+	return dialect.Invalid
+}
+
+func (d *nopDialect) Features() feature.Feature {
+	return d.features
+}
+
+func (d *nopDialect) Tables() *Tables {
+	return d.tables
+}
+
+func (d *nopDialect) OnField(field *Field) {}
+
+func (d *nopDialect) OnTable(table *Table) {}
+
+func (d *nopDialect) IdentQuote() byte {
+	return '"'
+}
+
+func (d *nopDialect) DefaultVarcharLen() int {
+	return 0
+}
+
+func (d *nopDialect) AppendSequence(b []byte, _ *Table, _ *Field) []byte {
+	return b
+}
+
+func (d *nopDialect) DefaultSchema() string {
+	return "nop"
+}
diff --git a/vendor/github.com/uptrace/bun/schema/field.go b/vendor/github.com/uptrace/bun/schema/field.go
new file mode 100644
index 0000000..65381d8
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/schema/field.go
@@ -0,0 +1,137 @@
+package schema
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/uptrace/bun/dialect"
+	"github.com/uptrace/bun/internal"
+	"github.com/uptrace/bun/internal/tagparser"
+)
+
+type Field struct {
+	Table       *Table // Contains this field
+	StructField reflect.StructField
+	IsPtr       bool
+
+	Tag          tagparser.Tag
+	IndirectType reflect.Type
+	Index        []int
+
+	Name string // SQL name, e.g. 
id + SQLName Safe // escaped SQL name, e.g. "id" + GoName string // struct field name, e.g. Id + + DiscoveredSQLType string + UserSQLType string + CreateTableSQLType string + SQLDefault string + + OnDelete string + OnUpdate string + + IsPK bool + NotNull bool + NullZero bool + AutoIncrement bool + Identity bool + + Append AppenderFunc + Scan ScannerFunc + IsZero IsZeroerFunc +} + +func (f *Field) String() string { + return f.Name +} + +func (f *Field) WithIndex(path []int) *Field { + if len(path) == 0 { + return f + } + clone := *f + clone.Index = makeIndex(path, f.Index) + return &clone +} + +func (f *Field) Clone() *Field { + cp := *f + cp.Index = cp.Index[:len(f.Index):len(f.Index)] + return &cp +} + +func (f *Field) Value(strct reflect.Value) reflect.Value { + return internal.FieldByIndexAlloc(strct, f.Index) +} + +func (f *Field) HasNilValue(v reflect.Value) bool { + if len(f.Index) == 1 { + return v.Field(f.Index[0]).IsNil() + } + + for _, index := range f.Index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + v = v.Field(index) + } + return v.IsNil() +} + +func (f *Field) HasZeroValue(v reflect.Value) bool { + if len(f.Index) == 1 { + return f.IsZero(v.Field(f.Index[0])) + } + + for _, index := range f.Index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + v = v.Field(index) + } + return f.IsZero(v) +} + +func (f *Field) AppendValue(gen QueryGen, b []byte, strct reflect.Value) []byte { + fv, ok := fieldByIndex(strct, f.Index) + if !ok { + return dialect.AppendNull(b) + } + + if (f.IsPtr && fv.IsNil()) || (f.NullZero && f.IsZero(fv)) { + return dialect.AppendNull(b) + } + if f.Append == nil { + panic(fmt.Errorf("bun: AppendValue(unsupported %s)", fv.Type())) + } + return f.Append(gen, b, fv) +} + +func (f *Field) ScanValue(strct reflect.Value, src any) error { + if src == nil { + if fv, ok := fieldByIndex(strct, f.Index); ok { + return f.ScanWithCheck(fv, src) + } + return nil + } + + fv := internal.FieldByIndexAlloc(strct, f.Index) + return f.ScanWithCheck(fv, src) +} + +func (f *Field) ScanWithCheck(fv reflect.Value, src any) error { + if f.Scan == nil { + return fmt.Errorf("bun: Scan(unsupported %s)", f.IndirectType) + } + return f.Scan(fv, src) +} + +func (f *Field) SkipUpdate() bool { + return f.Tag.HasOption("skipupdate") +} diff --git a/vendor/github.com/uptrace/bun/schema/hook.go b/vendor/github.com/uptrace/bun/schema/hook.go new file mode 100644 index 0000000..8e755c9 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/hook.go @@ -0,0 +1,43 @@ +package schema + +import ( + "context" + "database/sql" + "reflect" +) + +type Model interface { + ScanRows(ctx context.Context, rows *sql.Rows) (int, error) + Value() any +} + +type Query interface { + QueryAppender + Operation() string + GetModel() Model + GetTableName() string +} + +//------------------------------------------------------------------------------ + +type BeforeAppendModelHook interface { + BeforeAppendModel(ctx context.Context, query Query) error +} + +var beforeAppendModelHookType = reflect.TypeFor[BeforeAppendModelHook]() + +//------------------------------------------------------------------------------ + +type BeforeScanRowHook interface { + BeforeScanRow(context.Context) error +} + +var beforeScanRowHookType = reflect.TypeFor[BeforeScanRowHook]() + +//------------------------------------------------------------------------------ + +type AfterScanRowHook interface { + AfterScanRow(context.Context) error +} + +var 
afterScanRowHookType = reflect.TypeFor[AfterScanRowHook]() diff --git a/vendor/github.com/uptrace/bun/schema/querygen.go b/vendor/github.com/uptrace/bun/schema/querygen.go new file mode 100644 index 0000000..5ca559d --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/querygen.go @@ -0,0 +1,291 @@ +package schema + +import ( + "reflect" + "strconv" + "strings" + "time" + + "github.com/uptrace/bun/dialect" + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/internal/parser" +) + +var nopQueryGen = QueryGen{ + dialect: newNopDialect(), +} + +type QueryGen struct { + dialect Dialect + args *namedArgList +} + +func NewQueryGen(dialect Dialect) QueryGen { + return QueryGen{ + dialect: dialect, + } +} + +func NewNopQueryGen() QueryGen { + return nopQueryGen +} + +func (f QueryGen) IsNop() bool { + return f.dialect.Name() == dialect.Invalid +} + +func (f QueryGen) Dialect() Dialect { + return f.dialect +} + +func (f QueryGen) IdentQuote() byte { + return f.dialect.IdentQuote() +} + +func (gen QueryGen) Append(b []byte, v any) []byte { + switch v := v.(type) { + case nil: + return dialect.AppendNull(b) + case bool: + return dialect.AppendBool(b, v) + case int: + return strconv.AppendInt(b, int64(v), 10) + case int32: + return strconv.AppendInt(b, int64(v), 10) + case int64: + return strconv.AppendInt(b, v, 10) + case uint: + return strconv.AppendInt(b, int64(v), 10) + case uint32: + return gen.Dialect().AppendUint32(b, v) + case uint64: + return gen.Dialect().AppendUint64(b, v) + case float32: + return dialect.AppendFloat32(b, v) + case float64: + return dialect.AppendFloat64(b, v) + case string: + return gen.Dialect().AppendString(b, v) + case time.Time: + return gen.Dialect().AppendTime(b, v) + case []byte: + return gen.Dialect().AppendBytes(b, v) + case QueryAppender: + return AppendQueryAppender(gen, b, v) + default: + vv := reflect.ValueOf(v) + if vv.Kind() == reflect.Ptr && vv.IsNil() { + return dialect.AppendNull(b) + } + appender := Appender(gen.Dialect(), vv.Type()) + return appender(gen, b, vv) + } +} + +func (f QueryGen) AppendName(b []byte, name string) []byte { + return dialect.AppendName(b, name, f.IdentQuote()) +} + +func (f QueryGen) AppendIdent(b []byte, ident string) []byte { + return dialect.AppendIdent(b, ident, f.IdentQuote()) +} + +func (f QueryGen) AppendValue(b []byte, v reflect.Value) []byte { + if v.Kind() == reflect.Ptr && v.IsNil() { + return dialect.AppendNull(b) + } + appender := Appender(f.dialect, v.Type()) + return appender(f, b, v) +} + +func (f QueryGen) HasFeature(feature feature.Feature) bool { + return f.dialect.Features().Has(feature) +} + +func (f QueryGen) WithArg(arg NamedArgAppender) QueryGen { + return QueryGen{ + dialect: f.dialect, + args: f.args.WithArg(arg), + } +} + +func (f QueryGen) WithNamedArg(name string, value any) QueryGen { + return QueryGen{ + dialect: f.dialect, + args: f.args.WithArg(&namedArg{name: name, value: value}), + } +} + +func (f QueryGen) FormatQuery(query string, args ...any) string { + if f.IsNop() || (args == nil && f.args == nil) || strings.IndexByte(query, '?') == -1 { + return query + } + return internal.String(f.AppendQuery(nil, query, args...)) +} + +func (f QueryGen) AppendQuery(dst []byte, query string, args ...any) []byte { + if f.IsNop() || (args == nil && f.args == nil) || strings.IndexByte(query, '?') == -1 { + return append(dst, query...) 
+ } + return f.append(dst, parser.NewString(query), args) +} + +func (f QueryGen) append(dst []byte, p *parser.Parser, args []any) []byte { + var namedArgs NamedArgAppender + if len(args) == 1 { + if v, ok := args[0].(NamedArgAppender); ok { + namedArgs = v + } else if v, ok := newStructArgs(f, args[0]); ok { + namedArgs = v + } + } + + var argIndex int + for p.Valid() { + b, ok := p.ReadSep('?') + if !ok { + dst = append(dst, b...) + continue + } + if len(b) > 0 && b[len(b)-1] == '\\' { + dst = append(dst, b[:len(b)-1]...) + dst = append(dst, '?') + continue + } + dst = append(dst, b...) + + name, numeric := p.ReadIdentifier() + if name != "" { + if numeric { + idx, err := strconv.Atoi(name) + if err != nil { + goto restore_arg + } + + if idx >= len(args) { + goto restore_arg + } + + dst = f.appendArg(dst, args[idx]) + continue + } + + if namedArgs != nil { + dst, ok = namedArgs.AppendNamedArg(f, dst, name) + if ok { + continue + } + } + + dst, ok = f.args.AppendNamedArg(f, dst, name) + if ok { + continue + } + + restore_arg: + dst = append(dst, '?') + dst = append(dst, name...) + continue + } + + if argIndex >= len(args) { + dst = append(dst, '?') + continue + } + + arg := args[argIndex] + argIndex++ + + dst = f.appendArg(dst, arg) + } + + return dst +} + +func (gen QueryGen) appendArg(b []byte, arg any) []byte { + switch arg := arg.(type) { + case QueryAppender: + bb, err := arg.AppendQuery(gen, b) + if err != nil { + return dialect.AppendError(b, err) + } + return bb + default: + return gen.Append(b, arg) + } +} + +//------------------------------------------------------------------------------ + +type NamedArgAppender interface { + AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool) +} + +type namedArgList struct { + arg NamedArgAppender + next *namedArgList +} + +func (l *namedArgList) WithArg(arg NamedArgAppender) *namedArgList { + return &namedArgList{ + arg: arg, + next: l, + } +} + +func (l *namedArgList) AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool) { + for l != nil && l.arg != nil { + if b, ok := l.arg.AppendNamedArg(gen, b, name); ok { + return b, true + } + l = l.next + } + return b, false +} + +//------------------------------------------------------------------------------ + +type namedArg struct { + name string + value any +} + +var _ NamedArgAppender = (*namedArg)(nil) + +func (a *namedArg) AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool) { + if a.name == name { + return gen.appendArg(b, a.value), true + } + return b, false +} + +//------------------------------------------------------------------------------ + +type structArgs struct { + table *Table + strct reflect.Value +} + +var _ NamedArgAppender = (*structArgs)(nil) + +func newStructArgs(gen QueryGen, strct any) (*structArgs, bool) { + v := reflect.ValueOf(strct) + if !v.IsValid() { + return nil, false + } + + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return nil, false + } + + return &structArgs{ + table: gen.Dialect().Tables().Get(v.Type()), + strct: v, + }, true +} + +func (m *structArgs) AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool) { + return m.table.AppendNamedArg(gen, b, name, m.strct) +} diff --git a/vendor/github.com/uptrace/bun/schema/reflect.go b/vendor/github.com/uptrace/bun/schema/reflect.go new file mode 100644 index 0000000..3435fa1 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/reflect.go @@ -0,0 +1,51 @@ +package schema + +import ( + "database/sql/driver" + "encoding/json" + "net" + "net/netip" + 
"reflect" + "time" +) + +var ( + bytesType = reflect.TypeFor[[]byte]() + timePtrType = reflect.TypeFor[*time.Time]() + timeType = reflect.TypeFor[time.Time]() + ipType = reflect.TypeFor[net.IP]() + ipNetType = reflect.TypeFor[net.IPNet]() + netipPrefixType = reflect.TypeFor[netip.Prefix]() + netipAddrType = reflect.TypeFor[netip.Addr]() + jsonRawMessageType = reflect.TypeFor[json.RawMessage]() + + driverValuerType = reflect.TypeFor[driver.Valuer]() + queryAppenderType = reflect.TypeFor[QueryAppender]() + jsonMarshalerType = reflect.TypeFor[json.Marshaler]() +) + +func indirectType(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, ok bool) { + if len(index) == 1 { + return v.Field(index[0]), true + } + + for i, idx := range index { + if i > 0 { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return v, false + } + v = v.Elem() + } + } + v = v.Field(idx) + } + return v, true +} diff --git a/vendor/github.com/uptrace/bun/schema/relation.go b/vendor/github.com/uptrace/bun/schema/relation.go new file mode 100644 index 0000000..0711635 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/relation.go @@ -0,0 +1,84 @@ +package schema + +import ( + "fmt" +) + +const ( + InvalidRelation = iota + HasOneRelation + BelongsToRelation + HasManyRelation + ManyToManyRelation +) + +type Relation struct { + Type int + Field *Field // Has the bun tag defining this relation. + + // Base and Join can be explained with this query: + // + // SELECT * FROM base_table JOIN join_table + JoinTable *Table + BasePKs []*Field + JoinPKs []*Field + OnUpdate string + OnDelete string + Condition []string + + PolymorphicField *Field + PolymorphicValue string + + M2MTable *Table + M2MBasePKs []*Field + M2MJoinPKs []*Field +} + +// References returns true if the table which defines this Relation +// needs to declare a foreign key constraint, as is the case +// for 'has-one' and 'belongs-to' relations. For other relations, +// the constraint is created either in the referencing table (1:N, 'has-many' relations) +// or the junction table (N:N, 'm2m' relations). +// +// Usage of `rel:` tag does not always imply creation of foreign keys (when WithForeignKeys() is not set) +// and can be used exclusively for joining tables at query time. For example: +// +// type User struct { +// ID int64 `bun:",pk"` +// Profile *Profile `bun:",rel:has-one,join:id=user_id"` +// } +// +// Creating a FK users.id -> profiles.user_id would be confusing and incorrect, +// so for such cases References() returns false. One notable exception to this rule +// is when a Relation is defined in a junction table, in which case it is perfectly +// fine for its primary keys to reference other tables. Consider: +// +// // UsersToGroups maps users to groups they follow. +// type UsersToGroups struct { +// UserID string `bun:"user_id,pk"` // Needs FK to users.id +// GroupID string `bun:"group_id,pk"` // Needs FK to groups.id +// +// User *User `bun:"rel:belongs-to,join:user_id=id"` +// Group *Group `bun:"rel:belongs-to,join:group_id=id"` +// } +// +// Here BooksToReaders has a composite primary key, composed of other primary keys. 
+func (r *Relation) References() bool { + allPK := true + nonePK := true + for _, f := range r.BasePKs { + allPK = allPK && f.IsPK + nonePK = nonePK && !f.IsPK + } + + // Erring on the side of caution, only create foreign keys + // if the referencing columns are part of a composite PK + // in the junction table of the m2m relationship. + effectsM2M := r.Field.Table.IsM2MTable && allPK + + return (r.Type == HasOneRelation || r.Type == BelongsToRelation) && (effectsM2M || nonePK) +} + +func (r *Relation) String() string { + return fmt.Sprintf("relation=%s", r.Field.GoName) +} diff --git a/vendor/github.com/uptrace/bun/schema/scan.go b/vendor/github.com/uptrace/bun/schema/scan.go new file mode 100644 index 0000000..306f55f --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/scan.go @@ -0,0 +1,566 @@ +package schema + +import ( + "bytes" + "database/sql" + "fmt" + "net" + "net/netip" + "reflect" + "strconv" + "strings" + "time" + + "github.com/puzpuzpuz/xsync/v3" + "github.com/vmihailenco/msgpack/v5" + + "github.com/uptrace/bun/dialect/sqltype" + "github.com/uptrace/bun/extra/bunjson" + "github.com/uptrace/bun/internal" +) + +var scannerType = reflect.TypeFor[sql.Scanner]() + +type ScannerFunc func(dest reflect.Value, src any) error + +var scanners []ScannerFunc + +func init() { + scanners = []ScannerFunc{ + reflect.Bool: scanBool, + reflect.Int: scanInt64, + reflect.Int8: scanInt64, + reflect.Int16: scanInt64, + reflect.Int32: scanInt64, + reflect.Int64: scanInt64, + reflect.Uint: scanUint64, + reflect.Uint8: scanUint64, + reflect.Uint16: scanUint64, + reflect.Uint32: scanUint64, + reflect.Uint64: scanUint64, + reflect.Uintptr: scanUint64, + reflect.Float32: scanFloat, + reflect.Float64: scanFloat, + reflect.Complex64: nil, + reflect.Complex128: nil, + reflect.Array: nil, + reflect.Interface: scanInterface, + reflect.Map: scanJSON, + reflect.Ptr: nil, + reflect.Slice: scanJSON, + reflect.String: scanString, + reflect.Struct: scanJSON, + reflect.UnsafePointer: nil, + } +} + +var scannerCache = xsync.NewMapOf[reflect.Type, ScannerFunc]() + +func FieldScanner(dialect Dialect, field *Field) ScannerFunc { + if field.Tag.HasOption("msgpack") { + return scanMsgpack + } + if field.Tag.HasOption("json_use_number") { + return scanJSONUseNumber + } + if field.StructField.Type.Kind() == reflect.Interface { + switch strings.ToUpper(field.UserSQLType) { + case sqltype.JSON, sqltype.JSONB: + return scanJSONIntoInterface + } + } + return Scanner(field.StructField.Type) +} + +func Scanner(typ reflect.Type) ScannerFunc { + if v, ok := scannerCache.Load(typ); ok { + return v + } + + fn := scanner(typ) + + if v, ok := scannerCache.LoadOrStore(typ, fn); ok { + return v + } + return fn +} + +func scanner(typ reflect.Type) ScannerFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if fn := Scanner(typ.Elem()); fn != nil { + return PtrScanner(fn) + } + } + + switch typ { + case bytesType: + return scanBytes + case timeType: + return scanTime + case ipType: + return scanIP + case ipNetType: + return scanIPNet + case netipAddrType: + return scanNetIpAddr + case netipPrefixType: + return scanNetIpPrefix + case jsonRawMessageType: + return scanBytes + } + + if typ.Implements(scannerType) { + return scanScanner + } + + if kind != reflect.Ptr { + ptr := reflect.PointerTo(typ) + if ptr.Implements(scannerType) { + return addrScanner(scanScanner) + } + } + + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + return scanBytes + } + + return scanners[kind] +} + +func scanBool(dest 
reflect.Value, src any) error { + switch src := src.(type) { + case nil: + dest.SetBool(false) + return nil + case bool: + dest.SetBool(src) + return nil + case int64: + dest.SetBool(src != 0) + return nil + case []byte: + f, err := strconv.ParseBool(internal.String(src)) + if err != nil { + return err + } + dest.SetBool(f) + return nil + case string: + f, err := strconv.ParseBool(src) + if err != nil { + return err + } + dest.SetBool(f) + return nil + default: + return scanError(dest.Type(), src) + } +} + +func scanInt64(dest reflect.Value, src any) error { + switch src := src.(type) { + case nil: + dest.SetInt(0) + return nil + case int64: + dest.SetInt(src) + return nil + case uint64: + dest.SetInt(int64(src)) + return nil + case []byte: + n, err := strconv.ParseInt(internal.String(src), 10, 64) + if err != nil { + return err + } + dest.SetInt(n) + return nil + case string: + n, err := strconv.ParseInt(src, 10, 64) + if err != nil { + return err + } + dest.SetInt(n) + return nil + default: + return scanError(dest.Type(), src) + } +} + +func scanUint64(dest reflect.Value, src any) error { + switch src := src.(type) { + case nil: + dest.SetUint(0) + return nil + case uint64: + dest.SetUint(src) + return nil + case int64: + dest.SetUint(uint64(src)) + return nil + case []byte: + n, err := strconv.ParseUint(internal.String(src), 10, 64) + if err != nil { + return err + } + dest.SetUint(n) + return nil + case string: + n, err := strconv.ParseUint(src, 10, 64) + if err != nil { + return err + } + dest.SetUint(n) + return nil + default: + return scanError(dest.Type(), src) + } +} + +func scanFloat(dest reflect.Value, src any) error { + switch src := src.(type) { + case nil: + dest.SetFloat(0) + return nil + case float32: + dest.SetFloat(float64(src)) + return nil + case float64: + dest.SetFloat(src) + return nil + case []byte: + f, err := strconv.ParseFloat(internal.String(src), 64) + if err != nil { + return err + } + dest.SetFloat(f) + return nil + case string: + f, err := strconv.ParseFloat(src, 64) + if err != nil { + return err + } + dest.SetFloat(f) + return nil + default: + return scanError(dest.Type(), src) + } +} + +func scanString(dest reflect.Value, src any) error { + switch src := src.(type) { + case nil: + dest.SetString("") + return nil + case string: + dest.SetString(src) + return nil + case []byte: + dest.SetString(string(src)) + return nil + case time.Time: + dest.SetString(src.Format(time.RFC3339Nano)) + return nil + case int64: + dest.SetString(strconv.FormatInt(src, 10)) + return nil + case uint64: + dest.SetString(strconv.FormatUint(src, 10)) + return nil + case float64: + dest.SetString(strconv.FormatFloat(src, 'G', -1, 64)) + return nil + default: + return scanError(dest.Type(), src) + } +} + +func scanBytes(dest reflect.Value, src any) error { + switch src := src.(type) { + case nil: + dest.SetBytes(nil) + return nil + case string: + dest.SetBytes([]byte(src)) + return nil + case []byte: + clone := make([]byte, len(src)) + copy(clone, src) + + dest.SetBytes(clone) + return nil + default: + return scanError(dest.Type(), src) + } +} + +func scanTime(dest reflect.Value, src any) error { + switch src := src.(type) { + case nil: + destTime := dest.Addr().Interface().(*time.Time) + *destTime = time.Time{} + return nil + case time.Time: + destTime := dest.Addr().Interface().(*time.Time) + *destTime = src + return nil + case string: + srcTime, err := internal.ParseTime(src) + if err != nil { + return err + } + destTime := dest.Addr().Interface().(*time.Time) + *destTime = 
srcTime + return nil + case []byte: + srcTime, err := internal.ParseTime(internal.String(src)) + if err != nil { + return err + } + destTime := dest.Addr().Interface().(*time.Time) + *destTime = srcTime + return nil + default: + return scanError(dest.Type(), src) + } +} + +func scanScanner(dest reflect.Value, src any) error { + return dest.Interface().(sql.Scanner).Scan(src) +} + +func scanMsgpack(dest reflect.Value, src any) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + dec := msgpack.GetDecoder() + defer msgpack.PutDecoder(dec) + + dec.Reset(bytes.NewReader(b)) + return dec.DecodeValue(dest) +} + +func scanJSON(dest reflect.Value, src any) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + return bunjson.Unmarshal(b, dest.Addr().Interface()) +} + +func scanJSONUseNumber(dest reflect.Value, src any) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + dec := bunjson.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + return dec.Decode(dest.Addr().Interface()) +} + +func scanIP(dest reflect.Value, src any) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + ip := net.ParseIP(internal.String(b)) + if ip == nil { + return fmt.Errorf("bun: invalid ip: %q", b) + } + + ptr := dest.Addr().Interface().(*net.IP) + *ptr = ip + + return nil +} + +func scanIPNet(dest reflect.Value, src any) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + _, ipnet, err := net.ParseCIDR(internal.String(b)) + if err != nil { + return err + } + + ptr := dest.Addr().Interface().(*net.IPNet) + *ptr = *ipnet + + return nil +} + +func scanNetIpAddr(dest reflect.Value, src any) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + val, _ := netip.ParseAddr(internal.String(b)) + if !val.IsValid() { + return fmt.Errorf("bun: invalid ip: %q", b) + } + + ptr := dest.Addr().Interface().(*netip.Addr) + *ptr = val + + return nil +} + +func scanNetIpPrefix(dest reflect.Value, src any) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + val, _ := netip.ParsePrefix(internal.String(b)) + if !val.IsValid() { + return fmt.Errorf("bun: invalid prefix: %q", b) + } + + ptr := dest.Addr().Interface().(*netip.Prefix) + *ptr = val + + return nil +} + +func addrScanner(fn ScannerFunc) ScannerFunc { + return func(dest reflect.Value, src any) error { + if !dest.CanAddr() { + return fmt.Errorf("bun: Scan(nonaddressable %T)", dest.Interface()) + } + return fn(dest.Addr(), src) + } +} + +func toBytes(src any) ([]byte, error) { + switch src := src.(type) { + case string: + return internal.Bytes(src), nil + case []byte: + return src, nil + default: + return nil, fmt.Errorf("bun: got %T, wanted []byte or string", src) + } +} + +func PtrScanner(fn ScannerFunc) ScannerFunc { + return func(dest reflect.Value, src any) error { + if src == nil { + if !dest.CanAddr() { + if dest.IsNil() { + return nil + } + return fn(dest.Elem(), src) + } + + // A NULL value must reset an addressable pointer to nil so that + // NULLs round-trip instead of becoming a pointer to a zero value. + if !dest.IsNil() { + dest.Set(reflect.Zero(dest.Type())) + } + return nil + } + + if dest.IsNil() { + dest.Set(reflect.New(dest.Type().Elem())) + } + + if dest.Kind() == reflect.Map { + return fn(dest, src) + } + + return fn(dest.Elem(),
src) + } +} + +func scanNull(dest reflect.Value) error { + if nilable(dest.Kind()) && dest.IsNil() { + return nil + } + dest.Set(reflect.New(dest.Type()).Elem()) + return nil +} + +func scanJSONIntoInterface(dest reflect.Value, src any) error { + if dest.IsNil() { + if src == nil { + return nil + } + + b, err := toBytes(src) + if err != nil { + return err + } + + return bunjson.Unmarshal(b, dest.Addr().Interface()) + } + + dest = dest.Elem() + if fn := Scanner(dest.Type()); fn != nil { + return fn(dest, src) + } + return scanError(dest.Type(), src) +} + +func scanInterface(dest reflect.Value, src any) error { + if dest.IsNil() { + if src == nil { + return nil + } + dest.Set(reflect.ValueOf(src)) + return nil + } + + dest = dest.Elem() + if fn := Scanner(dest.Type()); fn != nil { + return fn(dest, src) + } + return scanError(dest.Type(), src) +} + +func nilable(kind reflect.Kind) bool { + switch kind { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +func scanError(dest reflect.Type, src any) error { + return fmt.Errorf("bun: can't scan %#v (%T) into %s", src, src, dest.String()) +} diff --git a/vendor/github.com/uptrace/bun/schema/sqlfmt.go b/vendor/github.com/uptrace/bun/schema/sqlfmt.go new file mode 100644 index 0000000..cb72872 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/sqlfmt.go @@ -0,0 +1,133 @@ +package schema + +import ( + "log/slog" + "strings" + + "github.com/uptrace/bun/internal" +) + +type QueryAppender interface { + AppendQuery(gen QueryGen, b []byte) ([]byte, error) +} + +type ColumnsAppender interface { + AppendColumns(gen QueryGen, b []byte) ([]byte, error) +} + +//------------------------------------------------------------------------------ + +// Safe represents a safe SQL query. +type Safe string + +var _ QueryAppender = (*Safe)(nil) + +func (s Safe) AppendQuery(gen QueryGen, b []byte) ([]byte, error) { + return append(b, s...), nil +} + +//------------------------------------------------------------------------------ + +// Name represents a single SQL name, for example, a column name. +type Name string + +var _ QueryAppender = (*Name)(nil) + +func (s Name) AppendQuery(gen QueryGen, b []byte) ([]byte, error) { + return gen.AppendName(b, string(s)), nil +} + +//------------------------------------------------------------------------------ + +// Ident represents a SQL identifier, for example, +// a fully qualified column name such as `table_name.col_name`. +type Ident string + +var _ QueryAppender = (*Ident)(nil) + +func (s Ident) AppendQuery(gen QueryGen, b []byte) ([]byte, error) { + return gen.AppendIdent(b, string(s)), nil +} + +//------------------------------------------------------------------------------ + +// NOTE: It should not be modified after creation. 
+type QueryWithArgs struct { + Query string + Args []any +} + +var _ QueryAppender = QueryWithArgs{} + +func SafeQuery(query string, args []any) QueryWithArgs { + if args == nil { + args = make([]any, 0) + } else if len(query) > 0 && strings.IndexByte(query, '?') == -1 { + internal.Warn.Printf("query %q has %v args, but no placeholders", query, args) + } + return QueryWithArgs{ + Query: query, + Args: args, + } +} + +func UnsafeIdent(ident string) QueryWithArgs { + return QueryWithArgs{Query: ident} +} + +func (q QueryWithArgs) IsZero() bool { + return q.Query == "" && q.Args == nil +} + +func (q QueryWithArgs) AppendQuery(gen QueryGen, b []byte) ([]byte, error) { + if q.Args == nil { + return gen.AppendIdent(b, q.Query), nil + } + return gen.AppendQuery(b, q.Query, q.Args...), nil +} + +//------------------------------------------------------------------------------ + +type Order string + +const ( + OrderNone Order = "" + OrderAsc Order = "ASC" + OrderAscNullsFirst Order = "ASC NULLS FIRST" + OrderAscNullsLast Order = "ASC NULLS LAST" + OrderDesc Order = "DESC" + OrderDescNullsFirst Order = "DESC NULLS FIRST" + OrderDescNullsLast Order = "DESC NULLS LAST" +) + +func (s Order) AppendQuery(gen QueryGen, b []byte) ([]byte, error) { + return AppendOrder(b, s), nil +} + +func AppendOrder(b []byte, sortDir Order) []byte { + switch sortDir { + case OrderAsc, OrderDesc, + OrderAscNullsFirst, OrderAscNullsLast, + OrderDescNullsFirst, OrderDescNullsLast: + return append(b, sortDir...) + case OrderNone: + return b + default: + slog.Error("unsupported sort direction", slog.String("sort_dir", string(sortDir))) + return b + } +} + +//------------------------------------------------------------------------------ + +type QueryWithSep struct { + QueryWithArgs + Sep string +} + +func SafeQueryWithSep(query string, args []any, sep string) QueryWithSep { + return QueryWithSep{ + QueryWithArgs: SafeQuery(query, args), + Sep: sep, + } +} diff --git a/vendor/github.com/uptrace/bun/schema/sqltype.go b/vendor/github.com/uptrace/bun/schema/sqltype.go new file mode 100644 index 0000000..eb42db7 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/sqltype.go @@ -0,0 +1,141 @@ +package schema + +import ( + "bytes" + "database/sql" + "encoding/json" + "reflect" + "time" + + "github.com/uptrace/bun/dialect" + "github.com/uptrace/bun/dialect/sqltype" + "github.com/uptrace/bun/internal" +) + +var ( + bunNullTimeType = reflect.TypeFor[NullTime]() + nullTimeType = reflect.TypeFor[sql.NullTime]() + nullBoolType = reflect.TypeFor[sql.NullBool]() + nullFloatType = reflect.TypeFor[sql.NullFloat64]() + nullIntType = reflect.TypeFor[sql.NullInt64]() + nullStringType = reflect.TypeFor[sql.NullString]() +) + +var sqlTypes = []string{ + reflect.Bool: sqltype.Boolean, + reflect.Int: sqltype.BigInt, + reflect.Int8: sqltype.SmallInt, + reflect.Int16: sqltype.SmallInt, + reflect.Int32: sqltype.Integer, + reflect.Int64: sqltype.BigInt, + reflect.Uint: sqltype.BigInt, + reflect.Uint8: sqltype.SmallInt, + reflect.Uint16: sqltype.SmallInt, + reflect.Uint32: sqltype.Integer, + reflect.Uint64: sqltype.BigInt, + reflect.Uintptr: sqltype.BigInt, + reflect.Float32: sqltype.Real, + reflect.Float64: sqltype.DoublePrecision, + reflect.Complex64: "", + reflect.Complex128: "", + reflect.Array: "", + reflect.Interface: "", + reflect.Map: sqltype.VarChar, + reflect.Ptr: "", + reflect.Slice: sqltype.VarChar, + reflect.String: sqltype.VarChar, + reflect.Struct: sqltype.VarChar, +} + +func DiscoverSQLType(typ reflect.Type) string { + switch typ { + 
case timeType, nullTimeType, bunNullTimeType: + return sqltype.Timestamp + case nullBoolType: + return sqltype.Boolean + case nullFloatType: + return sqltype.DoublePrecision + case nullIntType: + return sqltype.BigInt + case nullStringType: + return sqltype.VarChar + case jsonRawMessageType: + return sqltype.JSON + } + + switch typ.Kind() { + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return sqltype.Blob + } + } + + return sqlTypes[typ.Kind()] +} + +//------------------------------------------------------------------------------ + +var jsonNull = []byte("null") + +// NullTime is a time.Time wrapper that marshals zero time as JSON null and SQL NULL. +type NullTime struct { + time.Time +} + +var ( + _ json.Marshaler = (*NullTime)(nil) + _ json.Unmarshaler = (*NullTime)(nil) + _ sql.Scanner = (*NullTime)(nil) + _ QueryAppender = (*NullTime)(nil) +) + +func (tm NullTime) MarshalJSON() ([]byte, error) { + if tm.IsZero() { + return jsonNull, nil + } + return tm.Time.MarshalJSON() +} + +func (tm *NullTime) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, jsonNull) { + tm.Time = time.Time{} + return nil + } + return tm.Time.UnmarshalJSON(b) +} + +func (tm NullTime) AppendQuery(gen QueryGen, b []byte) ([]byte, error) { + if tm.IsZero() { + return dialect.AppendNull(b), nil + } + return gen.Dialect().AppendTime(b, tm.Time), nil +} + +func (tm *NullTime) Scan(src any) error { + if src == nil { + tm.Time = time.Time{} + return nil + } + + switch src := src.(type) { + case time.Time: + tm.Time = src + return nil + case string: + newtm, err := internal.ParseTime(src) + if err != nil { + return err + } + tm.Time = newtm + return nil + case []byte: + newtm, err := internal.ParseTime(internal.String(src)) + if err != nil { + return err + } + tm.Time = newtm + return nil + default: + return scanError(bunNullTimeType, src) + } +} diff --git a/vendor/github.com/uptrace/bun/schema/table.go b/vendor/github.com/uptrace/bun/schema/table.go new file mode 100644 index 0000000..0d36371 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/table.go @@ -0,0 +1,1130 @@ +package schema + +import ( + "cmp" + "database/sql" + "fmt" + "reflect" + "slices" + "strings" + "time" + + "github.com/jinzhu/inflection" + + "github.com/uptrace/bun/dialect/feature" + "github.com/uptrace/bun/internal" + "github.com/uptrace/bun/internal/tagparser" +) + +const ( + beforeAppendModelHookFlag internal.Flag = 1 << iota + beforeScanRowHookFlag + afterScanRowHookFlag +) + +var ( + baseModelType = reflect.TypeFor[BaseModel]() + tableNameInflector = inflection.Plural +) + +type BaseModel struct{} + +// SetTableNameInflector overrides the default func that pluralizes +// model name to get table name, e.g. my_article becomes my_articles. +func SetTableNameInflector(fn func(string) string) { + tableNameInflector = fn +} + +// Table represents a SQL table created from Go struct. +type Table struct { + dialect Dialect + + Type reflect.Type + ZeroValue reflect.Value // reflect.Struct + ZeroIface any // struct pointer + + TypeName string + ModelName string + + Schema string + Name string + SQLName Safe + SQLNameForSelects Safe + Alias string + SQLAlias Safe + + allFields []*Field // all fields including scanonly + Fields []*Field // PKs + DataFields + PKs []*Field + DataFields []*Field + relFields []*Field + + FieldMap map[string]*Field + StructMap map[string]*structField + + IsM2MTable bool // If true, this table is the "junction table" of an m2m relation. 
+ Relations map[string]*Relation + Unique map[string][]*Field + + SoftDeleteField *Field + UpdateSoftDeleteField func(fv reflect.Value, tm time.Time) error + + flags internal.Flag +} + +type structField struct { + Index []int + Table *Table +} + +func (table *Table) init(dialect Dialect, typ reflect.Type) { + table.dialect = dialect + table.Type = typ + table.ZeroValue = reflect.New(table.Type).Elem() + table.ZeroIface = reflect.New(table.Type).Interface() + table.TypeName = internal.ToExported(table.Type.Name()) + table.ModelName = internal.Underscore(table.Type.Name()) + tableName := tableNameInflector(table.ModelName) + table.setName(tableName) + table.Alias = table.ModelName + table.SQLAlias = table.quoteIdent(table.ModelName) + table.Schema = dialect.DefaultSchema() + + table.Fields = make([]*Field, 0, typ.NumField()) + table.FieldMap = make(map[string]*Field, typ.NumField()) + table.processFields(typ) + + hooks := []struct { + typ reflect.Type + flag internal.Flag + }{ + {beforeAppendModelHookType, beforeAppendModelHookFlag}, + + {beforeScanRowHookType, beforeScanRowHookFlag}, + {afterScanRowHookType, afterScanRowHookFlag}, + } + + typ = reflect.PointerTo(table.Type) + for _, hook := range hooks { + if typ.Implements(hook.typ) { + table.flags = table.flags.Set(hook.flag) + } + } +} + +func (t *Table) processFields(typ reflect.Type) { + type embeddedField struct { + prefix string + index []int + unexported bool + subtable *Table + subfield *Field + } + + names := make(map[string]struct{}) + embedded := make([]embeddedField, 0, 10) + ebdStructs := make(map[string]*structField, 0) + + for i, n := 0, typ.NumField(); i < n; i++ { + sf := typ.Field(i) + unexported := sf.PkgPath != "" + + tagstr := sf.Tag.Get("bun") + if tagstr == "-" { + names[sf.Name] = struct{}{} + continue + } + tag := tagparser.Parse(tagstr) + + if unexported && !sf.Anonymous { // unexported + continue + } + + if sf.Anonymous { + if sf.Name == "BaseModel" && sf.Type == baseModelType { + t.processBaseModelField(sf) + continue + } + + sfType := sf.Type + if sfType.Kind() == reflect.Ptr { + sfType = sfType.Elem() + } + + if sfType.Kind() != reflect.Struct { // ignore unexported non-struct types + continue + } + + subtable := t.dialect.Tables().InProgress(sfType) + + for _, subfield := range subtable.allFields { + embedded = append(embedded, embeddedField{ + index: sf.Index, + unexported: unexported, + subtable: subtable, + subfield: subfield, + }) + } + if len(subtable.StructMap) > 0 { + for k, v := range subtable.StructMap { + // NOTE: conflict Struct name + if _, ok := ebdStructs[k]; !ok { + ebdStructs[k] = &structField{ + Index: makeIndex(sf.Index, v.Index), + Table: v.Table, + } + } + } + } + + if tagstr != "" { + tag := tagparser.Parse(tagstr) + if tag.HasOption("inherit") || tag.HasOption("extend") { + t.Name = subtable.Name + t.TypeName = subtable.TypeName + t.SQLName = subtable.SQLName + t.SQLNameForSelects = subtable.SQLNameForSelects + t.Alias = subtable.Alias + t.SQLAlias = subtable.SQLAlias + t.ModelName = subtable.ModelName + } + } + + continue + } + + if prefix, ok := tag.Option("embed"); ok { + fieldType := indirectType(sf.Type) + if fieldType.Kind() != reflect.Struct { + panic(fmt.Errorf("bun: embed %s.%s: got %s, wanted reflect.Struct", + t.TypeName, sf.Name, fieldType.Kind())) + } + + subtable := t.dialect.Tables().InProgress(fieldType) + for _, subfield := range subtable.allFields { + embedded = append(embedded, embeddedField{ + prefix: prefix, + index: sf.Index, + unexported: unexported, + subtable: 
subtable, + subfield: subfield, + }) + } + if len(subtable.StructMap) > 0 { + for k, v := range subtable.StructMap { + // NOTE: conflict Struct name + k = prefix + k + if _, ok := ebdStructs[k]; !ok { + ebdStructs[k] = &structField{ + Index: makeIndex(sf.Index, v.Index), + Table: subtable, + } + } + } + } + continue + } + + field := t.newField(sf, tag) + t.addField(field) + names[field.Name] = struct{}{} + + if field.IndirectType.Kind() == reflect.Struct { + if t.StructMap == nil { + t.StructMap = make(map[string]*structField) + } + t.StructMap[field.Name] = &structField{ + Index: field.Index, + Table: t.dialect.Tables().InProgress(field.IndirectType), + } + } + } + + // Only unambiguous embedded fields must be serialized. + ambiguousNames := make(map[string]int) + ambiguousTags := make(map[string]int) + + // Embedded types can never override a field that was already present at + // the top-level. + for name := range names { + ambiguousNames[name]++ + ambiguousTags[name]++ + } + + for _, f := range embedded { + ambiguousNames[f.prefix+f.subfield.Name]++ + if !f.subfield.Tag.IsZero() { + ambiguousTags[f.prefix+f.subfield.Name]++ + } + } + + for _, embfield := range embedded { + if ambiguousNames[embfield.prefix+embfield.subfield.Name] > 1 && + !(!embfield.subfield.Tag.IsZero() && ambiguousTags[embfield.prefix+embfield.subfield.Name] == 1) { + continue // ambiguous embedded field + } + + subfield := embfield.subfield.Clone() + + subfield.Index = makeIndex(embfield.index, subfield.Index) + if embfield.prefix != "" { + subfield.Name = embfield.prefix + subfield.Name + subfield.SQLName = t.quoteIdent(subfield.Name) + } + t.addField(subfield) + if v, ok := subfield.Tag.Options["unique"]; ok { + t.addUnique(subfield, embfield.prefix, v) + } + } + + if len(ebdStructs) > 0 && t.StructMap == nil { + t.StructMap = make(map[string]*structField) + } + for name, sfield := range ebdStructs { + if _, ok := t.StructMap[name]; !ok { + t.StructMap[name] = sfield + } + } + + if len(embedded) > 0 { + // https://github.com/uptrace/bun/issues/1095 + // < v1.2, all fields follow the order corresponding to the struct + // >= v1.2, < v1.2.8, fields of nested structs have been moved to the end. + // >= v1.2.8, The default behavior remains the same as initially, + sortFieldsByStruct(t.allFields) + sortFieldsByStruct(t.Fields) + sortFieldsByStruct(t.PKs) + sortFieldsByStruct(t.DataFields) + } +} + +func sortFieldsByStruct(fields []*Field) { + slices.SortFunc(fields, func(left, right *Field) int { + for k := 0; k < len(left.Index) && k < len(right.Index); k++ { + if res := cmp.Compare(left.Index[k], right.Index[k]); res != 0 { + return res + } + } + // NOTE: should not reach + return 0 + }) +} + +func (t *Table) addUnique(field *Field, prefix string, tagOptions []string) { + var names []string + if len(tagOptions) == 1 { + // Split the value by comma, this will allow multiple names to be specified. + // We can use this to create multiple named unique constraints where a single column + // might be included in multiple constraints. 
+ names = strings.Split(tagOptions[0], ",") + } else { + names = tagOptions + } + + for _, uname := range names { + if t.Unique == nil { + t.Unique = make(map[string][]*Field) + } + if uname != "" && prefix != "" { + uname = prefix + uname + } + t.Unique[uname] = append(t.Unique[uname], field) + } +} + +func (t *Table) setName(name string) { + t.Name = name + t.SQLName = t.quoteIdent(name) + t.SQLNameForSelects = t.quoteIdent(name) + if t.SQLAlias == "" { + t.Alias = name + t.SQLAlias = t.quoteIdent(name) + } +} + +func (t *Table) String() string { + return "model=" + t.TypeName +} + +func (t *Table) CheckPKs() error { + if len(t.PKs) == 0 { + return fmt.Errorf("bun: %s does not have primary keys", t) + } + return nil +} + +func (t *Table) addField(field *Field) { + t.allFields = append(t.allFields, field) + + if field.Tag.HasOption("rel") || field.Tag.HasOption("m2m") { + t.relFields = append(t.relFields, field) + return + } + + if field.Tag.HasOption("join") { + internal.Warn.Printf( + `%s.%s "join" option must come together with "rel" option`, + t.TypeName, field.GoName, + ) + } + + t.FieldMap[field.Name] = field + if altName, ok := field.Tag.Option("alt"); ok { + t.FieldMap[altName] = field + } + + if field.Tag.HasOption("scanonly") { + return + } + + if _, ok := field.Tag.Options["soft_delete"]; ok { + t.SoftDeleteField = field + t.UpdateSoftDeleteField = softDeleteFieldUpdater(field) + } + + t.Fields = append(t.Fields, field) + if field.IsPK { + t.PKs = append(t.PKs, field) + } else { + t.DataFields = append(t.DataFields, field) + } +} + +func (t *Table) LookupField(name string) *Field { + if field, ok := t.FieldMap[name]; ok { + return field + } + + table := t + var index []int + for { + structName, columnName, ok := strings.Cut(name, "__") + if !ok { + field, ok := table.FieldMap[name] + if !ok { + return nil + } + return field.WithIndex(index) + } + name = columnName + + strct := table.StructMap[structName] + if strct == nil { + return nil + } + table = strct.Table + index = append(index, strct.Index...) + } +} + +func (t *Table) HasField(name string) bool { + _, ok := t.FieldMap[name] + return ok +} + +func (t *Table) Field(name string) (*Field, error) { + field, ok := t.FieldMap[name] + if !ok { + return nil, fmt.Errorf("bun: %s does not have column=%s", t, name) + } + return field, nil +} + +func (t *Table) fieldByGoName(name string) *Field { + for _, f := range t.allFields { + if f.GoName == name { + return f + } + } + return nil +} + +func (t *Table) processBaseModelField(f reflect.StructField) { + tag := tagparser.Parse(f.Tag.Get("bun")) + + if isKnownTableOption(tag.Name) { + internal.Warn.Printf( + "%s.%s tag name %q is also an option name, is it a mistake? Try table:%s.", + t.TypeName, f.Name, tag.Name, tag.Name, + ) + } + + for name := range tag.Options { + if !isKnownTableOption(name) { + internal.Warn.Printf("%s.%s has unknown tag option: %q", t.TypeName, f.Name, name) + } + } + + if tag.Name != "" { + schema, _ := t.schemaFromTagName(tag.Name) + t.Schema = schema + + // Eventually, we should only assign the "table" portion as the table name, + // which will also require a change in how the table name is appended to queries. + // Until that is done, set table name to tag.Name. 
+ t.setName(tag.Name) + } + + if s, ok := tag.Option("table"); ok { + schema, _ := t.schemaFromTagName(s) + t.Schema = schema + t.setName(s) + } + + if s, ok := tag.Option("select"); ok { + t.SQLNameForSelects = t.quoteTableName(s) + } + + if s, ok := tag.Option("alias"); ok { + t.Alias = s + t.SQLAlias = t.quoteIdent(s) + } +} + +// schemaFromTagName splits the bun.BaseModel tag name into schema and table name +// in case it is specified in the "schema"."table" format. +// Assume default schema if one isn't explicitly specified. +func (t *Table) schemaFromTagName(name string) (string, string) { + schema, table := t.dialect.DefaultSchema(), name + if schemaTable := strings.Split(name, "."); len(schemaTable) == 2 { + schema, table = schemaTable[0], schemaTable[1] + } + return schema, table +} + +// nolint +func (t *Table) newField(sf reflect.StructField, tag tagparser.Tag) *Field { + sqlName := internal.Underscore(sf.Name) + if tag.Name != "" && tag.Name != sqlName { + if isKnownFieldOption(tag.Name) { + internal.Warn.Printf( + "%s.%s tag name %q is also an option name, is it a mistake? Try column:%s.", + t.TypeName, sf.Name, tag.Name, tag.Name, + ) + } + sqlName = tag.Name + } + + if s, ok := tag.Option("column"); ok { + sqlName = s + } + + for name := range tag.Options { + if !isKnownFieldOption(name) { + internal.Warn.Printf("%s.%s has unknown tag option: %q", t.TypeName, sf.Name, name) + } + } + + field := &Field{ + Table: t, + StructField: sf, + IsPtr: sf.Type.Kind() == reflect.Ptr, + + Tag: tag, + IndirectType: indirectType(sf.Type), + Index: sf.Index, + + Name: sqlName, + GoName: sf.Name, + SQLName: t.quoteIdent(sqlName), + } + + field.NotNull = tag.HasOption("notnull") + field.NullZero = tag.HasOption("nullzero") + if tag.HasOption("pk") { + field.IsPK = true + field.NotNull = true + } + if tag.HasOption("autoincrement") { + field.AutoIncrement = true + field.NotNull = true + field.NullZero = true + } + if tag.HasOption("identity") { + field.Identity = true + } + + if v, ok := tag.Options["unique"]; ok { + t.addUnique(field, "", v) + } + if s, ok := tag.Option("default"); ok { + field.SQLDefault = s + } + if s, ok := field.Tag.Option("type"); ok { + field.UserSQLType = s + } + field.DiscoveredSQLType = DiscoverSQLType(field.IndirectType) + field.Append = FieldAppender(t.dialect, field) + field.Scan = FieldScanner(t.dialect, field) + field.IsZero = zeroChecker(field.StructField.Type) + + return field +} + +//--------------------------------------------------------------------------------------- + +func (t *Table) initRelations() { + for _, field := range t.relFields { + t.processRelation(field) + } + t.relFields = nil +} + +func (t *Table) processRelation(field *Field) { + if rel, ok := field.Tag.Option("rel"); ok { + t.initRelation(field, rel) + return + } + if field.Tag.HasOption("m2m") { + t.addRelation(t.m2mRelation(field)) + return + } + panic("not reached") +} + +func (t *Table) initRelation(field *Field, rel string) { + switch rel { + case "belongs-to": + t.addRelation(t.belongsToRelation(field)) + case "has-one": + t.addRelation(t.hasOneRelation(field)) + case "has-many": + t.addRelation(t.hasManyRelation(field)) + default: + panic(fmt.Errorf("bun: unknown relation=%s on field=%s", rel, field.GoName)) + } +} + +func (t *Table) addRelation(rel *Relation) { + if t.Relations == nil { + t.Relations = make(map[string]*Relation) + } + _, ok := t.Relations[rel.Field.GoName] + if ok { + panic(fmt.Errorf("%s already has %s", t, rel)) + } + t.Relations[rel.Field.GoName] = rel +} + 
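+// Editor's note: an illustrative sketch, not part of the upstream bun sources.
+// The model definitions below show the tag shapes that processRelation and
+// initRelation dispatch on; all type, table, and column names are hypothetical.
+//
+//	type Profile struct {
+//		ID     int64 `bun:",pk"`
+//		UserID int64
+//	}
+//
+//	type Order struct {
+//		ID     int64 `bun:",pk"`
+//		UserID int64
+//	}
+//
+//	type Group struct {
+//		ID int64 `bun:",pk"`
+//	}
+//
+//	type UsersToGroups struct {
+//		UserID  int64  `bun:"user_id,pk"`
+//		GroupID int64  `bun:"group_id,pk"`
+//		User    *User  `bun:"rel:belongs-to,join:user_id=id"` // belongsToRelation
+//		Group   *Group `bun:"rel:belongs-to,join:group_id=id"`
+//	}
+//
+//	type User struct {
+//		ID      int64    `bun:",pk"`
+//		Profile *Profile `bun:"rel:has-one,join:id=user_id"`         // hasOneRelation
+//		Orders  []*Order `bun:"rel:has-many,join:id=user_id"`        // hasManyRelation
+//		Groups  []*Group `bun:"m2m:users_to_groups,join:User=Group"` // m2mRelation
+//	}
+//
+// For the m2m case, the junction model must be registered up front
+// (db.RegisterModel) so that m2mRelation can resolve it via Tables().ByName,
+// and the join option names Go fields (join:LeftField=RightField), not columns.
+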
+func (t *Table) belongsToRelation(field *Field) *Relation { + joinTable := t.dialect.Tables().InProgress(field.IndirectType) + if err := joinTable.CheckPKs(); err != nil { + panic(err) + } + + rel := &Relation{ + Type: BelongsToRelation, + Field: field, + JoinTable: joinTable, + } + + if field.Tag.HasOption("join_on") { + rel.Condition = field.Tag.Options["join_on"] + } + + if t.dialect.Features().Has(feature.FKDefaultOnAction) { + rel.OnUpdate = "ON UPDATE NO ACTION" + rel.OnDelete = "ON DELETE NO ACTION" + } + if onUpdate, ok := field.Tag.Options["on_update"]; ok { + if len(onUpdate) > 1 { + panic(fmt.Errorf("bun: %s belongs-to %s: on_update option must be a single field", t.TypeName, field.GoName)) + } + + rule := strings.ToUpper(onUpdate[0]) + if !isKnownFKRule(rule) { + internal.Warn.Printf("bun: %s belongs-to %s: unknown on_update rule %s", t.TypeName, field.GoName, rule) + } + + s := fmt.Sprintf("ON UPDATE %s", rule) + rel.OnUpdate = s + } + + if onDelete, ok := field.Tag.Options["on_delete"]; ok { + if len(onDelete) > 1 { + panic(fmt.Errorf("bun: %s belongs-to %s: on_delete option must be a single field", t.TypeName, field.GoName)) + } + + rule := strings.ToUpper(onDelete[0]) + if !isKnownFKRule(rule) { + internal.Warn.Printf("bun: %s belongs-to %s: unknown on_delete rule %s", t.TypeName, field.GoName, rule) + } + s := fmt.Sprintf("ON DELETE %s", rule) + rel.OnDelete = s + } + + if join, ok := field.Tag.Options["join"]; ok { + baseColumns, joinColumns := parseRelationJoin(join) + for i, baseColumn := range baseColumns { + joinColumn := joinColumns[i] + + if f := t.FieldMap[baseColumn]; f != nil { + rel.BasePKs = append(rel.BasePKs, f) + } else { + panic(fmt.Errorf( + "bun: %s belongs-to %s: %s must have column %s", + t.TypeName, field.GoName, t.TypeName, baseColumn, + )) + } + + if f := joinTable.FieldMap[joinColumn]; f != nil { + rel.JoinPKs = append(rel.JoinPKs, f) + } else { + panic(fmt.Errorf( + "bun: %s belongs-to %s: %s must have column %s", + t.TypeName, field.GoName, joinTable.TypeName, joinColumn, + )) + } + } + return rel + } + + rel.JoinPKs = joinTable.PKs + fkPrefix := internal.Underscore(field.GoName) + "_" + for _, joinPK := range joinTable.PKs { + fkName := fkPrefix + joinPK.Name + if fk := t.FieldMap[fkName]; fk != nil { + rel.BasePKs = append(rel.BasePKs, fk) + continue + } + + if fk := t.FieldMap[joinPK.Name]; fk != nil { + rel.BasePKs = append(rel.BasePKs, fk) + continue + } + + panic(fmt.Errorf( + "bun: %s belongs-to %s: %s must have column %s "+ + "(to override, use join:base_column=join_column tag on %s field)", + t.TypeName, field.GoName, t.TypeName, fkName, field.GoName, + )) + } + return rel +} + +func (t *Table) hasOneRelation(field *Field) *Relation { + if err := t.CheckPKs(); err != nil { + panic(err) + } + + joinTable := t.dialect.Tables().InProgress(field.IndirectType) + rel := &Relation{ + Type: HasOneRelation, + Field: field, + JoinTable: joinTable, + } + + if field.Tag.HasOption("join_on") { + rel.Condition = field.Tag.Options["join_on"] + } + + if join, ok := field.Tag.Options["join"]; ok { + baseColumns, joinColumns := parseRelationJoin(join) + for i, baseColumn := range baseColumns { + if f := t.FieldMap[baseColumn]; f != nil { + rel.BasePKs = append(rel.BasePKs, f) + } else { + panic(fmt.Errorf( + "bun: %s has-one %s: %s must have column %s", + field.GoName, t.TypeName, t.TypeName, baseColumn, + )) + } + + joinColumn := joinColumns[i] + if f := joinTable.FieldMap[joinColumn]; f != nil { + rel.JoinPKs = append(rel.JoinPKs, f) + } else { + 
panic(fmt.Errorf( + "bun: %s has-one %s: %s must have column %s", + field.GoName, t.TypeName, joinTable.TypeName, joinColumn, + )) + } + } + return rel + } + + rel.BasePKs = t.PKs + fkPrefix := internal.Underscore(t.ModelName) + "_" + for _, pk := range t.PKs { + fkName := fkPrefix + pk.Name + if f := joinTable.FieldMap[fkName]; f != nil { + rel.JoinPKs = append(rel.JoinPKs, f) + continue + } + + if f := joinTable.FieldMap[pk.Name]; f != nil { + rel.JoinPKs = append(rel.JoinPKs, f) + continue + } + + panic(fmt.Errorf( + "bun: %s has-one %s: %s must have column %s "+ + "(to override, use join:base_column=join_column tag on %s field)", + field.GoName, t.TypeName, joinTable.TypeName, fkName, field.GoName, + )) + } + return rel +} + +func (t *Table) hasManyRelation(field *Field) *Relation { + if err := t.CheckPKs(); err != nil { + panic(err) + } + if field.IndirectType.Kind() != reflect.Slice { + panic(fmt.Errorf( + "bun: %s.%s has-many relation requires slice, got %q", + t.TypeName, field.GoName, field.IndirectType.Kind(), + )) + } + + joinTable := t.dialect.Tables().InProgress(indirectType(field.IndirectType.Elem())) + polymorphicValue, isPolymorphic := field.Tag.Option("polymorphic") + rel := &Relation{ + Type: HasManyRelation, + Field: field, + JoinTable: joinTable, + } + + if field.Tag.HasOption("join_on") { + rel.Condition = field.Tag.Options["join_on"] + } + + var polymorphicColumn string + + if join, ok := field.Tag.Options["join"]; ok { + baseColumns, joinColumns := parseRelationJoin(join) + for i, baseColumn := range baseColumns { + joinColumn := joinColumns[i] + + if isPolymorphic && baseColumn == "type" { + polymorphicColumn = joinColumn + continue + } + + if f := t.FieldMap[baseColumn]; f != nil { + rel.BasePKs = append(rel.BasePKs, f) + } else { + panic(fmt.Errorf( + "bun: %s has-many %s: %s must have column %s", + t.TypeName, field.GoName, t.TypeName, baseColumn, + )) + } + + if f := joinTable.FieldMap[joinColumn]; f != nil { + rel.JoinPKs = append(rel.JoinPKs, f) + } else { + panic(fmt.Errorf( + "bun: %s has-many %s: %s must have column %s", + t.TypeName, field.GoName, joinTable.TypeName, joinColumn, + )) + } + } + } else { + rel.BasePKs = t.PKs + fkPrefix := internal.Underscore(t.ModelName) + "_" + if isPolymorphic { + polymorphicColumn = fkPrefix + "type" + } + + for _, pk := range t.PKs { + joinColumn := fkPrefix + pk.Name + if fk := joinTable.FieldMap[joinColumn]; fk != nil { + rel.JoinPKs = append(rel.JoinPKs, fk) + continue + } + + if fk := joinTable.FieldMap[pk.Name]; fk != nil { + rel.JoinPKs = append(rel.JoinPKs, fk) + continue + } + + panic(fmt.Errorf( + "bun: %s has-many %s: %s must have column %s "+ + "(to override, use join:base_column=join_column tag on the field %s)", + t.TypeName, field.GoName, joinTable.TypeName, joinColumn, field.GoName, + )) + } + } + + if isPolymorphic { + rel.PolymorphicField = joinTable.FieldMap[polymorphicColumn] + if rel.PolymorphicField == nil { + panic(fmt.Errorf( + "bun: %s has-many %s: %s must have polymorphic column %s", + t.TypeName, field.GoName, joinTable.TypeName, polymorphicColumn, + )) + } + + if polymorphicValue == "" { + polymorphicValue = t.ModelName + } + rel.PolymorphicValue = polymorphicValue + } + + return rel +} + +func (t *Table) m2mRelation(field *Field) *Relation { + if field.IndirectType.Kind() != reflect.Slice { + panic(fmt.Errorf( + "bun: %s.%s m2m relation requires slice, got %q", + t.TypeName, field.GoName, field.IndirectType.Kind(), + )) + } + joinTable := 
t.dialect.Tables().InProgress(indirectType(field.IndirectType.Elem())) + + if err := t.CheckPKs(); err != nil { + panic(err) + } + if err := joinTable.CheckPKs(); err != nil { + panic(err) + } + + m2mTableName, ok := field.Tag.Option("m2m") + if !ok { + panic(fmt.Errorf("bun: %s must have m2m tag option", field.GoName)) + } + + m2mTable := t.dialect.Tables().ByName(m2mTableName) + if m2mTable == nil { + panic(fmt.Errorf( + "bun: can't find m2m %s table (use db.RegisterModel)", + m2mTableName, + )) + } + + rel := &Relation{ + Type: ManyToManyRelation, + Field: field, + JoinTable: joinTable, + M2MTable: m2mTable, + } + m2mTable.markM2M() + + if field.Tag.HasOption("join_on") { + rel.Condition = field.Tag.Options["join_on"] + } + + var leftColumn, rightColumn string + + if join, ok := field.Tag.Options["join"]; ok { + left, right := parseRelationJoin(join) + leftColumn = left[0] + rightColumn = right[0] + } else { + leftColumn = t.TypeName + rightColumn = joinTable.TypeName + } + + leftField := m2mTable.fieldByGoName(leftColumn) + if leftField == nil { + panic(fmt.Errorf( + "bun: %s many-to-many %s: %s must have field %s "+ + "(to override, use tag join:LeftField=RightField on field %s.%s", + t.TypeName, field.GoName, m2mTable.TypeName, leftColumn, t.TypeName, field.GoName, + )) + } + + rightField := m2mTable.fieldByGoName(rightColumn) + if rightField == nil { + panic(fmt.Errorf( + "bun: %s many-to-many %s: %s must have field %s "+ + "(to override, use tag join:LeftField=RightField on field %s.%s", + t.TypeName, field.GoName, m2mTable.TypeName, rightColumn, t.TypeName, field.GoName, + )) + } + + leftRel := m2mTable.belongsToRelation(leftField) + rel.BasePKs = leftRel.JoinPKs + rel.M2MBasePKs = leftRel.BasePKs + + rightRel := m2mTable.belongsToRelation(rightField) + rel.JoinPKs = rightRel.JoinPKs + rel.M2MJoinPKs = rightRel.BasePKs + + return rel +} + +func (t *Table) markM2M() { + t.IsM2MTable = true +} + +//------------------------------------------------------------------------------ + +func (t *Table) Dialect() Dialect { return t.dialect } + +func (t *Table) HasBeforeAppendModelHook() bool { return t.flags.Has(beforeAppendModelHookFlag) } + +func (t *Table) HasBeforeScanRowHook() bool { return t.flags.Has(beforeScanRowHookFlag) } +func (t *Table) HasAfterScanRowHook() bool { return t.flags.Has(afterScanRowHookFlag) } + +//------------------------------------------------------------------------------ + +func (t *Table) AppendNamedArg( + gen QueryGen, b []byte, name string, strct reflect.Value, +) ([]byte, bool) { + if field, ok := t.FieldMap[name]; ok { + return field.AppendValue(gen, b, strct), true + } + return b, false +} + +func (t *Table) quoteTableName(s string) Safe { + // Don't quote if table name contains placeholder (?) or parentheses. 
+ if strings.IndexByte(s, '?') >= 0 || + strings.IndexByte(s, '(') >= 0 || + strings.IndexByte(s, ')') >= 0 { + return Safe(s) + } + return t.quoteIdent(s) +} + +func (t *Table) quoteIdent(s string) Safe { + return Safe(NewQueryGen(t.dialect).AppendIdent(nil, s)) +} + +func isKnownTableOption(name string) bool { + switch name { + case "table", "alias", "select": + return true + } + return false +} + +func isKnownFieldOption(name string) bool { + switch name { + case "column", + "alt", + "type", + "array", + "hstore", + "composite", + "multirange", + "json_use_number", + "msgpack", + "notnull", + "nullzero", + "default", + "unique", + "soft_delete", + "scanonly", + "skipupdate", + + "pk", + "autoincrement", + "rel", + "join", + "join_on", + "on_update", + "on_delete", + "m2m", + "polymorphic", + "identity": + return true + } + return false +} + +func isKnownFKRule(name string) bool { + switch name { + case "CASCADE", + "RESTRICT", + "SET NULL", + "SET DEFAULT": + return true + } + return false +} + +func parseRelationJoin(join []string) ([]string, []string) { + var ss []string + if len(join) == 1 { + ss = strings.Split(join[0], ",") + } else { + ss = join + } + + baseColumns := make([]string, len(ss)) + joinColumns := make([]string, len(ss)) + for i, s := range ss { + ss := strings.Split(strings.TrimSpace(s), "=") + if len(ss) != 2 { + panic(fmt.Errorf("can't parse relation join: %q", join)) + } + baseColumns[i] = ss[0] + joinColumns[i] = ss[1] + } + return baseColumns, joinColumns +} + +//------------------------------------------------------------------------------ + +func softDeleteFieldUpdater(field *Field) func(fv reflect.Value, tm time.Time) error { + typ := field.StructField.Type + + switch typ { + case timeType: + return func(fv reflect.Value, tm time.Time) error { + ptr := fv.Addr().Interface().(*time.Time) + *ptr = tm + return nil + } + case nullTimeType: + return func(fv reflect.Value, tm time.Time) error { + ptr := fv.Addr().Interface().(*sql.NullTime) + *ptr = sql.NullTime{Time: tm} + return nil + } + case nullIntType: + return func(fv reflect.Value, tm time.Time) error { + ptr := fv.Addr().Interface().(*sql.NullInt64) + *ptr = sql.NullInt64{Int64: tm.UnixNano()} + return nil + } + } + + switch field.IndirectType.Kind() { + case reflect.Int64: + return func(fv reflect.Value, tm time.Time) error { + ptr := fv.Addr().Interface().(*int64) + *ptr = tm.UnixNano() + return nil + } + case reflect.Ptr: + typ = typ.Elem() + default: + return softDeleteFieldUpdaterFallback(field) + } + + switch typ { //nolint:gocritic + case timeType: + return func(fv reflect.Value, tm time.Time) error { + fv.Set(reflect.ValueOf(&tm)) + return nil + } + } + + switch typ.Kind() { //nolint:gocritic + case reflect.Int64: + return func(fv reflect.Value, tm time.Time) error { + utime := tm.UnixNano() + fv.Set(reflect.ValueOf(&utime)) + return nil + } + } + + return softDeleteFieldUpdaterFallback(field) +} + +func softDeleteFieldUpdaterFallback(field *Field) func(fv reflect.Value, tm time.Time) error { + return func(fv reflect.Value, tm time.Time) error { + return field.ScanWithCheck(fv, tm) + } +} + +func makeIndex(a, b []int) []int { + dest := make([]int, 0, len(a)+len(b)) + dest = append(dest, a...) + dest = append(dest, b...) 
+ return dest +} diff --git a/vendor/github.com/uptrace/bun/schema/tables.go b/vendor/github.com/uptrace/bun/schema/tables.go new file mode 100644 index 0000000..272fd85 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/tables.go @@ -0,0 +1,114 @@ +package schema + +import ( + "fmt" + "reflect" + "sync" + + "github.com/puzpuzpuz/xsync/v3" +) + +type Tables struct { + dialect Dialect + + mu sync.Mutex + tables *xsync.MapOf[reflect.Type, *Table] + + inProgress map[reflect.Type]*Table +} + +func NewTables(dialect Dialect) *Tables { + return &Tables{ + dialect: dialect, + tables: xsync.NewMapOf[reflect.Type, *Table](), + inProgress: make(map[reflect.Type]*Table), + } +} + +func (t *Tables) Register(models ...any) { + for _, model := range models { + _ = t.Get(reflect.TypeOf(model).Elem()) + } +} + +func (t *Tables) Get(typ reflect.Type) *Table { + typ = indirectType(typ) + if typ.Kind() != reflect.Struct { + panic(fmt.Errorf("got %s, wanted %s", typ.Kind(), reflect.Struct)) + } + + if v, ok := t.tables.Load(typ); ok { + return v + } + + t.mu.Lock() + defer t.mu.Unlock() + + if v, ok := t.tables.Load(typ); ok { + return v + } + + table := t.InProgress(typ) + table.initRelations() + + t.dialect.OnTable(table) + for _, field := range table.FieldMap { + if field.UserSQLType == "" { + field.UserSQLType = field.DiscoveredSQLType + } + if field.CreateTableSQLType == "" { + field.CreateTableSQLType = field.UserSQLType + } + } + + t.tables.Store(typ, table) + return table +} + +func (t *Tables) InProgress(typ reflect.Type) *Table { + if table, ok := t.inProgress[typ]; ok { + return table + } + + table := new(Table) + t.inProgress[typ] = table + table.init(t.dialect, typ) + + return table +} + +// ByModel gets the table by its Go name. +func (t *Tables) ByModel(name string) *Table { + var found *Table + t.tables.Range(func(typ reflect.Type, table *Table) bool { + if table.TypeName == name { + found = table + return false + } + return true + }) + return found +} + +// ByName gets the table by its SQL name. +func (t *Tables) ByName(name string) *Table { + var found *Table + t.tables.Range(func(typ reflect.Type, table *Table) bool { + if table.Name == name { + found = table + return false + } + return true + }) + return found +} + +// All returns all registered tables. 
+func (t *Tables) All() []*Table { + var found []*Table + t.tables.Range(func(typ reflect.Type, table *Table) bool { + found = append(found, table) + return true + }) + return found +} diff --git a/vendor/github.com/uptrace/bun/schema/zerochecker.go b/vendor/github.com/uptrace/bun/schema/zerochecker.go new file mode 100644 index 0000000..90516e3 --- /dev/null +++ b/vendor/github.com/uptrace/bun/schema/zerochecker.go @@ -0,0 +1,161 @@ +package schema + +import ( + "database/sql/driver" + "reflect" +) + +var isZeroerType = reflect.TypeFor[isZeroer]() + +type isZeroer interface { + IsZero() bool +} + +func isZero(v any) bool { + switch v := v.(type) { + case isZeroer: + return v.IsZero() + case string: + return v == "" + case []byte: + return v == nil + case int: + return v == 0 + case int64: + return v == 0 + case uint: + return v == 0 + case uint64: + return v == 0 + case float32: + return v == 0 + case float64: + return v == 0 + case int8: + return v == 0 + case int16: + return v == 0 + case int32: + return v == 0 + case uint8: + return v == 0 + case uint16: + return v == 0 + case uint32: + return v == 0 + default: + rv := reflect.ValueOf(v) + fn := zeroChecker(rv.Type()) + return fn(rv) + } +} + +type IsZeroerFunc func(reflect.Value) bool + +func zeroChecker(typ reflect.Type) IsZeroerFunc { + if typ.Implements(isZeroerType) { + return isZeroInterface + } + + kind := typ.Kind() + + if kind != reflect.Ptr { + ptr := reflect.PointerTo(typ) + if ptr.Implements(isZeroerType) { + return addrChecker(isZeroInterface) + } + } + + switch kind { + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return isZeroBytes + } + return isZeroLen + case reflect.String: + return isZeroLen + case reflect.Bool: + return isZeroBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return isZeroInt + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return isZeroUint + case reflect.Float32, reflect.Float64: + return isZeroFloat + case reflect.Interface, reflect.Ptr, reflect.Slice, reflect.Map: + return isNil + } + + if typ.Implements(driverValuerType) { + return isZeroDriverValue + } + + return notZero +} + +func addrChecker(fn IsZeroerFunc) IsZeroerFunc { + return func(v reflect.Value) bool { + if !v.CanAddr() { + return false + } + return fn(v.Addr()) + } +} + +func isZeroInterface(v reflect.Value) bool { + if v.Kind() == reflect.Ptr && v.IsNil() { + return true + } + return v.Interface().(isZeroer).IsZero() +} + +func isZeroDriverValue(v reflect.Value) bool { + if v.Kind() == reflect.Ptr { + return v.IsNil() + } + + valuer := v.Interface().(driver.Valuer) + value, err := valuer.Value() + if err != nil { + return false + } + return value == nil +} + +func isZeroLen(v reflect.Value) bool { + return v.Len() == 0 +} + +func isNil(v reflect.Value) bool { + return v.IsNil() +} + +func isZeroBool(v reflect.Value) bool { + return !v.Bool() +} + +func isZeroInt(v reflect.Value) bool { + return v.Int() == 0 +} + +func isZeroUint(v reflect.Value) bool { + return v.Uint() == 0 +} + +func isZeroFloat(v reflect.Value) bool { + return v.Float() == 0 +} + +func isZeroBytes(v reflect.Value) bool { + b := v.Slice(0, v.Len()).Bytes() + for _, c := range b { + if c != 0 { + return false + } + } + return true +} + +func notZero(v reflect.Value) bool { + return false +} diff --git a/vendor/github.com/uptrace/bun/util.go b/vendor/github.com/uptrace/bun/util.go new file mode 100644 index 0000000..cffcfab --- /dev/null +++ 
b/vendor/github.com/uptrace/bun/util.go @@ -0,0 +1,112 @@ +package bun + +import ( + "context" + "fmt" + "reflect" + "strings" +) + +func indirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Interface: + return indirect(v.Elem()) + case reflect.Ptr: + return v.Elem() + default: + return v + } +} + +func walk(v reflect.Value, index []int, fn func(reflect.Value)) { + v = reflect.Indirect(v) + switch v.Kind() { + case reflect.Slice: + sliceLen := v.Len() + for i := 0; i < sliceLen; i++ { + visitField(v.Index(i), index, fn) + } + default: + visitField(v, index, fn) + } +} + +func visitField(v reflect.Value, index []int, fn func(reflect.Value)) { + v = reflect.Indirect(v) + if len(index) > 0 { + v = v.Field(index[0]) + if v.Kind() == reflect.Ptr && v.IsNil() { + return + } + walk(v, index[1:], fn) + } else { + fn(v) + } +} + +func typeByIndex(t reflect.Type, index []int) reflect.Type { + for _, x := range index { + switch t.Kind() { + case reflect.Ptr: + t = t.Elem() + case reflect.Slice: + t = indirectType(t.Elem()) + } + t = t.Field(x).Type + } + return indirectType(t) +} + +func indirectType(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func sliceElemType(v reflect.Value) reflect.Type { + elemType := v.Type().Elem() + if elemType.Kind() == reflect.Interface && v.Len() > 0 { + return indirect(v.Index(0).Elem()).Type() + } + return indirectType(elemType) +} + +// appendComment adds comment in the header of the query into buffer +func appendComment(b []byte, name string) []byte { + if name == "" { + return b + } + name = strings.Map(func(r rune) rune { + if r == '\x00' { + return -1 + } + return r + }, name) + name = strings.ReplaceAll(name, `/*`, `/\*`) + name = strings.ReplaceAll(name, `*/`, `*\/`) + return append(b, fmt.Sprintf("/* %s */ ", name)...) +} + +// queryCommentCtxKey is a context key for setting a query comment on a context instead of calling the Comment("...") API directly +type queryCommentCtxKey struct{} + +// WithComment returns a context that includes a comment that may be included in a query for debugging +// +// If a context with an attached query is used, a comment set by the Comment("...") API will be overwritten. +func WithComment(ctx context.Context, comment string) context.Context { + return context.WithValue(ctx, queryCommentCtxKey{}, comment) +} + +// commenter describes the Comment interface implemented by all of the query types +type commenter[T any] interface { + Comment(string) T +} + +// setCommentFromContext sets the comment on the given query from the supplied context if one is set using the Comment(...) method. +func setCommentFromContext[T any](ctx context.Context, q commenter[T]) { + s, _ := ctx.Value(queryCommentCtxKey{}).(string) + if s != "" { + q.Comment(s) + } +} diff --git a/vendor/github.com/uptrace/bun/version.go b/vendor/github.com/uptrace/bun/version.go new file mode 100644 index 0000000..93333c9 --- /dev/null +++ b/vendor/github.com/uptrace/bun/version.go @@ -0,0 +1,6 @@ +package bun + +// Version is the current release version. 
+func Version() string { + return "1.2.16" +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc b/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc new file mode 100644 index 0000000..8b7f044 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc @@ -0,0 +1,4 @@ +semi: false +singleQuote: true +proseWrap: always +printWidth: 100 diff --git a/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml b/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml new file mode 100644 index 0000000..e2ce06c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml @@ -0,0 +1,20 @@ +sudo: false +language: go + +go: + - 1.15.x + - 1.16.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/msgpack + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go + env GOPATH)/bin v1.31.0 diff --git a/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md new file mode 100644 index 0000000..d45441e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md @@ -0,0 +1,75 @@ +## [5.4.1](https://github.com/vmihailenco/msgpack/compare/v5.4.0...v5.4.1) (2023-10-26) + + +### Bug Fixes + +* **reflect:** not assignable to type ([edeaedd](https://github.com/vmihailenco/msgpack/commit/edeaeddb2d51868df8c6ff2d8a218b527aeaf5fd)) + + + +# [5.4.0](https://github.com/vmihailenco/msgpack/compare/v5.3.6...v5.4.0) (2023-10-01) + + + +## [5.3.6](https://github.com/vmihailenco/msgpack/compare/v5.3.5...v5.3.6) (2023-10-01) + + +### Features + +* allow overwriting time.Time parsing from extID 13 (for NodeJS Date) ([9a6b73b](https://github.com/vmihailenco/msgpack/commit/9a6b73b3588fd962d568715f4375e24b089f7066)) +* apply omitEmptyFlag to empty structs ([e5f8d03](https://github.com/vmihailenco/msgpack/commit/e5f8d03c0a1dd9cc571d648cd610305139078de5)) +* support sorted keys for map[string]bool ([690c1fa](https://github.com/vmihailenco/msgpack/commit/690c1fab9814fab4842295ea986111f49850d9a4)) + + + +## [5.3.5](https://github.com/vmihailenco/msgpack/compare/v5.3.4...v5.3.5) (2021-10-22) + +- Allow decoding `nil` code as boolean false. + +## v5 + +### Added + +- `DecodeMap` is split into `DecodeMap`, `DecodeTypedMap`, and `DecodeUntypedMap`. +- New msgpack extensions API. + +### Changed + +- `Reset*` functions also reset flags. +- `SetMapDecodeFunc` is renamed to `SetMapDecoder`. +- `StructAsArray` is renamed to `UseArrayEncodedStructs`. +- `SortMapKeys` is renamed to `SetSortMapKeys`. + +### Removed + +- `UseJSONTag` is removed. Use `SetCustomStructTag("json")` instead. + +## v4 + +- Encode, Decode, Marshal, and Unmarshal are changed to accept a single argument. EncodeMulti and + DecodeMulti are added as replacements. +- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. +- Encoder changed to preserve the type of numbers instead of choosing the most compact encoding. The old + behavior can be achieved with Encoder.UseCompactEncoding. + +## v3.3 + +- `msgpack:",inline"` tag is restored to force inlining structs. + +## v3.2 + +- Decoding extension types returns a pointer to the value instead of the value. Fixes #153 + +## v3 + +- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. +- Msgpack maps are decoded into map[string]interface{} by default. +- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of + DecodeArrayLen.
+- Embedded structs are automatically inlined where possible. +- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old + format is supported as well. +- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. + There should be no performance differences. +- DecodeInterface can now return int8/16/32 and uint8/16/32. +- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/v5/LICENSE b/vendor/github.com/vmihailenco/msgpack/v5/LICENSE new file mode 100644 index 0000000..b749d07 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/v5/Makefile b/vendor/github.com/vmihailenco/msgpack/v5/Makefile new file mode 100644 index 0000000..e9aade7 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/Makefile @@ -0,0 +1,6 @@ +test: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet diff --git a/vendor/github.com/vmihailenco/msgpack/v5/README.md b/vendor/github.com/vmihailenco/msgpack/v5/README.md new file mode 100644 index 0000000..038464f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/README.md @@ -0,0 +1,100 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg)](https://travis-ci.org/vmihailenco/msgpack) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/vmihailenco/msgpack/v5)](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5) +[![Documentation](https://img.shields.io/badge/msgpack-documentation-informational)](https://msgpack.uptrace.dev/) +[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) + +> msgpack is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an [open source APM](https://uptrace.dev/get/open-source-apm.html) and blazingly fast +> [distributed tracing tool](https://get.uptrace.dev/compare/distributed-tracing-tools.html) powered +> by OpenTelemetry and ClickHouse. Give it a star as well! 
+
+## Resources
+
+- [Documentation](https://msgpack.uptrace.dev)
+- [Chat](https://discord.gg/rWtp5Aj)
+- [Reference](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5)
+- [Examples](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#pkg-examples)
+
+## Features
+
+- Primitives, arrays, maps, structs, time.Time and interface{}.
+- Appengine \*datastore.Key and datastore.Cursor.
+- [CustomEncoder]/[CustomDecoder] interfaces for custom encoding.
+- [Extensions](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-RegisterExt) to encode
+  type information.
+- Renaming fields via `msgpack:"my_field_name"` and alias via `msgpack:"alias:another_name"`.
+- Omitting individual empty fields via `msgpack:",omitempty"` tag or all
+  [empty fields in a struct](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Marshal-OmitEmpty).
+- [Map keys sorting](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.SetSortMapKeys).
+- Encoding/decoding all
+  [structs as arrays](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.UseArrayEncodedStructs)
+  or
+  [individual structs](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Marshal-AsArray).
+- [Encoder.SetCustomStructTag] with [Decoder.SetCustomStructTag] can turn msgpack into a drop-in
+  replacement for any tag.
+- Simple but very fast and efficient
+  [queries](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Decoder.Query).
+
+[customencoder]: https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#CustomEncoder
+[customdecoder]: https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#CustomDecoder
+[encoder.setcustomstructtag]:
+  https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.SetCustomStructTag
+[decoder.setcustomstructtag]:
+  https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Decoder.SetCustomStructTag
+
+## Installation
+
+msgpack supports the two latest Go versions and requires support for
+[Go modules](https://github.com/golang/go/wiki/Modules). So make sure to initialize a Go module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install msgpack/v5 (note _v5_ in the import; omitting it is a popular mistake):
+
+```shell
+go get github.com/vmihailenco/msgpack/v5
+```
+
+## Quickstart
+
+```go
+import (
+	"fmt"
+
+	"github.com/vmihailenco/msgpack/v5"
+)
+
+func ExampleMarshal() {
+	type Item struct {
+		Foo string
+	}
+
+	b, err := msgpack.Marshal(&Item{Foo: "bar"})
+	if err != nil {
+		panic(err)
+	}
+
+	var item Item
+	err = msgpack.Unmarshal(b, &item)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(item.Foo)
+	// Output: bar
+}
+```
+
+## See also
+
+- [Golang ORM](https://github.com/uptrace/bun) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://github.com/uptrace/bunrouter)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+Thanks to all the people who already contributed!
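
The queries feature listed above is the least self-explanatory one, so a sketch may help. This is a
minimal, hypothetical example of `Decoder.Query`; the document shape and phone numbers are invented
for illustration, while the dotted-path and `*` wildcard syntax come from the vendored
decode_query.go later in this patch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

func main() {
	// Encode a nested document, then extract values from the stream
	// without decoding the parts around them.
	b, err := msgpack.Marshal(map[string]interface{}{
		"phones": []interface{}{
			map[string]interface{}{"number": "111-11-11"},
			map[string]interface{}{"number": "222-22-22"},
		},
	})
	if err != nil {
		panic(err)
	}

	dec := msgpack.NewDecoder(bytes.NewReader(b))
	// "phones.*.number" visits every array element;
	// "phones.0.number" would select only the first.
	values, err := dec.Query("phones.*.number")
	if err != nil {
		panic(err)
	}
	fmt.Println(values) // [111-11-11 222-22-22]
}
```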
+ + + + diff --git a/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js b/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js new file mode 100644 index 0000000..4fedde6 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js @@ -0,0 +1 @@ +module.exports = { extends: ['@commitlint/config-conventional'] } diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode.go b/vendor/github.com/vmihailenco/msgpack/v5/decode.go new file mode 100644 index 0000000..ea645aa --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode.go @@ -0,0 +1,708 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +const ( + bytesAllocLimit = 1 << 20 // 1mb + sliceAllocLimit = 1e6 // 1m elements + maxMapSize = 1e6 // 1m elements +) + +const ( + looseInterfaceDecodingFlag uint32 = 1 << iota + disallowUnknownFieldsFlag + usePreallocateValues + disableAllocLimitFlag +) + +type bufReader interface { + io.Reader + io.ByteScanner +} + +//------------------------------------------------------------------------------ + +var decPool = sync.Pool{ + New: func() interface{} { + return NewDecoder(nil) + }, +} + +func GetDecoder() *Decoder { + return decPool.Get().(*Decoder) +} + +func PutDecoder(dec *Decoder) { + dec.r = nil + dec.s = nil + decPool.Put(dec) +} + +//------------------------------------------------------------------------------ + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + dec := GetDecoder() + dec.UsePreallocateValues(true) + dec.Reset(bytes.NewReader(data)) + err := dec.Decode(v) + + PutDecoder(dec) + + return err +} + +// A Decoder reads and decodes MessagePack values from an input stream. +type Decoder struct { + r io.Reader + s io.ByteScanner + mapDecoder func(*Decoder) (interface{}, error) + structTag string + buf []byte + rec []byte + dict []string + flags uint32 +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the requested msgpack values. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := new(Decoder) + d.Reset(r) + return d +} + +// Reset discards any buffered data, resets all state, and switches the buffered +// reader to read from r. +func (d *Decoder) Reset(r io.Reader) { + d.ResetDict(r, nil) +} + +// ResetDict is like Reset, but also resets the dict. +func (d *Decoder) ResetDict(r io.Reader, dict []string) { + d.ResetReader(r) + d.flags = 0 + d.structTag = "" + d.dict = dict +} + +func (d *Decoder) WithDict(dict []string, fn func(*Decoder) error) error { + oldDict := d.dict + d.dict = dict + err := fn(d) + d.dict = oldDict + return err +} + +func (d *Decoder) ResetReader(r io.Reader) { + d.mapDecoder = nil + d.dict = nil + + if br, ok := r.(bufReader); ok { + d.r = br + d.s = br + } else if r == nil { + d.r = nil + d.s = nil + } else { + br := bufio.NewReader(r) + d.r = br + d.s = br + } +} + +func (d *Decoder) SetMapDecoder(fn func(*Decoder) (interface{}, error)) { + d.mapDecoder = fn +} + +// UseLooseInterfaceDecoding causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. 
+func (d *Decoder) UseLooseInterfaceDecoding(on bool) { + if on { + d.flags |= looseInterfaceDecodingFlag + } else { + d.flags &= ^looseInterfaceDecodingFlag + } +} + +// SetCustomStructTag causes the decoder to use the supplied tag as a fallback option +// if there is no msgpack tag. +func (d *Decoder) SetCustomStructTag(tag string) { + d.structTag = tag +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields(on bool) { + if on { + d.flags |= disallowUnknownFieldsFlag + } else { + d.flags &= ^disallowUnknownFieldsFlag + } +} + +// UseInternedStrings enables support for decoding interned strings. +func (d *Decoder) UseInternedStrings(on bool) { + if on { + d.flags |= useInternedStringsFlag + } else { + d.flags &= ^useInternedStringsFlag + } +} + +// UsePreallocateValues enables preallocating values in chunks +func (d *Decoder) UsePreallocateValues(on bool) { + if on { + d.flags |= usePreallocateValues + } else { + d.flags &= ^usePreallocateValues + } +} + +// DisableAllocLimit enables fully allocating slices/maps when the size is known +func (d *Decoder) DisableAllocLimit(on bool) { + if on { + d.flags |= disableAllocLimitFlag + } else { + d.flags &= ^disableAllocLimitFlag + } +} + +// Buffered returns a reader of the data remaining in the Decoder's buffer. +// The reader is valid until the next call to Decode. +func (d *Decoder) Buffered() io.Reader { + return d.r +} + +//nolint:gocyclo +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(non-pointer %T)", v) + } + if vv.IsNil() { + return fmt.Errorf("msgpack: Decode(non-settable %T)", v) + } + + vv = vv.Elem() + if vv.Kind() == reflect.Interface { 
+ if !vv.IsNil() { + vv = vv.Elem() + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(non-pointer %s)", vv.Type().String()) + } + } + } + + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.flags&looseInterfaceDecodingFlag != 0 { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != msgpcode.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c byte) (bool, error) { + if c == msgpcode.Nil { + return false, nil + } + if c == msgpcode.False { + return false, nil + } + if c == msgpcode.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +func (d *Decoder) DecodeDuration() (time.Duration, error) { + n, err := d.DecodeInt64() + if err != nil { + return 0, err + } + return time.Duration(n), nil +} + +// DecodeInterface decodes value into interface. It returns following types: +// - nil, +// - bool, +// - int8, int16, int32, int64, +// - uint8, uint16, uint32, uint64, +// - float32 and float64, +// - string, +// - []byte, +// - slices of any of the above, +// - maps of any of the above. +// +// DecodeInterface should be used only when you don't know the type of value +// you are decoding. For example, if you are decoding number it is better to use +// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. 
+func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if msgpcode.IsFixedNum(c) { + return int8(c), nil + } + if msgpcode.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.decodeMapDefault() + } + if msgpcode.IsFixedArray(c) { + return d.decodeSlice(c) + } + if msgpcode.IsFixedString(c) { + return d.string(c) + } + + switch c { + case msgpcode.Nil: + return nil, nil + case msgpcode.False, msgpcode.True: + return d.bool(c) + case msgpcode.Float: + return d.float32(c) + case msgpcode.Double: + return d.float64(c) + case msgpcode.Uint8: + return d.uint8() + case msgpcode.Uint16: + return d.uint16() + case msgpcode.Uint32: + return d.uint32() + case msgpcode.Uint64: + return d.uint64() + case msgpcode.Int8: + return d.int8() + case msgpcode.Int16: + return d.int16() + case msgpcode.Int32: + return d.int32() + case msgpcode.Int64: + return d.int64() + case msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32: + return d.bytes(c, nil) + case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32: + return d.string(c) + case msgpcode.Array16, msgpcode.Array32: + return d.decodeSlice(c) + case msgpcode.Map16, msgpcode.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.decodeMapDefault() + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16, + msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32: + return d.decodeInterfaceExt(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. +// - []byte is converted to string. +func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if msgpcode.IsFixedNum(c) { + return int64(int8(c)), nil + } + if msgpcode.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.decodeMapDefault() + } + if msgpcode.IsFixedArray(c) { + return d.decodeSlice(c) + } + if msgpcode.IsFixedString(c) { + return d.string(c) + } + + switch c { + case msgpcode.Nil: + return nil, nil + case msgpcode.False, msgpcode.True: + return d.bool(c) + case msgpcode.Float, msgpcode.Double: + return d.float64(c) + case msgpcode.Uint8, msgpcode.Uint16, msgpcode.Uint32, msgpcode.Uint64: + return d.uint(c) + case msgpcode.Int8, msgpcode.Int16, msgpcode.Int32, msgpcode.Int64: + return d.int(c) + case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32, + msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32: + return d.string(c) + case msgpcode.Array16, msgpcode.Array32: + return d.decodeSlice(c) + case msgpcode.Map16, msgpcode.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.decodeMapDefault() + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16, + msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32: + return d.decodeInterfaceExt(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. 
+func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if msgpcode.IsFixedNum(c) { + return nil + } + if msgpcode.IsFixedMap(c) { + return d.skipMap(c) + } + if msgpcode.IsFixedArray(c) { + return d.skipSlice(c) + } + if msgpcode.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case msgpcode.Nil, msgpcode.False, msgpcode.True: + return nil + case msgpcode.Uint8, msgpcode.Int8: + return d.skipN(1) + case msgpcode.Uint16, msgpcode.Int16: + return d.skipN(2) + case msgpcode.Uint32, msgpcode.Int32, msgpcode.Float: + return d.skipN(4) + case msgpcode.Uint64, msgpcode.Int64, msgpcode.Double: + return d.skipN(8) + case msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32: + return d.skipBytes(c) + case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32: + return d.skipBytes(c) + case msgpcode.Array16, msgpcode.Array32: + return d.skipSlice(c) + case msgpcode.Map16, msgpcode.Map32: + return d.skipMap(c) + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16, + msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +func (d *Decoder) DecodeRaw() (RawMessage, error) { + d.rec = make([]byte, 0) + if err := d.Skip(); err != nil { + return nil, err + } + msg := RawMessage(d.rec) + d.rec = nil + return msg, nil +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes defines the list of available msgpcode. +func (d *Decoder) PeekCode() (byte, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return c, d.s.UnreadByte() +} + +// ReadFull reads exactly len(buf) bytes into the buf. +func (d *Decoder) ReadFull(buf []byte) error { + _, err := readN(d.r, buf, len(buf)) + return err +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == msgpcode.Nil +} + +func (d *Decoder) readCode() (byte, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return c, nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + var err error + if d.flags&disableAllocLimitFlag != 0 { + d.buf, err = readN(d.r, d.buf, n) + } else { + d.buf, err = readNGrow(d.r, d.buf, n) + } + if err != nil { + return nil, err + } + if d.rec != nil { + // TODO: read directly into d.rec? + d.rec = append(d.rec, d.buf...) + } + return d.buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + b = make([]byte, 0, n) + } + + if n > cap(b) { + b = append(b, make([]byte, n-len(b))...) + } else if n <= cap(b) { + b = b[:n] + } + + _, err := io.ReadFull(r, b) + return b, err +} + +func readNGrow(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + switch { + case n < 64: + b = make([]byte, 0, 64) + case n <= bytesAllocLimit: + b = make([]byte, 0, n) + default: + b = make([]byte, 0, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := min(n-len(b), bytesAllocLimit) + b = append(b, make([]byte, alloc)...) 
+ + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return b, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { //nolint:unparam + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go new file mode 100644 index 0000000..c54dae3 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go @@ -0,0 +1,356 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var errArrayStruct = errors.New("msgpack: number of fields in array-encoded struct has changed") + +var ( + mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) + mapStringStringType = mapStringStringPtrType.Elem() + mapStringBoolPtrType = reflect.TypeOf((*map[string]bool)(nil)) + mapStringBoolType = mapStringBoolPtrType.Elem() +) + +var ( + mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) + mapStringInterfaceType = mapStringInterfacePtrType.Elem() +) + +func decodeMapValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if n == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + ln := n + if d.flags&disableAllocLimitFlag == 0 { + ln = min(ln, maxMapSize) + } + v.Set(reflect.MakeMapWithSize(typ, ln)) + } + if n == 0 { + return nil + } + + return d.decodeTypedMapValue(v, n) +} + +func (d *Decoder) decodeMapDefault() (interface{}, error) { + if d.mapDecoder != nil { + return d.mapDecoder(d) + } + return d.DecodeMap() +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. +func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if msgpcode.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c byte) (int, error) { + if c == msgpcode.Nil { + return -1, nil + } + if c >= msgpcode.FixedMapLow && c <= msgpcode.FixedMapHigh { + return int(c & msgpcode.FixedMapMask), nil + } + if c == msgpcode.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == msgpcode.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, unexpectedCodeError{code: c, hint: "map length"} +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + ln := size + if d.flags&disableAllocLimitFlag == 0 { + ln = min(size, maxMapSize) + } + *ptr = make(map[string]string, ln) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + m, err := 
d.DecodeMap() + if err != nil { + return err + } + *ptr = m + return nil +} + +func (d *Decoder) DecodeMap() (map[string]interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + + if n == -1 { + return nil, nil + } + + m := make(map[string]interface{}, n) + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + + return m, nil +} + +func (d *Decoder) DecodeUntypedMap() (map[interface{}]interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + + if n == -1 { + return nil, nil + } + + m := make(map[interface{}]interface{}, n) + + for i := 0; i < n; i++ { + mk, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + m[mk] = mv + } + + return m, nil +} + +// DecodeTypedMap decodes a typed map. Typed map is a map that has a fixed type for keys and values. +// Key and value types may be different. +func (d *Decoder) DecodeTypedMap() (interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if n <= 0 { + return nil, nil + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + if !keyType.Comparable() { + return nil, fmt.Errorf("msgpack: unsupported map key: %s", keyType.String()) + } + + mapType := reflect.MapOf(keyType, valueType) + + ln := n + if d.flags&disableAllocLimitFlag == 0 { + ln = min(ln, maxMapSize) + } + + mapValue := reflect.MakeMapWithSize(mapType, ln) + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + + n-- + if err := d.decodeTypedMapValue(mapValue, n); err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeTypedMapValue(v reflect.Value, n int) error { + var ( + typ = v.Type() + keyType = typ.Key() + valueType = typ.Elem() + ) + for i := 0; i < n; i++ { + mk := d.newValue(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := d.newValue(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +func (d *Decoder) skipMap(c byte) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.mapLen(c) + if err == nil { + return d.decodeStruct(v, n) + } + + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return err + } + + if n <= 0 { + v.Set(reflect.Zero(v.Type())) + return nil + } + + fields := structs.Fields(v.Type(), d.structTag) + if n != len(fields.List) { + return errArrayStruct + } + + for _, f := range fields.List { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeStruct(v reflect.Value, n int) error { + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + + fields := structs.Fields(v.Type(), d.structTag) + for i := 0; i < n; i++ { + name, err := d.decodeStringTemp() + if err != nil { + return err + } + + if f := 
fields.Map[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + continue + } + + if d.flags&disallowUnknownFieldsFlag != 0 { + return fmt.Errorf("msgpack: unknown field %q", name) + } + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go new file mode 100644 index 0000000..45d6a74 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go @@ -0,0 +1,295 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return c, nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c byte) (uint64, error) { + if c == msgpcode.Nil { + return 0, nil + } + if msgpcode.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case msgpcode.Uint8: + n, err := d.uint8() + return uint64(n), err + case msgpcode.Int8: + n, err := d.int8() + return uint64(n), err + case msgpcode.Uint16: + n, err := d.uint16() + return uint64(n), err + case msgpcode.Int16: + n, err := d.int16() + return uint64(n), err + case msgpcode.Uint32: + n, err := d.uint32() + return uint64(n), err + case msgpcode.Int32: + n, err := d.int32() + return uint64(n), err + case msgpcode.Uint64, msgpcode.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. 
+func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c byte) (int64, error) { + if c == msgpcode.Nil { + return 0, nil + } + if msgpcode.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case msgpcode.Uint8: + n, err := d.uint8() + return int64(n), err + case msgpcode.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case msgpcode.Uint16: + n, err := d.uint16() + return int64(n), err + case msgpcode.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case msgpcode.Uint32: + n, err := d.uint32() + return int64(n), err + case msgpcode.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case msgpcode.Uint64, msgpcode.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c byte) (float32, error) { + if c == msgpcode.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. +func (d *Decoder) DecodeFloat64() (float64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float64(c) +} + +func (d *Decoder) float64(c byte) (float64, error) { + switch c { + case msgpcode.Float: + n, err := d.float32(c) + if err != nil { + return 0, err + } + return float64(n), nil + case msgpcode.Double: + n, err := d.uint64() + if err != nil { + return 0, err + } + return math.Float64frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float64(n), nil +} + +func (d *Decoder) DecodeUint() (uint, error) { + n, err := d.DecodeUint64() + return uint(n), err +} + +func (d *Decoder) DecodeUint8() (uint8, error) { + n, err := d.DecodeUint64() + return uint8(n), err +} + +func (d *Decoder) DecodeUint16() (uint16, error) { + n, err := d.DecodeUint64() + return uint16(n), err +} + +func (d *Decoder) DecodeUint32() (uint32, error) { + n, err := d.DecodeUint64() + return uint32(n), err +} + +func (d *Decoder) DecodeInt() (int, error) { + n, err := d.DecodeInt64() + return int(n), err +} + +func (d *Decoder) DecodeInt8() (int8, error) { + n, err := d.DecodeInt64() + return int8(n), err +} + +func (d *Decoder) DecodeInt16() (int16, error) { + n, err := d.DecodeInt64() + return int16(n), err +} + +func (d *Decoder) DecodeInt32() (int32, error) { + n, err := d.DecodeInt64() + return int32(n), err +} + +func decodeFloat32Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat32() + if err != nil { + return err + } + v.SetFloat(float64(f)) + return nil +} + +func decodeFloat64Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat64() + if err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeInt64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeInt64() + if err != nil { + return err + } + v.SetInt(n) + return nil +} + +func decodeUint64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git 
a/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go new file mode 100644 index 0000000..4dce0fe --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go @@ -0,0 +1,157 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +type queryResult struct { + query string + key string + values []interface{} + hasAsterisk bool +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. +func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == msgpcode.Map16 || code == msgpcode.Map32 || msgpcode.IsFixedMap(code): + err = d.queryMapKey(q) + case code == msgpcode.Array16 || code == msgpcode.Array32 || msgpcode.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + key, err := d.decodeStringTemp() + if err != nil { + return err + } + + if key == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go new file mode 100644 index 0000000..9c155f2 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go @@ -0,0 +1,198 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. 
+func (d *Decoder) DecodeArrayLen() (int, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return 0, err
+	}
+	return d.arrayLen(c)
+}
+
+func (d *Decoder) arrayLen(c byte) (int, error) {
+	if c == msgpcode.Nil {
+		return -1, nil
+	} else if c >= msgpcode.FixedArrayLow && c <= msgpcode.FixedArrayHigh {
+		return int(c & msgpcode.FixedArrayMask), nil
+	}
+	switch c {
+	case msgpcode.Array16:
+		n, err := d.uint16()
+		return int(n), err
+	case msgpcode.Array32:
+		n, err := d.uint32()
+		return int(n), err
+	}
+	return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c)
+}
+
+func decodeStringSliceValue(d *Decoder, v reflect.Value) error {
+	ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string)
+	return d.decodeStringSlicePtr(ptr)
+}
+
+func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error {
+	n, err := d.DecodeArrayLen()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		return nil
+	}
+
+	ss := makeStrings(*ptr, n, d.flags&disableAllocLimitFlag != 0)
+	for i := 0; i < n; i++ {
+		s, err := d.DecodeString()
+		if err != nil {
+			return err
+		}
+		ss = append(ss, s)
+	}
+	*ptr = ss
+
+	return nil
+}
+
+func makeStrings(s []string, n int, noLimit bool) []string {
+	if !noLimit && n > sliceAllocLimit {
+		n = sliceAllocLimit
+	}
+
+	if s == nil {
+		return make([]string, 0, n)
+	}
+
+	if cap(s) >= n {
+		return s[:0]
+	}
+
+	s = s[:cap(s)]
+	s = append(s, make([]string, n-len(s))...)
+	return s[:0]
+}
+
+func decodeSliceValue(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeArrayLen()
+	if err != nil {
+		return err
+	}
+
+	if n == -1 {
+		v.Set(reflect.Zero(v.Type()))
+		return nil
+	}
+	if n == 0 && v.IsNil() {
+		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+		return nil
+	}
+
+	if v.Cap() >= n {
+		v.Set(v.Slice(0, n))
+	} else if v.Len() < v.Cap() {
+		v.Set(v.Slice(0, v.Cap()))
+	}
+
+	noLimit := d.flags&disableAllocLimitFlag != 0
+
+	if noLimit && n > v.Len() {
+		v.Set(growSliceValue(v, n, noLimit))
+	}
+
+	for i := 0; i < n; i++ {
+		if !noLimit && i >= v.Len() {
+			v.Set(growSliceValue(v, n, noLimit))
+		}
+
+		elem := v.Index(i)
+		if err := d.DecodeValue(elem); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func growSliceValue(v reflect.Value, n int, noLimit bool) reflect.Value {
+	diff := n - v.Len()
+	if !noLimit && diff > sliceAllocLimit {
+		diff = sliceAllocLimit
+	}
+	v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff))
+	return v
+}
+
+func decodeArrayValue(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeArrayLen()
+	if err != nil {
+		return err
+	}
+
+	if n == -1 {
+		return nil
+	}
+	if n > v.Len() {
+		return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n)
+	}
+
+	for i := 0; i < n; i++ {
+		sv := v.Index(i)
+		if err := d.DecodeValue(sv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Decoder) DecodeSlice() ([]interface{}, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return nil, err
+	}
+	return d.decodeSlice(c)
+}
+
+func (d *Decoder) decodeSlice(c byte) ([]interface{}, error) {
+	n, err := d.arrayLen(c)
+	if err != nil {
+		return nil, err
+	}
+	if n == -1 {
+		return nil, nil
+	}
+
+	s := make([]interface{}, 0, n)
+	for i := 0; i < n; i++ {
+		v, err := d.decodeInterfaceCond()
+		if err != nil {
+			return nil, err
+		}
+		s = append(s, v)
+	}
+
+	return s, nil
+}
+
+func (d *Decoder) skipSlice(c byte) error {
+	n, err := d.arrayLen(c)
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < n; i++ {
+		if err := d.Skip(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
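
The slice decoder above grows the destination in sliceAllocLimit-sized steps unless
DisableAllocLimit(true) is set, so a corrupt or hostile length prefix cannot force one huge upfront
allocation. A minimal sketch of the two decoding paths this file serves, with illustrative values:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

func main() {
	b, err := msgpack.Marshal([]string{"a", "b", "c"})
	if err != nil {
		panic(err)
	}

	// Typed decoding goes through decodeStringSlicePtr above.
	var ss []string
	if err := msgpack.Unmarshal(b, &ss); err != nil {
		panic(err)
	}

	// Untyped decoding goes through Decoder.DecodeSlice and yields []interface{}.
	dec := msgpack.NewDecoder(bytes.NewReader(b))
	vs, err := dec.DecodeSlice()
	if err != nil {
		panic(err)
	}
	fmt.Println(ss, vs) // [a b c] [a b c]
}
```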
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go new file mode 100644 index 0000000..e837e08 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go @@ -0,0 +1,192 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +func (d *Decoder) bytesLen(c byte) (int, error) { + if c == msgpcode.Nil { + return -1, nil + } + + if msgpcode.IsFixedString(c) { + return int(c & msgpcode.FixedStrMask), nil + } + + switch c { + case msgpcode.Str8, msgpcode.Bin8: + n, err := d.uint8() + return int(n), err + case msgpcode.Str16, msgpcode.Bin16: + n, err := d.uint16() + return int(n), err + case msgpcode.Str32, msgpcode.Bin32: + n, err := d.uint32() + return int(n), err + } + + return 0, fmt.Errorf("msgpack: invalid code=%x decoding string/bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + if intern := d.flags&useInternedStringsFlag != 0; intern || len(d.dict) > 0 { + return d.decodeInternedString(intern) + } + + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c byte) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + return d.stringWithLen(n) +} + +func (d *Decoder) stringWithLen(n int) (string, error) { + if n <= 0 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c byte, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) decodeStringTemp() (string, error) { + if intern := d.flags&useInternedStringsFlag != 0; intern || len(d.dict) > 0 { + return d.decodeInternedString(intern) + } + + c, err := d.readCode() + if err != nil { + return "", err + } + + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + + b, err := d.readN(n) + if err != nil { + return "", err + } + + return bytesToString(b), nil +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c byte, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n <= 0 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > 
v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go new file mode 100644 index 0000000..0b4c1d0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go @@ -0,0 +1,46 @@ +package msgpack + +import ( + "reflect" + "sync" +) + +var cachedValues struct { + m map[reflect.Type]chan reflect.Value + sync.RWMutex +} + +func cachedValue(t reflect.Type) reflect.Value { + cachedValues.RLock() + ch := cachedValues.m[t] + cachedValues.RUnlock() + if ch != nil { + return <-ch + } + + cachedValues.Lock() + defer cachedValues.Unlock() + if ch = cachedValues.m[t]; ch != nil { + return <-ch + } + + ch = make(chan reflect.Value, 256) + go func() { + for { + ch <- reflect.New(t) + } + }() + if cachedValues.m == nil { + cachedValues.m = make(map[reflect.Type]chan reflect.Value, 8) + } + cachedValues.m[t] = ch + return <-ch +} + +func (d *Decoder) newValue(t reflect.Type) reflect.Value { + if d.flags&usePreallocateValues == 0 { + return reflect.New(t) + } + + return cachedValue(t) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go new file mode 100644 index 0000000..c44a674 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go @@ -0,0 +1,251 @@ +package msgpack + +import ( + "encoding" + "errors" + "fmt" + "reflect" +) + +var ( + interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + boolType = reflect.TypeOf((*bool)(nil)).Elem() +) + +var valueDecoders []decoderFunc + +//nolint:gochecknoinits +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func getDecoder(typ reflect.Type) decoderFunc { + if v, ok := typeDecMap.Load(typ); ok { + return v.(decoderFunc) + } + fn := _getDecoder(typ) + typeDecMap.Store(typ, fn) + return fn +} + +func _getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeDecMap.Load(typ.Elem()); ok { + return ptrValueDecoder(typ) + } + } + + if typ.Implements(customDecoderType) { + return nilAwareDecoder(typ, decodeCustomValue) + } + if typ.Implements(unmarshalerType) { + return nilAwareDecoder(typ, unmarshalValue) + } + if typ.Implements(binaryUnmarshalerType) { + return nilAwareDecoder(typ, unmarshalBinaryValue) + } + if typ.Implements(textUnmarshalerType) { + return nilAwareDecoder(typ, 
unmarshalTextValue) + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return addrDecoder(nilAwareDecoder(typ, decodeCustomValue)) + } + if ptr.Implements(unmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalValue)) + } + if ptr.Implements(binaryUnmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalBinaryValue)) + } + if ptr.Implements(textUnmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalTextValue)) + } + } + + switch kind { + case reflect.Ptr: + return ptrValueDecoder(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return decodeBytesValue + } + if elem == stringType { + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + + return valueDecoders[kind] +} + +func ptrValueDecoder(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if !v.IsNil() { + v.Set(d.newValue(typ).Elem()) + } + return d.DecodeNil() + } + if v.IsNil() { + v.Set(d.newValue(typ.Elem())) + } + return decoder(d, v.Elem()) + } +} + +func addrDecoder(fn decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return fn(d, v.Addr()) + } +} + +func nilAwareDecoder(typ reflect.Type, fn decoderFunc) decoderFunc { + if nilable(typ.Kind()) { + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + if v.IsNil() { + v.Set(d.newValue(typ.Elem())) + } + return fn(d, v) + } + } + + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + return fn(d, v) + } +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + return d.DecodeValue(v.Elem()) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} + +//------------------------------------------------------------------------------ + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + var b []byte + + d.rec = make([]byte, 0, 64) + if err := d.Skip(); err != nil { + return err + } + b = d.rec + d.rec = nil + + unmarshaler := v.Interface().(Unmarshaler) + return unmarshaler.UnmarshalMsgpack(b) +} + +func unmarshalBinaryValue(d *Decoder, v reflect.Value) error { + data, err := d.DecodeBytes() + if err != nil { + return err + } + + unmarshaler := 
v.Interface().(encoding.BinaryUnmarshaler) + return unmarshaler.UnmarshalBinary(data) +} + +func unmarshalTextValue(d *Decoder, v reflect.Value) error { + data, err := d.DecodeBytes() + if err != nil { + return err + } + + unmarshaler := v.Interface().(encoding.TextUnmarshaler) + return unmarshaler.UnmarshalText(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode.go b/vendor/github.com/vmihailenco/msgpack/v5/encode.go new file mode 100644 index 0000000..135adc8 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode.go @@ -0,0 +1,270 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +const ( + sortMapKeysFlag uint32 = 1 << iota + arrayEncodedStructsFlag + useCompactIntsFlag + useCompactFloatsFlag + useInternedStringsFlag + omitEmptyFlag +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +type byteWriter struct { + io.Writer +} + +func newByteWriter(w io.Writer) byteWriter { + return byteWriter{ + Writer: w, + } +} + +func (bw byteWriter) WriteByte(c byte) error { + _, err := bw.Write([]byte{c}) + return err +} + +//------------------------------------------------------------------------------ + +var encPool = sync.Pool{ + New: func() interface{} { + return NewEncoder(nil) + }, +} + +func GetEncoder() *Encoder { + return encPool.Get().(*Encoder) +} + +func PutEncoder(enc *Encoder) { + enc.w = nil + encPool.Put(enc) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + enc := GetEncoder() + + var buf bytes.Buffer + enc.Reset(&buf) + + err := enc.Encode(v) + b := buf.Bytes() + + PutEncoder(enc) + + if err != nil { + return nil, err + } + return b, err +} + +type Encoder struct { + w writer + dict map[string]int + structTag string + buf []byte + timeBuf []byte + flags uint32 +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + buf: make([]byte, 9), + } + e.Reset(w) + return e +} + +// Writer returns the Encoder's writer. +func (e *Encoder) Writer() io.Writer { + return e.w +} + +// Reset discards any buffered data, resets all state, and switches the writer to write to w. +func (e *Encoder) Reset(w io.Writer) { + e.ResetDict(w, nil) +} + +// ResetDict is like Reset, but also resets the dict. +func (e *Encoder) ResetDict(w io.Writer, dict map[string]int) { + e.ResetWriter(w) + e.flags = 0 + e.structTag = "" + e.dict = dict +} + +func (e *Encoder) WithDict(dict map[string]int, fn func(*Encoder) error) error { + oldDict := e.dict + e.dict = dict + err := fn(e) + e.dict = oldDict + return err +} + +func (e *Encoder) ResetWriter(w io.Writer) { + e.dict = nil + if bw, ok := w.(writer); ok { + e.w = bw + } else if w == nil { + e.w = nil + } else { + e.w = newByteWriter(w) + } +} + +// SetSortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]bool +// - map[string]interface{} +func (e *Encoder) SetSortMapKeys(on bool) *Encoder { + if on { + e.flags |= sortMapKeysFlag + } else { + e.flags &= ^sortMapKeysFlag + } + return e +} + +// SetCustomStructTag causes the Encoder to use a custom struct tag as +// fallback option if there is no msgpack tag. +func (e *Encoder) SetCustomStructTag(tag string) { + e.structTag = tag +} + +// SetOmitEmpty causes the Encoder to omit empty values by default. 
+func (e *Encoder) SetOmitEmpty(on bool) { + if on { + e.flags |= omitEmptyFlag + } else { + e.flags &= ^omitEmptyFlag + } +} + +// UseArrayEncodedStructs causes the Encoder to encode Go structs as msgpack arrays. +func (e *Encoder) UseArrayEncodedStructs(on bool) { + if on { + e.flags |= arrayEncodedStructsFlag + } else { + e.flags &= ^arrayEncodedStructsFlag + } +} + +// UseCompactEncoding causes the Encoder to chose the most compact encoding. +// For example, it allows to encode small Go int64 as msgpack int8 saving 7 bytes. +func (e *Encoder) UseCompactInts(on bool) { + if on { + e.flags |= useCompactIntsFlag + } else { + e.flags &= ^useCompactIntsFlag + } +} + +// UseCompactFloats causes the Encoder to chose a compact integer encoding +// for floats that can be represented as integers. +func (e *Encoder) UseCompactFloats(on bool) { + if on { + e.flags |= useCompactFloatsFlag + } else { + e.flags &= ^useCompactFloatsFlag + } +} + +// UseInternedStrings causes the Encoder to intern strings. +func (e *Encoder) UseInternedStrings(on bool) { + if on { + e.flags |= useInternedStringsFlag + } else { + e.flags &= ^useInternedStringsFlag + } +} + +func (e *Encoder) Encode(v interface{}) error { + switch v := v.(type) { + case nil: + return e.EncodeNil() + case string: + return e.EncodeString(v) + case []byte: + return e.EncodeBytes(v) + case int: + return e.EncodeInt(int64(v)) + case int64: + return e.encodeInt64Cond(v) + case uint: + return e.EncodeUint(uint64(v)) + case uint64: + return e.encodeUint64Cond(v) + case bool: + return e.EncodeBool(v) + case float32: + return e.EncodeFloat32(v) + case float64: + return e.EncodeFloat64(v) + case time.Duration: + return e.encodeInt64Cond(int64(v)) + case time.Time: + return e.EncodeTime(v) + } + return e.EncodeValue(reflect.ValueOf(v)) +} + +func (e *Encoder) EncodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := e.Encode(vv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeValue(v reflect.Value) error { + fn := getEncoder(v.Type()) + return fn(e, v) +} + +func (e *Encoder) EncodeNil() error { + return e.writeCode(msgpcode.Nil) +} + +func (e *Encoder) EncodeBool(value bool) error { + if value { + return e.writeCode(msgpcode.True) + } + return e.writeCode(msgpcode.False) +} + +func (e *Encoder) EncodeDuration(d time.Duration) error { + return e.EncodeInt(int64(d)) +} + +func (e *Encoder) writeCode(c byte) error { + return e.w.WriteByte(c) +} + +func (e *Encoder) write(b []byte) error { + _, err := e.w.Write(b) + return err +} + +func (e *Encoder) writeString(s string) error { + _, err := e.w.Write(stringToBytes(s)) + return err +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go new file mode 100644 index 0000000..a5aa31b --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go @@ -0,0 +1,225 @@ +package msgpack + +import ( + "math" + "reflect" + "sort" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +func encodeMapValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + iter := v.MapRange() + for iter.Next() { + if err := e.EncodeValue(iter.Key()); err != nil { + return err + } + if err := e.EncodeValue(iter.Value()); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringBoolValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := 
e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringBoolType).Interface().(map[string]bool) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringBool(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeBool(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringStringType).Interface().(map[string]string) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringString(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeString(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) + if e.flags&sortMapKeysFlag != 0 { + return e.EncodeMapSorted(m) + } + return e.EncodeMap(m) +} + +func (e *Encoder) EncodeMap(m map[string]interface{}) error { + if m == nil { + return e.EncodeNil() + } + if err := e.EncodeMapLen(len(m)); err != nil { + return err + } + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.Encode(mv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeMapSorted(m map[string]interface{}) error { + if m == nil { + return e.EncodeNil() + } + if err := e.EncodeMapLen(len(m)); err != nil { + return err + } + + keys := make([]string, 0, len(m)) + + for k := range m { + keys = append(keys, k) + } + + sort.Strings(keys) + + for _, k := range keys { + if err := e.EncodeString(k); err != nil { + return err + } + if err := e.Encode(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringBool(m map[string]bool) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeBool(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeString(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) EncodeMapLen(l int) error { + if l < 16 { + return e.writeCode(msgpcode.FixedMapLow | byte(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Map16, uint16(l)) + } + return e.write4(msgpcode.Map32, uint32(l)) +} + +func encodeStructValue(e *Encoder, strct reflect.Value) error { + structFields := structs.Fields(strct.Type(), e.structTag) + if e.flags&arrayEncodedStructsFlag != 0 || structFields.AsArray { + return encodeStructValueAsArray(e, strct, structFields.List) + } + fields := structFields.OmitEmpty(e, strct) + + if err := e.EncodeMapLen(len(fields)); err != nil { + return err + } + + for _, f := range fields { + if err := e.EncodeString(f.name); err != nil { + return err + } + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + + return nil +} + +func encodeStructValueAsArray(e *Encoder, 
strct reflect.Value, fields []*field) error {
+ if err := e.EncodeArrayLen(len(fields)); err != nil {
+ return err
+ }
+ for _, f := range fields {
+ if err := f.EncodeValue(e, strct); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go
new file mode 100644
index 0000000..63c311b
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go
@@ -0,0 +1,252 @@
+package msgpack
+
+import (
+ "math"
+ "reflect"
+
+ "github.com/vmihailenco/msgpack/v5/msgpcode"
+)
+
+// EncodeUint8 encodes a uint8 in 2 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint8(n uint8) error {
+ return e.write1(msgpcode.Uint8, n)
+}
+
+func (e *Encoder) encodeUint8Cond(n uint8) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeUint(uint64(n))
+ }
+ return e.EncodeUint8(n)
+}
+
+// EncodeUint16 encodes a uint16 in 3 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint16(n uint16) error {
+ return e.write2(msgpcode.Uint16, n)
+}
+
+func (e *Encoder) encodeUint16Cond(n uint16) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeUint(uint64(n))
+ }
+ return e.EncodeUint16(n)
+}
+
+// EncodeUint32 encodes a uint32 in 5 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint32(n uint32) error {
+ return e.write4(msgpcode.Uint32, n)
+}
+
+func (e *Encoder) encodeUint32Cond(n uint32) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeUint(uint64(n))
+ }
+ return e.EncodeUint32(n)
+}
+
+// EncodeUint64 encodes a uint64 in 9 bytes, preserving the type of the number.
+func (e *Encoder) EncodeUint64(n uint64) error {
+ return e.write8(msgpcode.Uint64, n)
+}
+
+func (e *Encoder) encodeUint64Cond(n uint64) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeUint(n)
+ }
+ return e.EncodeUint64(n)
+}
+
+// EncodeInt8 encodes an int8 in 2 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt8(n int8) error {
+ return e.write1(msgpcode.Int8, uint8(n))
+}
+
+func (e *Encoder) encodeInt8Cond(n int8) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeInt(int64(n))
+ }
+ return e.EncodeInt8(n)
+}
+
+// EncodeInt16 encodes an int16 in 3 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt16(n int16) error {
+ return e.write2(msgpcode.Int16, uint16(n))
+}
+
+func (e *Encoder) encodeInt16Cond(n int16) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeInt(int64(n))
+ }
+ return e.EncodeInt16(n)
+}
+
+// EncodeInt32 encodes an int32 in 5 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt32(n int32) error {
+ return e.write4(msgpcode.Int32, uint32(n))
+}
+
+func (e *Encoder) encodeInt32Cond(n int32) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeInt(int64(n))
+ }
+ return e.EncodeInt32(n)
+}
+
+// EncodeInt64 encodes an int64 in 9 bytes, preserving the type of the number.
+func (e *Encoder) EncodeInt64(n int64) error {
+ return e.write8(msgpcode.Int64, uint64(n))
+}
+
+func (e *Encoder) encodeInt64Cond(n int64) error {
+ if e.flags&useCompactIntsFlag != 0 {
+ return e.EncodeInt(n)
+ }
+ return e.EncodeInt64(n)
+}
+
+// EncodeUint encodes a uint64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
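+//
+// Resulting sizes, summarizing the branches below (editorial note):
+//
+// n <= 127 -> 1 byte (positive fixint)
+// n <= math.MaxUint8 -> 2 bytes (uint8)
+// n <= math.MaxUint16 -> 3 bytes (uint16)
+// n <= math.MaxUint32 -> 5 bytes (uint32)
+// otherwise -> 9 bytes (uint64)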
+func (e *Encoder) EncodeUint(n uint64) error {
+ if n <= math.MaxInt8 {
+ return e.w.WriteByte(byte(n))
+ }
+ if n <= math.MaxUint8 {
+ return e.EncodeUint8(uint8(n))
+ }
+ if n <= math.MaxUint16 {
+ return e.EncodeUint16(uint16(n))
+ }
+ if n <= math.MaxUint32 {
+ return e.EncodeUint32(uint32(n))
+ }
+ return e.EncodeUint64(n)
+}
+
+// EncodeInt encodes an int64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
+func (e *Encoder) EncodeInt(n int64) error {
+ if n >= 0 {
+ return e.EncodeUint(uint64(n))
+ }
+ if n >= int64(int8(msgpcode.NegFixedNumLow)) {
+ return e.w.WriteByte(byte(n))
+ }
+ if n >= math.MinInt8 {
+ return e.EncodeInt8(int8(n))
+ }
+ if n >= math.MinInt16 {
+ return e.EncodeInt16(int16(n))
+ }
+ if n >= math.MinInt32 {
+ return e.EncodeInt32(int32(n))
+ }
+ return e.EncodeInt64(n)
+}
+
+func (e *Encoder) EncodeFloat32(n float32) error {
+ if e.flags&useCompactFloatsFlag != 0 {
+ if float32(int64(n)) == n {
+ return e.EncodeInt(int64(n))
+ }
+ }
+ return e.write4(msgpcode.Float, math.Float32bits(n))
+}
+
+func (e *Encoder) EncodeFloat64(n float64) error {
+ if e.flags&useCompactFloatsFlag != 0 {
+ // Both NaN and Inf convert to int64(-0x8000000000000000).
+ // If n is NaN, it never compares equal to any other value.
+ // If n is Inf, it does not convert from int64 back to +/-Inf.
+ // In both cases the comparison below is false, so such values are
+ // always encoded as floats, never as integers.
+ if float64(int64(n)) == n {
+ return e.EncodeInt(int64(n))
+ }
+ }
+ return e.write8(msgpcode.Double, math.Float64bits(n))
+}
+
+func (e *Encoder) write1(code byte, n uint8) error {
+ e.buf = e.buf[:2]
+ e.buf[0] = code
+ e.buf[1] = n
+ return e.write(e.buf)
+}
+
+func (e *Encoder) write2(code byte, n uint16) error {
+ e.buf = e.buf[:3]
+ e.buf[0] = code
+ e.buf[1] = byte(n >> 8)
+ e.buf[2] = byte(n)
+ return e.write(e.buf)
+}
+
+func (e *Encoder) write4(code byte, n uint32) error {
+ e.buf = e.buf[:5]
+ e.buf[0] = code
+ e.buf[1] = byte(n >> 24)
+ e.buf[2] = byte(n >> 16)
+ e.buf[3] = byte(n >> 8)
+ e.buf[4] = byte(n)
+ return e.write(e.buf)
+}
+
+func (e *Encoder) write8(code byte, n uint64) error {
+ e.buf = e.buf[:9]
+ e.buf[0] = code
+ e.buf[1] = byte(n >> 56)
+ e.buf[2] = byte(n >> 48)
+ e.buf[3] = byte(n >> 40)
+ e.buf[4] = byte(n >> 32)
+ e.buf[5] = byte(n >> 24)
+ e.buf[6] = byte(n >> 16)
+ e.buf[7] = byte(n >> 8)
+ e.buf[8] = byte(n)
+ return e.write(e.buf)
+}
+
+func encodeUintValue(e *Encoder, v reflect.Value) error {
+ return e.EncodeUint(v.Uint())
+}
+
+func encodeIntValue(e *Encoder, v reflect.Value) error {
+ return e.EncodeInt(v.Int())
+}
+
+func encodeUint8CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeUint8Cond(uint8(v.Uint()))
+}
+
+func encodeUint16CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeUint16Cond(uint16(v.Uint()))
+}
+
+func encodeUint32CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeUint32Cond(uint32(v.Uint()))
+}
+
+func encodeUint64CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeUint64Cond(v.Uint())
+}
+
+func encodeInt8CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeInt8Cond(int8(v.Int()))
+}
+
+func encodeInt16CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeInt16Cond(int16(v.Int()))
+}
+
+func encodeInt32CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeInt32Cond(int32(v.Int()))
+}
+
+func encodeInt64CondValue(e *Encoder, v reflect.Value) error {
+ return e.encodeInt64Cond(v.Int())
+}
+
+func encodeFloat32Value(e *Encoder, v reflect.Value) error {
+ return
e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go new file mode 100644 index 0000000..ca46ead --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go @@ -0,0 +1,139 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var stringSliceType = reflect.TypeOf(([]string)(nil)) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) + return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(msgpcode.Bin8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Bin16, uint16(l)) + } + return e.write4(msgpcode.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStringLen(l int) error { + if l < 32 { + return e.writeCode(msgpcode.FixedStrLow | byte(l)) + } + if l < 256 { + return e.write1(msgpcode.Str8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Str16, uint16(l)) + } + return e.write4(msgpcode.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if intern := e.flags&useInternedStringsFlag != 0; intern || len(e.dict) > 0 { + return e.encodeInternedString(v, intern) + } + return e.encodeNormalString(v) +} + +func (e *Encoder) encodeNormalString(v string) error { + if err := e.encodeStringLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(msgpcode.FixedArrayLow | byte(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Array16, uint16(l)) + } + return e.write4(msgpcode.Array32, uint32(l)) +} + +func encodeStringSliceValue(e *Encoder, v reflect.Value) error { + ss := v.Convert(stringSliceType).Interface().([]string) + return e.encodeStringSlice(ss) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go 
b/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go new file mode 100644 index 0000000..1d6303a --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go @@ -0,0 +1,254 @@ +package msgpack + +import ( + "encoding" + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +//nolint:gochecknoinits +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeIntValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUintValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if v, ok := typeEncMap.Load(typ); ok { + return v.(encoderFunc) + } + fn := _getEncoder(typ) + typeEncMap.Store(typ, fn) + return fn +} + +func _getEncoder(typ reflect.Type) encoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeEncMap.Load(typ.Elem()); ok { + return ptrEncoderFunc(typ) + } + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + if typ.Implements(binaryMarshalerType) { + return marshalBinaryValue + } + if typ.Implements(textMarshalerType) { + return marshalTextValue + } + + // Addressable struct field value. 
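+ // The type itself does not implement the interfaces, but its pointer
+ // type may; the pointer-based encoders require the value to be
+ // addressable (see encodeCustomValuePtr, which checks CanAddr).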
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + if ptr.Implements(binaryMarshalerType) { + return marshalBinaryValueAddr + } + if ptr.Implements(textMarshalerType) { + return marshalTextValueAddr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + if elem == stringType { + return encodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case boolType: + return encodeMapStringBoolValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} + +func nilable(kind reflect.Kind) bool { + switch kind { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +func nilableType(t reflect.Type) bool { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return nilable(t.Kind()) +} + +//------------------------------------------------------------------------------ + +func marshalBinaryValueAddr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalBinaryValue(e, v.Addr()) +} + +func marshalBinaryValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.BinaryMarshaler) + data, err := 
marshaler.MarshalBinary() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} + +//------------------------------------------------------------------------------ + +func marshalTextValueAddr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalTextValue(e, v.Addr()) +} + +func marshalTextValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.TextMarshaler) + data, err := marshaler.MarshalText() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/ext.go b/vendor/github.com/vmihailenco/msgpack/v5/ext.go new file mode 100644 index 0000000..354b9d9 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/ext.go @@ -0,0 +1,303 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +type extInfo struct { + Type reflect.Type + Decoder func(d *Decoder, v reflect.Value, extLen int) error +} + +var extTypes = make(map[int8]*extInfo) + +type MarshalerUnmarshaler interface { + Marshaler + Unmarshaler +} + +func RegisterExt(extID int8, value MarshalerUnmarshaler) { + RegisterExtEncoder(extID, value, func(e *Encoder, v reflect.Value) ([]byte, error) { + marshaler := v.Interface().(Marshaler) + return marshaler.MarshalMsgpack() + }) + RegisterExtDecoder(extID, value, func(d *Decoder, v reflect.Value, extLen int) error { + b, err := d.readN(extLen) + if err != nil { + return err + } + return v.Interface().(Unmarshaler).UnmarshalMsgpack(b) + }) +} + +func UnregisterExt(extID int8) { + unregisterExtEncoder(extID) + unregisterExtDecoder(extID) +} + +func RegisterExtEncoder( + extID int8, + value interface{}, + encoder func(enc *Encoder, v reflect.Value) ([]byte, error), +) { + unregisterExtEncoder(extID) + + typ := reflect.TypeOf(value) + extEncoder := makeExtEncoder(extID, typ, encoder) + typeEncMap.Store(extID, typ) + typeEncMap.Store(typ, extEncoder) + if typ.Kind() == reflect.Ptr { + typeEncMap.Store(typ.Elem(), makeExtEncoderAddr(extEncoder)) + } +} + +func unregisterExtEncoder(extID int8) { + t, ok := typeEncMap.Load(extID) + if !ok { + return + } + typeEncMap.Delete(extID) + typ := t.(reflect.Type) + typeEncMap.Delete(typ) + if typ.Kind() == reflect.Ptr { + typeEncMap.Delete(typ.Elem()) + } +} + +func makeExtEncoder( + extID int8, + typ reflect.Type, + encoder func(enc *Encoder, v reflect.Value) ([]byte, error), +) encoderFunc { + nilable := typ.Kind() == reflect.Ptr + + return func(e *Encoder, v reflect.Value) error { + if nilable && v.IsNil() { + return e.EncodeNil() + } + + b, err := encoder(e, v) + if err != nil { + return err + } + + if err := e.EncodeExtHeader(extID, len(b)); err != nil { + return err + } + + return e.write(b) + } +} + +func makeExtEncoderAddr(extEncoder encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: EncodeExt(nonaddressable %T)", v.Interface()) + } + return extEncoder(e, v.Addr()) + } +} + +func RegisterExtDecoder( + extID int8, + value interface{}, + decoder func(dec *Decoder, v reflect.Value, extLen int) error, +) { + unregisterExtDecoder(extID) + + typ := reflect.TypeOf(value) + extDecoder := makeExtDecoder(extID, typ, decoder) + extTypes[extID] = &extInfo{ + Type: typ, + Decoder: decoder, + } + + typeDecMap.Store(extID, typ) + typeDecMap.Store(typ, 
extDecoder) + if typ.Kind() == reflect.Ptr { + typeDecMap.Store(typ.Elem(), makeExtDecoderAddr(extDecoder)) + } +} + +func unregisterExtDecoder(extID int8) { + t, ok := typeDecMap.Load(extID) + if !ok { + return + } + typeDecMap.Delete(extID) + delete(extTypes, extID) + typ := t.(reflect.Type) + typeDecMap.Delete(typ) + if typ.Kind() == reflect.Ptr { + typeDecMap.Delete(typ.Elem()) + } +} + +func makeExtDecoder( + wantedExtID int8, + typ reflect.Type, + decoder func(d *Decoder, v reflect.Value, extLen int) error, +) decoderFunc { + return nilAwareDecoder(typ, func(d *Decoder, v reflect.Value) error { + extID, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + if extID != wantedExtID { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", extID, wantedExtID) + } + return decoder(d, v, extLen) + }) +} + +func makeExtDecoderAddr(extDecoder decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: DecodeExt(nonaddressable %T)", v.Interface()) + } + return extDecoder(d, v.Addr()) + } +} + +func (e *Encoder) EncodeExtHeader(extID int8, extLen int) error { + if err := e.encodeExtLen(extLen); err != nil { + return err + } + if err := e.w.WriteByte(byte(extID)); err != nil { + return err + } + return nil +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(msgpcode.FixExt1) + case 2: + return e.writeCode(msgpcode.FixExt2) + case 4: + return e.writeCode(msgpcode.FixExt4) + case 8: + return e.writeCode(msgpcode.FixExt8) + case 16: + return e.writeCode(msgpcode.FixExt16) + } + if l <= math.MaxUint8 { + return e.write1(msgpcode.Ext8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Ext16, uint16(l)) + } + return e.write4(msgpcode.Ext32, uint32(l)) +} + +func (d *Decoder) DecodeExtHeader() (extID int8, extLen int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.extHeader(c) +} + +func (d *Decoder) extHeader(c byte) (int8, int, error) { + extLen, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + extID, err := d.readCode() + if err != nil { + return 0, 0, err + } + + return int8(extID), extLen, nil +} + +func (d *Decoder) parseExtLen(c byte) (int, error) { + switch c { + case msgpcode.FixExt1: + return 1, nil + case msgpcode.FixExt2: + return 2, nil + case msgpcode.FixExt4: + return 4, nil + case msgpcode.FixExt8: + return 8, nil + case msgpcode.FixExt16: + return 16, nil + case msgpcode.Ext8: + n, err := d.uint8() + return int(n), err + case msgpcode.Ext16: + n, err := d.uint16() + return int(n), err + case msgpcode.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext len", c) + } +} + +func (d *Decoder) decodeInterfaceExt(c byte) (interface{}, error) { + extID, extLen, err := d.extHeader(c) + if err != nil { + return nil, err + } + + info, ok := extTypes[extID] + if !ok { + return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) + } + + v := d.newValue(info.Type).Elem() + if nilable(v.Kind()) && v.IsNil() { + v.Set(d.newValue(info.Type.Elem())) + } + + if err := info.Decoder(d, v, extLen); err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c byte) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c byte) error { + // Read ext type. 
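+ // (The leading ext code byte c has already been consumed by the caller.)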
+ _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c byte) int { + switch c { + case msgpcode.Ext8: + return 1 + case msgpcode.Ext16: + return 2 + case msgpcode.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/intern.go b/vendor/github.com/vmihailenco/msgpack/v5/intern.go new file mode 100644 index 0000000..7f019aa --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/intern.go @@ -0,0 +1,236 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +const ( + minInternedStringLen = 3 + maxDictLen = math.MaxUint16 +) + +var internedStringExtID = int8(math.MinInt8) + +func init() { + extTypes[internedStringExtID] = &extInfo{ + Type: stringType, + Decoder: decodeInternedStringExt, + } +} + +func decodeInternedStringExt(d *Decoder, v reflect.Value, extLen int) error { + idx, err := d.decodeInternedStringIndex(extLen) + if err != nil { + return err + } + + s, err := d.internedStringAtIndex(idx) + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +//------------------------------------------------------------------------------ + +func encodeInternedInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + v = v.Elem() + if v.Kind() == reflect.String { + return e.encodeInternedString(v.String(), true) + } + return e.EncodeValue(v) +} + +func encodeInternedStringValue(e *Encoder, v reflect.Value) error { + return e.encodeInternedString(v.String(), true) +} + +func (e *Encoder) encodeInternedString(s string, intern bool) error { + // Interned string takes at least 3 bytes. Plain string 1 byte + string len. 
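+ // A string already present in the dict is encoded as a small ext-wrapped
+ // index instead of its bytes; new candidates are appended so that later
+ // occurrences can use the short form. The decoder grows an identical
+ // table in the same order, so each index resolves to an earlier string.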
+ if idx, ok := e.dict[s]; ok { + return e.encodeInternedStringIndex(idx) + } + + if intern && len(s) >= minInternedStringLen && len(e.dict) < maxDictLen { + if e.dict == nil { + e.dict = make(map[string]int) + } + idx := len(e.dict) + e.dict[s] = idx + } + + return e.encodeNormalString(s) +} + +func (e *Encoder) encodeInternedStringIndex(idx int) error { + if idx <= math.MaxUint8 { + if err := e.writeCode(msgpcode.FixExt1); err != nil { + return err + } + return e.write1(byte(internedStringExtID), uint8(idx)) + } + + if idx <= math.MaxUint16 { + if err := e.writeCode(msgpcode.FixExt2); err != nil { + return err + } + return e.write2(byte(internedStringExtID), uint16(idx)) + } + + if uint64(idx) <= math.MaxUint32 { + if err := e.writeCode(msgpcode.FixExt4); err != nil { + return err + } + return e.write4(byte(internedStringExtID), uint32(idx)) + } + + return fmt.Errorf("msgpack: interned string index=%d is too large", idx) +} + +//------------------------------------------------------------------------------ + +func decodeInternedInterfaceValue(d *Decoder, v reflect.Value) error { + s, err := d.decodeInternedString(true) + if err == nil { + v.Set(reflect.ValueOf(s)) + return nil + } + if err != nil { + if _, ok := err.(unexpectedCodeError); !ok { + return err + } + } + + if err := d.s.UnreadByte(); err != nil { + return err + } + return decodeInterfaceValue(d, v) +} + +func decodeInternedStringValue(d *Decoder, v reflect.Value) error { + s, err := d.decodeInternedString(true) + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +func (d *Decoder) decodeInternedString(intern bool) (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + + if msgpcode.IsFixedString(c) { + n := int(c & msgpcode.FixedStrMask) + return d.decodeInternedStringWithLen(n, intern) + } + + switch c { + case msgpcode.Nil: + return "", nil + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4: + typeID, extLen, err := d.extHeader(c) + if err != nil { + return "", err + } + if typeID != internedStringExtID { + err := fmt.Errorf("msgpack: got ext type=%d, wanted %d", + typeID, internedStringExtID) + return "", err + } + + idx, err := d.decodeInternedStringIndex(extLen) + if err != nil { + return "", err + } + + return d.internedStringAtIndex(idx) + case msgpcode.Str8, msgpcode.Bin8: + n, err := d.uint8() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + case msgpcode.Str16, msgpcode.Bin16: + n, err := d.uint16() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + case msgpcode.Str32, msgpcode.Bin32: + n, err := d.uint32() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + } + + return "", unexpectedCodeError{ + code: c, + hint: "interned string", + } +} + +func (d *Decoder) decodeInternedStringIndex(extLen int) (int, error) { + switch extLen { + case 1: + n, err := d.uint8() + if err != nil { + return 0, err + } + return int(n), nil + case 2: + n, err := d.uint16() + if err != nil { + return 0, err + } + return int(n), nil + case 4: + n, err := d.uint32() + if err != nil { + return 0, err + } + return int(n), nil + } + + err := fmt.Errorf("msgpack: unsupported ext len=%d decoding interned string", extLen) + return 0, err +} + +func (d *Decoder) internedStringAtIndex(idx int) (string, error) { + if idx >= len(d.dict) { + err := fmt.Errorf("msgpack: interned string at index=%d does not exist", idx) + return "", err + } + return 
d.dict[idx], nil +} + +func (d *Decoder) decodeInternedStringWithLen(n int, intern bool) (string, error) { + if n <= 0 { + return "", nil + } + + s, err := d.stringWithLen(n) + if err != nil { + return "", err + } + + if intern && len(s) >= minInternedStringLen && len(d.dict) < maxDictLen { + d.dict = append(d.dict, s) + } + + return s, nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go b/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go new file mode 100644 index 0000000..4fa000b --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go @@ -0,0 +1,52 @@ +package msgpack + +import "fmt" + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} + +//------------------------------------------------------------------------------ + +type RawMessage []byte + +var ( + _ CustomEncoder = (RawMessage)(nil) + _ CustomDecoder = (*RawMessage)(nil) +) + +func (m RawMessage) EncodeMsgpack(enc *Encoder) error { + return enc.write(m) +} + +func (m *RawMessage) DecodeMsgpack(dec *Decoder) error { + msg, err := dec.DecodeRaw() + if err != nil { + return err + } + *m = msg + return nil +} + +//------------------------------------------------------------------------------ + +type unexpectedCodeError struct { + hint string + code byte +} + +func (err unexpectedCodeError) Error() string { + return fmt.Sprintf("msgpack: unexpected code=%x decoding %s", err.code, err.hint) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go b/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go new file mode 100644 index 0000000..e35389c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go @@ -0,0 +1,88 @@ +package msgpcode + +var ( + PosFixedNumHigh byte = 0x7f + NegFixedNumLow byte = 0xe0 + + Nil byte = 0xc0 + + False byte = 0xc2 + True byte = 0xc3 + + Float byte = 0xca + Double byte = 0xcb + + Uint8 byte = 0xcc + Uint16 byte = 0xcd + Uint32 byte = 0xce + Uint64 byte = 0xcf + + Int8 byte = 0xd0 + Int16 byte = 0xd1 + Int32 byte = 0xd2 + Int64 byte = 0xd3 + + FixedStrLow byte = 0xa0 + FixedStrHigh byte = 0xbf + FixedStrMask byte = 0x1f + Str8 byte = 0xd9 + Str16 byte = 0xda + Str32 byte = 0xdb + + Bin8 byte = 0xc4 + Bin16 byte = 0xc5 + Bin32 byte = 0xc6 + + FixedArrayLow byte = 0x90 + FixedArrayHigh byte = 0x9f + FixedArrayMask byte = 0xf + Array16 byte = 0xdc + Array32 byte = 0xdd + + FixedMapLow byte = 0x80 + FixedMapHigh byte = 0x8f + FixedMapMask byte = 0xf + Map16 byte = 0xde + Map32 byte = 0xdf + + FixExt1 byte = 0xd4 + FixExt2 byte = 0xd5 + FixExt4 byte = 0xd6 + FixExt8 byte = 0xd7 + FixExt16 byte = 0xd8 + Ext8 byte = 0xc7 + Ext16 byte = 0xc8 + Ext32 byte = 0xc9 +) + +func IsFixedNum(c byte) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c byte) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c byte) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c byte) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c byte) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsBin(c byte) bool { + return c == Bin8 || c == Bin16 || c == Bin32 +} + +func IsFixedExt(c byte) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c byte) bool { + return IsFixedExt(c) 
|| c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/package.json b/vendor/github.com/vmihailenco/msgpack/v5/package.json new file mode 100644 index 0000000..921f8ea --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/package.json @@ -0,0 +1,4 @@ +{ + "name": "msgpack", + "version": "5.4.1" +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/safe.go b/vendor/github.com/vmihailenco/msgpack/v5/safe.go new file mode 100644 index 0000000..8352c9d --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/safe.go @@ -0,0 +1,13 @@ +// +build appengine + +package msgpack + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { + return string(b) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/time.go b/vendor/github.com/vmihailenco/msgpack/v5/time.go new file mode 100644 index 0000000..1a4ba12 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/time.go @@ -0,0 +1,151 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var timeExtID int8 = -1 + +func init() { + RegisterExtEncoder(timeExtID, time.Time{}, timeEncoder) + RegisterExtDecoder(timeExtID, time.Time{}, timeDecoder) +} + +func timeEncoder(e *Encoder, v reflect.Value) ([]byte, error) { + return e.encodeTime(v.Interface().(time.Time)), nil +} + +func timeDecoder(d *Decoder, v reflect.Value, extLen int) error { + tm, err := d.decodeTime(extLen) + if err != nil { + return err + } + + if tm.IsZero() { + // Zero time does not have timezone information. + tm = tm.UTC() + } + + ptr := v.Addr().Interface().(*time.Time) + *ptr = tm + + return nil +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtID)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + if e.timeBuf == nil { + e.timeBuf = make([]byte, 12) + } + + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + + if data&0xffffffff00000000 == 0 { + b := e.timeBuf[:4] + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } + + b := e.timeBuf[:8] + binary.BigEndian.PutUint64(b, data) + return b + } + + b := e.timeBuf[:12] + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + binary.BigEndian.PutUint64(b[4:], secs) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == msgpcode.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if msgpcode.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extID, extLen, err := d.extHeader(c) + if err != nil { + return time.Time{}, err + } + + // NodeJS seems to use extID 13. + if extID != timeExtID && extID != 13 { + return time.Time{}, fmt.Errorf("msgpack: invalid time ext id=%d", extID) + } + + tm, err := d.decodeTime(extLen) + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Zero time does not have timezone information. 
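+ // Normalizing to UTC makes a decoded zero time behave like time.Time{}
+ // regardless of the machine's local time zone.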
+ return tm.UTC(), nil
+}
+
+func (d *Decoder) decodeTime(extLen int) (time.Time, error) {
+ b, err := d.readN(extLen)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ switch len(b) {
+ case 4:
+ sec := binary.BigEndian.Uint32(b)
+ return time.Unix(int64(sec), 0), nil
+ case 8:
+ sec := binary.BigEndian.Uint64(b)
+ nsec := int64(sec >> 34)
+ sec &= 0x00000003ffffffff
+ return time.Unix(int64(sec), nsec), nil
+ case 12:
+ nsec := binary.BigEndian.Uint32(b)
+ sec := binary.BigEndian.Uint64(b[4:])
+ return time.Unix(int64(sec), int64(nsec)), nil
+ default:
+ err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen)
+ return time.Time{}, err
+ }
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/types.go b/vendor/github.com/vmihailenco/msgpack/v5/types.go
new file mode 100644
index 0000000..d212e09
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/types.go
@@ -0,0 +1,413 @@
+package msgpack
+
+import (
+ "encoding"
+ "fmt"
+ "log"
+ "reflect"
+ "sync"
+
+ "github.com/vmihailenco/tagparser/v2"
+)
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+var (
+ customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem()
+ customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem()
+)
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+var (
+ binaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+)
+
+var (
+ textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+type (
+ encoderFunc func(*Encoder, reflect.Value) error
+ decoderFunc func(*Decoder, reflect.Value) error
+)
+
+var (
+ typeEncMap sync.Map
+ typeDecMap sync.Map
+)
+
+// Register registers encoder and decoder functions for a value.
+// This is a low-level API, and in most cases you should prefer implementing
+// the CustomEncoder/CustomDecoder or Marshaler/Unmarshaler interfaces.
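+//
+// A hedged usage sketch (MyType, its field S, and both function literals
+// are hypothetical):
+//
+// msgpack.Register(MyType{},
+// func(e *msgpack.Encoder, v reflect.Value) error {
+// return e.EncodeString(v.Interface().(MyType).S)
+// },
+// func(d *msgpack.Decoder, v reflect.Value) error {
+// s, err := d.DecodeString()
+// if err == nil {
+// v.Set(reflect.ValueOf(MyType{S: s}))
+// }
+// return err
+// })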
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typeEncMap.Store(typ, enc) + } + if dec != nil { + typeDecMap.Store(typ, dec) + } +} + +//------------------------------------------------------------------------------ + +const defaultStructTag = "msgpack" + +var structs = newStructCache() + +type structCache struct { + m sync.Map +} + +type structCacheKey struct { + typ reflect.Type + tag string +} + +func newStructCache() *structCache { + return new(structCache) +} + +func (m *structCache) Fields(typ reflect.Type, tag string) *fields { + key := structCacheKey{tag: tag, typ: typ} + + if v, ok := m.m.Load(key); ok { + return v.(*fields) + } + + fs := getFields(typ, tag) + m.m.Store(key, fs) + + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + encoder encoderFunc + decoder decoderFunc + name string + index []int + omitEmpty bool +} + +func (f *field) Omit(e *Encoder, strct reflect.Value) bool { + v, ok := fieldByIndex(strct, f.index) + if !ok { + return true + } + forced := e.flags&omitEmptyFlag != 0 + return (f.omitEmpty || forced) && e.isEmptyValue(v) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + v, ok := fieldByIndex(strct, f.index) + if !ok { + return e.EncodeNil() + } + return f.encoder(e, v) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + v := fieldByIndexAlloc(strct, f.index) + return f.decoder(d, v) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Type reflect.Type + Map map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(typ reflect.Type) *fields { + return &fields{ + Type: typ, + Map: make(map[string]*field, typ.NumField()), + List: make([]*field, 0, typ.NumField()), + } +} + +func (fs *fields) Add(field *field) { + fs.warnIfFieldExists(field.name) + fs.Map[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) warnIfFieldExists(name string) { + if _, ok := fs.Map[name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, name) + } +} + +func (fs *fields) OmitEmpty(e *Encoder, strct reflect.Value) []*field { + forced := e.flags&omitEmptyFlag != 0 + if !fs.hasOmitEmpty && !forced { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + + for _, f := range fs.List { + if !f.Omit(e, strct) { + fields = append(fields, f) + } + } + + return fields +} + +func getFields(typ reflect.Type, fallbackTag string) *fields { + fs := newFields(typ) + + var omitEmpty bool + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + + tagStr := f.Tag.Get(defaultStructTag) + if tagStr == "" && fallbackTag != "" { + tagStr = f.Tag.Get(fallbackTag) + } + + tag := tagparser.Parse(tagStr) + if tag.Name == "-" { + continue + } + + if f.Name == "_msgpack" { + fs.AsArray = tag.HasOption("as_array") || tag.HasOption("asArray") + if tag.HasOption("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: tag.Name, + index: f.Index, + omitEmpty: omitEmpty || tag.HasOption("omitempty"), + } + + if tag.HasOption("intern") { + switch f.Type.Kind() { + case reflect.Interface: + field.encoder = encodeInternedInterfaceValue + field.decoder = decodeInternedInterfaceValue + case reflect.String: + field.encoder = encodeInternedStringValue + 
field.decoder = decodeInternedStringValue + default: + err := fmt.Errorf("msgpack: intern strings are not supported on %s", f.Type) + panic(err) + } + } else { + field.encoder = getEncoder(f.Type) + field.decoder = getDecoder(f.Type) + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !tag.HasOption("noinline") { + inline := tag.HasOption("inline") + if inline { + inlineFields(fs, f.Type, field, fallbackTag) + } else { + inline = shouldInline(fs, f.Type, field, fallbackTag) + } + + if inline { + if _, ok := fs.Map[field.name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, field.name) + } + fs.Map[field.name] = field + continue + } + } + + fs.Add(field) + + if alias, ok := tag.Options["alias"]; ok { + fs.warnIfFieldExists(alias) + fs.Map[alias] = field + } + } + return fs +} + +var ( + encodeStructValuePtr uintptr + decodeStructValuePtr uintptr +) + +//nolint:gochecknoinits +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, tag string) { + inlinedFields := getFields(typ, tag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func shouldInline(fs *fields, typ reflect.Type, f *field, tag string) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, tag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) 
+ fs.Add(field) + } + return true +} + +type isZeroer interface { + IsZero() bool +} + +func (e *Encoder) isEmptyValue(v reflect.Value) bool { + kind := v.Kind() + + for kind == reflect.Interface { + if v.IsNil() { + return true + } + v = v.Elem() + kind = v.Kind() + } + + if z, ok := v.Interface().(isZeroer); ok { + return nilable(kind) && v.IsNil() || z.IsZero() + } + + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Struct: + structFields := structs.Fields(v.Type(), e.structTag) + fields := structFields.OmitEmpty(e, v) + return len(fields) == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Ptr: + return v.IsNil() + default: + return false + } +} + +func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, ok bool) { + if len(index) == 1 { + return v.Field(index[0]), true + } + + for i, idx := range index { + if i > 0 { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return v, false + } + v = v.Elem() + } + } + v = v.Field(idx) + } + + return v, true +} + +func fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + + for i, idx := range index { + if i > 0 { + var ok bool + v, ok = indirectNil(v) + if !ok { + return v + } + } + v = v.Field(idx) + } + + return v +} + +func indirectNil(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(cachedValue(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go b/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go new file mode 100644 index 0000000..192ac47 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package msgpack + +import ( + "unsafe" +) + +// bytesToString converts byte slice to string. +func bytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// stringToBytes converts string to byte slice. +func stringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/version.go b/vendor/github.com/vmihailenco/msgpack/v5/version.go new file mode 100644 index 0000000..ca10205 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/version.go @@ -0,0 +1,6 @@ +package msgpack + +// Version is the current release version. 
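+// The value matches the vendored package.json, which also reports 5.4.1.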
+func Version() string { + return "5.4.1" +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml b/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml new file mode 100644 index 0000000..7194cd0 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml @@ -0,0 +1,19 @@ +dist: xenial +language: go + +go: + - 1.14.x + - 1.15.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/tagparser + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 diff --git a/vendor/github.com/vmihailenco/tagparser/v2/LICENSE b/vendor/github.com/vmihailenco/tagparser/v2/LICENSE new file mode 100644 index 0000000..3fc93fd --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 The github.com/vmihailenco/tagparser Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/tagparser/v2/Makefile b/vendor/github.com/vmihailenco/tagparser/v2/Makefile new file mode 100644 index 0000000..0b1b595 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/Makefile @@ -0,0 +1,9 @@ +all: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet ./... + go get github.com/gordonklaus/ineffassign + ineffassign . 
+ golangci-lint run diff --git a/vendor/github.com/vmihailenco/tagparser/v2/README.md b/vendor/github.com/vmihailenco/tagparser/v2/README.md new file mode 100644 index 0000000..c0259de --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/README.md @@ -0,0 +1,24 @@ +# Opinionated Golang tag parser + +[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser) +[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser) + +## Installation + +Install: + +```shell +go get github.com/vmihailenco/tagparser/v2 +``` + +## Quickstart + +```go +func ExampleParse() { + tag := tagparser.Parse("some_name,key:value,key2:'complex value'") + fmt.Println(tag.Name) + fmt.Println(tag.Options) + // Output: some_name + // map[key:value key2:'complex value'] +} +``` diff --git a/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go new file mode 100644 index 0000000..21a9bc7 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go @@ -0,0 +1,82 @@ +package parser + +import ( + "bytes" + + "github.com/vmihailenco/tagparser/v2/internal" +) + +type Parser struct { + b []byte + i int +} + +func New(b []byte) *Parser { + return &Parser{ + b: b, + } +} + +func NewString(s string) *Parser { + return New(internal.StringToBytes(s)) +} + +func (p *Parser) Bytes() []byte { + return p.b[p.i:] +} + +func (p *Parser) Valid() bool { + return p.i < len(p.b) +} + +func (p *Parser) Read() byte { + if p.Valid() { + c := p.b[p.i] + p.Advance() + return c + } + return 0 +} + +func (p *Parser) Peek() byte { + if p.Valid() { + return p.b[p.i] + } + return 0 +} + +func (p *Parser) Advance() { + p.i++ +} + +func (p *Parser) Skip(skip byte) bool { + if p.Peek() == skip { + p.Advance() + return true + } + return false +} + +func (p *Parser) SkipBytes(skip []byte) bool { + if len(skip) > len(p.b[p.i:]) { + return false + } + if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { + return false + } + p.i += len(skip) + return true +} + +func (p *Parser) ReadSep(sep byte) ([]byte, bool) { + ind := bytes.IndexByte(p.b[p.i:], sep) + if ind == -1 { + b := p.b[p.i:] + p.i = len(p.b) + return b, false + } + + b := p.b[p.i : p.i+ind] + p.i += ind + 1 + return b, true +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go new file mode 100644 index 0000000..870fe54 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go @@ -0,0 +1,11 @@ +// +build appengine + +package internal + +func BytesToString(b []byte) string { + return string(b) +} + +func StringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go new file mode 100644 index 0000000..f8bc18d --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package internal + +import ( + "unsafe" +) + +// BytesToString converts byte slice to string. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// StringToBytes converts string to byte slice. 
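+// The result aliases the string's backing array and must not be modified.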
+func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go b/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go new file mode 100644 index 0000000..5002e64 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go @@ -0,0 +1,166 @@ +package tagparser + +import ( + "strings" + + "github.com/vmihailenco/tagparser/v2/internal/parser" +) + +type Tag struct { + Name string + Options map[string]string +} + +func (t *Tag) HasOption(name string) bool { + _, ok := t.Options[name] + return ok +} + +func Parse(s string) *Tag { + p := &tagParser{ + Parser: parser.NewString(s), + } + p.parseKey() + return &p.Tag +} + +type tagParser struct { + *parser.Parser + + Tag Tag + hasName bool + key string +} + +func (p *tagParser) setTagOption(key, value string) { + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + + if !p.hasName { + p.hasName = true + if key == "" { + p.Tag.Name = value + return + } + } + if p.Tag.Options == nil { + p.Tag.Options = make(map[string]string) + } + if key == "" { + p.Tag.Options[value] = "" + } else { + p.Tag.Options[key] = value + } +} + +func (p *tagParser) parseKey() { + p.key = "" + + var b []byte + for p.Valid() { + c := p.Read() + switch c { + case ',': + p.Skip(' ') + p.setTagOption("", string(b)) + p.parseKey() + return + case ':': + p.key = string(b) + p.parseValue() + return + case '\'': + p.parseQuotedValue() + return + default: + b = append(b, c) + } + } + + if len(b) > 0 { + p.setTagOption("", string(b)) + } +} + +func (p *tagParser) parseValue() { + const quote = '\'' + c := p.Peek() + if c == quote { + p.Skip(quote) + p.parseQuotedValue() + return + } + + var b []byte + for p.Valid() { + c = p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + b = p.readBrackets(b) + case ',': + p.Skip(' ') + p.setTagOption(p.key, string(b)) + p.parseKey() + return + default: + b = append(b, c) + } + } + p.setTagOption(p.key, string(b)) +} + +func (p *tagParser) readBrackets(b []byte) []byte { + var lvl int +loop: + for p.Valid() { + c := p.Read() + switch c { + case '\\': + b = append(b, p.Read()) + case '(': + b = append(b, c) + lvl++ + case ')': + b = append(b, c) + lvl-- + if lvl < 0 { + break loop + } + default: + b = append(b, c) + } + } + return b +} + +func (p *tagParser) parseQuotedValue() { + const quote = '\'' + var b []byte + for p.Valid() { + bb, ok := p.ReadSep(quote) + if !ok { + b = append(b, bb...) + break + } + + // keep the escaped single-quote, and continue until we've found the + // one that isn't. + if len(bb) > 0 && bb[len(bb)-1] == '\\' { + b = append(b, bb[:len(bb)-1]...) + b = append(b, quote) + continue + } + + b = append(b, bb...) + break + } + + p.setTagOption(p.key, string(b)) + if p.Skip(',') { + p.Skip(' ') + } + p.parseKey() +} diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE new file mode 100644 index 0000000..2a7cf70 --- /dev/null +++ b/vendor/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 0000000..269e173 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 0000000..ec2acfe --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 0000000..271055b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. +func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 0000000..34c9ae7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,341 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
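The byteOrder helpers in byteorder.go re-implement just enough of encoding/binary to decode auxv and register words without adding a dependency. As a quick sanity check of the little-endian accessor, here is a stand-alone copy (the package's own type is unexported):

```go
package main

import "fmt"

// leUint32 is a stand-alone copy of littleEndian.Uint32 from
// byteorder.go, kept here only to illustrate the decoding.
func leUint32(b []byte) uint32 {
	_ = b[3] // bounds check hint to the compiler, as in the original
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func main() {
	fmt.Printf("%#x\n", leUint32([]byte{0x78, 0x56, 0x34, 0x12})) // 0x12345678
}
```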
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. 
+ HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasHPDS bool // Hierarchical permission disables in translations tables + HasLOR bool // Limited ordering regions + HasPAN bool // Privileged access never + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// The booleans in Loong64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +var Loong64 struct { + _ CacheLinePad + HasLSX bool // support 128-bit vector extension + HasLASX bool // support 256-bit vector extension + HasCRC32 bool // support CRC instruction + HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction + HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
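Taken together, these per-architecture structs are the package's public surface: init() fills them once at startup and callers simply branch on the booleans. A short consumer sketch (not part of the patch):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	if !cpu.Initialized {
		fmt.Println("feature detection could not run on this GOOS/GOARCH")
	}
	// On non-x86 platforms all X86 flags are false, so the portable
	// path is chosen automatically.
	switch {
	case cpu.X86.HasAVX2:
		fmt.Println("dispatch: AVX2 code path")
	case cpu.X86.HasSSE42:
		fmt.Println("dispatch: SSE4.2 code path")
	default:
		fmt.Println("dispatch: portable code path")
	}
}
```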
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + HasZvbb bool // Vector Basic Bit-manipulation + HasZvbc bool // Vector Carryless Multiplication + HasZvkb bool // Vector Cryptography Bit-manipulation + HasZvkt bool // Vector Data-Independent Execution Latency + HasZvkg bool // Vector GCM/GMAC + HasZvkn bool // NIST Algorithm Suite (AES/SHA256/SHA512) + HasZvknc bool // NIST Algorithm Suite with carryless multiply + HasZvkng bool // NIST Algorithm Suite with GCM + HasZvks bool // ShangMi Algorithm Suite + HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication + HasZvksg bool // ShangMi Algorithm Suite with GCM + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." 
{ + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 0000000..9bf0c32 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 0000000..301b752 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. 
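The processOptions logic above wires the feature flags to GODEBUG: keys take the form cpu.<name> (or cpu.all), values are on or off, and features marked Required cannot be disabled. A sketch of how a consumer observes the effect; the option name avx2 is one of the lowercase names registered by the x86 initOptions:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Run as, e.g.:
	//
	//	GODEBUG=cpu.avx2=off ./prog
	//
	// processOptions then clears X86.HasAVX2 even on AVX2-capable
	// hardware; "cpu.all=off" disables every non-required feature.
	fmt.Println("AVX2 enabled:", cpu.X86.HasAVX2)
}
```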
+const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 0000000..f449c67 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. 
+const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd", "openbsd": + doinit() + default: + // Many platforms don't seem to allow reading these registers. + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + + // ID_AA64MMFR1_EL1 + switch extractBits(mmfr1, 12, 15) { + case 1, 2: + ARM64.HasHPDS = true + } + + switch extractBits(mmfr1, 16, 19) { + case 1: + ARM64.HasLOR = true + } + + switch extractBits(mmfr1, 20, 23) { + case 1, 2, 3: + ARM64.HasPAN = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + 
ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 0000000..a4f24b3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,42 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + MRS ID_AA64ISAR0_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + MRS ID_AA64ISAR1_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getmmfr1() uint64 +TEXT ·getmmfr1(SB),NOSPLIT,$0-8 + // get Memory Model Feature Register 1 into x0 + MRS ID_AA64MMFR1_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + MRS ID_AA64PFR0_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + MRS ID_AA64ZFR0_EL1, R0 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 0000000..b838cb9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. 
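extractBits, defined a little further up, is the workhorse of parseARM64SystemRegisters: each ID register packs 4-bit feature fields at fixed offsets. A stand-alone copy with a hypothetical register value:

```go
package main

import "fmt"

// extractBits mirrors the helper in cpu_arm64.go: it pulls the
// inclusive bit range [start, end] out of a system-register value.
func extractBits(data uint64, start, end uint) uint {
	return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}

func main() {
	// Hypothetical ID_AA64ISAR0_EL1 value with the AES field (bits 4-7)
	// set to 2, which the parser above maps to AES plus PMULL support.
	var isar0 uint64 = 0x2 << 4
	fmt.Println(extractBits(isar0, 4, 7)) // 2
}
```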
+func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 0000000..e3fc5a8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getmmfr1() uint64 +func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 0000000..c8ae6dd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 0000000..32a4451 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +package cpu + +// cpuid is implemented in cpu_gc_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s new file mode 100644 index 0000000..ce208ce --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
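darwinKernelVersionCheck parses the uname release string into a (major, minor, patch) triple and then compares it lexicographically. The final expression is easy to misread, so here it is isolated as a runnable sketch (versionAtLeast is a name introduced here, not in the patch):

```go
package main

import "fmt"

// versionAtLeast isolates the comparison at the end of
// darwinKernelVersionCheck: true when mmp (major, minor, patch)
// is at least the given version.
func versionAtLeast(mmp [3]int, major, minor, patch int) bool {
	return mmp[0] > major || mmp[0] == major &&
		(mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch)
}

func main() {
	fmt.Println(versionAtLeast([3]int{21, 3, 0}, 21, 3, 0)) // true: first AVX512-safe kernel
	fmt.Println(versionAtLeast([3]int{21, 2, 6}, 21, 3, 0)) // false: pre-fix kernel
}
```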
+ +//go:build (386 || amd64 || amd64p32) && gc + +#include "textflag.h" + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB), NOSPLIT, $0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 0000000..8df2079 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getmmfr1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 0000000..9526d2c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 0000000..3f73a05 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +#include <cpuid.h> +#include <stdint.h> +#include <x86intrin.h> + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0.
+void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 0000000..170d21d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 0000000..743eb54 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 0000000..2057006 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
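Between them, the cpuid and xgetbv wrappers above supply the two inputs the x86 init path needs: the CPUID feature bits and the OS-enabled XCR0 state mask (the X86 struct comment earlier notes that HasAVX requires both). A hedged sketch of that gating with made-up register values:

```go
package main

import "fmt"

func main() {
	// Made-up register values, not read from hardware.
	const (
		osxsaveBit = uint32(1) << 27 // CPUID.1:ECX OSXSAVE
		avxBit     = uint32(1) << 28 // CPUID.1:ECX AVX
	)
	ecx := osxsaveBit | avxBit // pretend cpuid(1, 0) ecx result
	xcr0 := uint32(0x6)        // pretend xgetbv() eax: XMM (bit 1) + YMM (bit 2)

	hasOSXSAVE := ecx&osxsaveBit != 0
	osSupportsAVX := hasOSXSAVE && xcr0&0x6 == 0x6
	fmt.Println("AVX usable:", ecx&avxBit != 0 && osSupportsAVX) // true
}
```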
+ +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 0000000..f1caf0f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,120 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + + hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. 
+ // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go new file mode 100644 index 0000000..4f34114 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel. +const ( + hwcap_LOONGARCH_LSX = 1 << 4 + hwcap_LOONGARCH_LASX = 1 << 5 +) + +func doinit() { + // TODO: Features that require kernel support like LSX and LASX can + // be detected here once needed in std library or by the compiler. + Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX) + Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX) +} + +func hwcIsSet(hwc uint, val uint) bool { + return hwc&val != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 0000000..4686c1d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. 
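All of the Linux doinit variants share one pattern: AT_HWCAP and AT_HWCAP2 from the auxiliary vector are bit masks tested with isSet. A self-contained sketch using a few of the arm64 bit values from this patch (the hwCap value itself is hypothetical):

```go
package main

import "fmt"

func main() {
	// Bit positions copied from the arm64 hwcap constants above.
	const (
		hwcapFP    = uint(1) << 0
		hwcapAES   = uint(1) << 3
		hwcapCRC32 = uint(1) << 7
	)
	hwCap := hwcapFP | hwcapCRC32 // pretend auxv contents

	isSet := func(hwc, value uint) bool { return hwc&value != 0 }
	fmt.Println("fp:", isSet(hwCap, hwcapFP))       // true
	fmt.Println("aes:", isSet(hwCap, hwcapAES))     // false
	fmt.Println("crc32:", isSet(hwCap, hwcapCRC32)) // true
}
```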
+const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 0000000..a428dec --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 0000000..197188e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 0000000..ad74153 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,160 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. 
+// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_EXT_ZVBB = 0x20000 + riscv_HWPROBE_EXT_ZVBC = 0x40000 + riscv_HWPROBE_EXT_ZVKB = 0x80000 + riscv_HWPROBE_EXT_ZVKG = 0x100000 + riscv_HWPROBE_EXT_ZVKNED = 0x200000 + riscv_HWPROBE_EXT_ZVKNHB = 0x800000 + riscv_HWPROBE_EXT_ZVKSED = 0x1000000 + riscv_HWPROBE_EXT_ZVKSH = 0x2000000 + riscv_HWPROBE_EXT_ZVKT = 0x4000000 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. 
+ if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB) + RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC) + RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB) + RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG) + RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT) + // Cryptography shorthand extensions + RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) && + isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc + RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg + RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) && + isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc + RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 0000000..1517ac6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
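The riscv64 path above decodes a single hwprobe value word into extension flags and then derives the vector-crypto shorthands from combinations of them. A small sketch interpreting a hypothetical value word, with constants copied from the riscv_HWPROBE_* block in this file:

```go
package main

import "fmt"

func main() {
	// Values copied from the riscv_HWPROBE_* constants above.
	const (
		imaC   = uint(0x2)
		imaV   = uint(0x4)
		extZba = uint(0x8)
	)
	isSet := func(hwc, value uint) bool { return hwc&value != 0 }

	// Hypothetical pairs[0].value as the kernel might fill it in.
	v := imaC | extZba
	fmt.Println("C:", isSet(v, imaC))     // true
	fmt.Println("V:", isSet(v, imaV))     // false
	fmt.Println("Zba:", isSet(v, extZba)) // true
}
```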
+ +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 0000000..45ecb29 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package cpu + +const cacheLineSize = 64 + +// Bit fields for CPUCFG registers, Related reference documents: +// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg +const ( + // CPUCFG1 bits + cpucfg1_CRC32 = 1 << 25 + + // CPUCFG2 bits + cpucfg2_LAM_BH = 1 << 27 + cpucfg2_LAMCAS = 1 << 28 +) + +func initOptions() { + options = []option{ + {Name: "lsx", Feature: &Loong64.HasLSX}, + {Name: "lasx", Feature: &Loong64.HasLASX}, + {Name: "crc32", Feature: &Loong64.HasCRC32}, + {Name: "lam_bh", Feature: &Loong64.HasLAM_BH}, + {Name: "lamcas", Feature: &Loong64.HasLAMCAS}, + } + + // The CPUCFG data on Loong64 only reflects the hardware capabilities, + // not the kernel support status, so features such as LSX and LASX that + // require kernel support cannot be obtained from the CPUCFG data. + // + // These features only require hardware capability support and do not + // require kernel specific support, so they can be obtained directly + // through CPUCFG + cfg1 := get_cpucfg(1) + cfg2 := get_cpucfg(2) + + Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32) + Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS) + Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH) +} + +func get_cpucfg(reg uint32) uint32 + +func cfgIsSet(cfg uint32, val uint32) bool { + return cfg&val != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/vendor/golang.org/x/sys/cpu/cpu_loong64.s new file mode 100644 index 0000000..71cbaf1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.s @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func get_cpucfg(reg uint32) uint32 +TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0 + MOVW reg+0(FP), R5 + // CPUCFG R5, R4 = 0x00006ca4 + WORD $0x00006ca4 + MOVW R4, ret+8(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 0000000..fedb00c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
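Note that initS390Xbase tests facilities with hwCap&featureMask == featureMask rather than != 0. For single-bit masks the two agree, but the equality form generalizes to multi-bit masks, where every bit must be present. A sketch with made-up bits:

```go
package main

import "fmt"

func main() {
	// The s390x-style test: all bits of the mask must be set.
	has := func(hwc, mask uint) bool { return hwc&mask == mask }

	const (
		bitA = uint(1) << 1
		bitB = uint(1) << 3
	)
	hwCap := bitA // pretend HWCAP with only bitA set

	fmt.Println(has(hwCap, bitA))      // true
	fmt.Println(has(hwCap, bitA|bitB)) // false: bitB is missing
}
```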
+ +//go:build mips64 || mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 0000000..ffb4ec7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 0000000..19aea06 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+	for partno, part := range parts {
+		nodes, err := sysctlNodes(mib)
+		if err != nil {
+			return nil, err
+		}
+		for _, node := range nodes {
+			n := make([]byte, 0)
+			for i := range node.Name {
+				if node.Name[i] != 0 {
+					n = append(n, byte(node.Name[i]))
+				}
+			}
+			if string(n) == part {
+				mib = append(mib, int32(node.Num))
+				break
+			}
+		}
+		if len(mib) != partno+1 {
+			return nil, syscall.ENOENT
+		}
+	}
+
+	return mib, nil
+}
+
+// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>
+type aarch64SysctlCPUID struct {
+	midr      uint64 /* Main ID Register */
+	revidr    uint64 /* Revision ID Register */
+	mpidr     uint64 /* Multiprocessor Affinity Register */
+	aa64dfr0  uint64 /* A64 Debug Feature Register 0 */
+	aa64dfr1  uint64 /* A64 Debug Feature Register 1 */
+	aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
+	aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
+	aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
+	aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
+	aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
+	aa64pfr0  uint64 /* A64 Processor Feature Register 0 */
+	aa64pfr1  uint64 /* A64 Processor Feature Register 1 */
+	aa64zfr0  uint64 /* A64 SVE Feature ID Register 0 */
+	mvfr0     uint32 /* Media and VFP Feature Register 0 */
+	mvfr1     uint32 /* Media and VFP Feature Register 1 */
+	mvfr2     uint32 /* Media and VFP Feature Register 2 */
+	pad       uint32
+	clidr     uint64 /* Cache Level ID Register */
+	ctr       uint64 /* Cache Type Register */
+}
+
+func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
+	mib, err := nametomib(name)
+	if err != nil {
+		return nil, err
+	}
+
+	out := aarch64SysctlCPUID{}
+	n := unsafe.Sizeof(out)
+	_, _, errno := syscall.Syscall6(
+		syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])),
+		uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&out)),
+		uintptr(unsafe.Pointer(&n)),
+		uintptr(0),
+		uintptr(0))
+	if errno != 0 {
+		return nil, errno
+	}
+	return &out, nil
+}
+
+func doinit() {
+	cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
+	if err != nil {
+		setMinimalFeatures()
+		return
+	}
+	parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0)
+
+	Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
new file mode 100644
index 0000000..87fd3a7
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Minimal copy of functionality from x/sys/unix so the cpu package can call
+// sysctl without depending on x/sys/unix.
+
+const (
+	// From OpenBSD's sys/sysctl.h.
+	_CTL_MACHDEP = 7
+
+	// From OpenBSD's machine/cpu.h.
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 0000000..054ba05 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 0000000..e9ecf2a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 0000000..5341e7f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && !netbsd && !openbsd && arm64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 0000000..5f8f241 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && (mips64 || mips64le) + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 0000000..89608fb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 0000000..5ab8780 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 0000000..a0fd7e2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 0000000..c14f12b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 0000000..0f617ae --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + // RISC-V Cryptography Extensions + {Name: "zvbb", Feature: &RISCV64.HasZvbb}, + {Name: "zvbc", Feature: &RISCV64.HasZvbc}, + {Name: "zvkb", Feature: &RISCV64.HasZvkb}, + {Name: "zvkg", Feature: &RISCV64.HasZvkg}, + {Name: "zvkt", Feature: &RISCV64.HasZvkt}, + {Name: "zvkn", Feature: &RISCV64.HasZvkn}, + {Name: "zvknc", Feature: &RISCV64.HasZvknc}, + {Name: "zvkng", Feature: &RISCV64.HasZvkng}, + {Name: "zvks", Feature: &RISCV64.HasZvks}, + {Name: "zvksc", Feature: &RISCV64.HasZvksc}, + {Name: "zvksg", Feature: &RISCV64.HasZvksg}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 0000000..5881b88 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. 
+// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 0000000..1fb4b70 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
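Because the facility and function bitmasks above use big-endian bit numbering (bit 0 is the MSB of the first doubleword), a short worked example may help; it uses the facility constants defined in cpu_s390x.go, and the example function itself is hypothetical.

// exampleFacilityBits is a hypothetical helper, not part of the vendored file.
func exampleFacilityBits() {
	// Facility 18 (long-displacement) lives in bits[0] at mask (1<<63)>>18.
	fl := facilityList{bits: [4]uint64{(1 << 63) >> 18}}
	hasLdisp := fl.Has(ldisp) // true: bit 18 is set
	hasDFP := fl.Has(dfp)     // false: bit 42 is clear
	_, _ = hasLdisp, hasDFP
}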
+ +//go:build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 0000000..384787e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 0000000..1e642f3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,162 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
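The query stubs above all share one calling convention: function code 0 in R0 requests a query, and the instruction stores a 128-bit bitmask of supported function codes at the address in R1. A hedged sketch of consuming such a result (meaningful on s390x gc builds only, since the bodies are the assembly above; the example function is hypothetical):

// exampleKMQuery is a hypothetical helper, not part of the vendored file.
func exampleKMQuery() {
	// Bit 18 of a KM-Query result (big-endian numbering, as with STFLE)
	// corresponds to function code 18, i.e. KM-AES-128.
	km := kmQuery()
	canAES128 := km.Has(aes128) // same as bitIsSet(km.bits[:], 18)
	_ = canAES128
}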
+
+//go:build 386 || amd64 || amd64p32
+
+package cpu
+
+import "runtime"
+
+const cacheLineSize = 64
+
+func initOptions() {
+	options = []option{
+		{Name: "adx", Feature: &X86.HasADX},
+		{Name: "aes", Feature: &X86.HasAES},
+		{Name: "avx", Feature: &X86.HasAVX},
+		{Name: "avx2", Feature: &X86.HasAVX2},
+		{Name: "avx512", Feature: &X86.HasAVX512},
+		{Name: "avx512f", Feature: &X86.HasAVX512F},
+		{Name: "avx512cd", Feature: &X86.HasAVX512CD},
+		{Name: "avx512er", Feature: &X86.HasAVX512ER},
+		{Name: "avx512pf", Feature: &X86.HasAVX512PF},
+		{Name: "avx512vl", Feature: &X86.HasAVX512VL},
+		{Name: "avx512bw", Feature: &X86.HasAVX512BW},
+		{Name: "avx512dq", Feature: &X86.HasAVX512DQ},
+		{Name: "avx512ifma", Feature: &X86.HasAVX512IFMA},
+		{Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI},
+		{Name: "avx5124vnniw", Feature: &X86.HasAVX5124VNNIW},
+		{Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS},
+		{Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ},
+		{Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
+		{Name: "avx512vnni", Feature: &X86.HasAVX512VNNI},
+		{Name: "avx512gfni", Feature: &X86.HasAVX512GFNI},
+		{Name: "avx512vaes", Feature: &X86.HasAVX512VAES},
+		{Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
+		{Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
+		{Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
+		{Name: "amxtile", Feature: &X86.HasAMXTile},
+		{Name: "amxint8", Feature: &X86.HasAMXInt8},
+		{Name: "amxbf16", Feature: &X86.HasAMXBF16},
+		{Name: "bmi1", Feature: &X86.HasBMI1},
+		{Name: "bmi2", Feature: &X86.HasBMI2},
+		{Name: "cx16", Feature: &X86.HasCX16},
+		{Name: "erms", Feature: &X86.HasERMS},
+		{Name: "fma", Feature: &X86.HasFMA},
+		{Name: "osxsave", Feature: &X86.HasOSXSAVE},
+		{Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+		{Name: "popcnt", Feature: &X86.HasPOPCNT},
+		{Name: "rdrand", Feature: &X86.HasRDRAND},
+		{Name: "rdseed", Feature: &X86.HasRDSEED},
+		{Name: "sse3", Feature: &X86.HasSSE3},
+		{Name: "sse41", Feature: &X86.HasSSE41},
+		{Name: "sse42", Feature: &X86.HasSSE42},
+		{Name: "ssse3", Feature: &X86.HasSSSE3},
+		{Name: "avxifma", Feature: &X86.HasAVXIFMA},
+		{Name: "avxvnni", Feature: &X86.HasAVXVNNI},
+		{Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
+
+		// These capabilities should always be enabled on amd64:
+		{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
+	}
+}
+
+func archInit() {
+
+	Initialized = true
+
+	maxID, _, _, _ := cpuid(0, 0)
+
+	if maxID < 1 {
+		return
+	}
+
+	_, _, ecx1, edx1 := cpuid(1, 0)
+	X86.HasSSE2 = isSet(26, edx1)
+
+	X86.HasSSE3 = isSet(0, ecx1)
+	X86.HasPCLMULQDQ = isSet(1, ecx1)
+	X86.HasSSSE3 = isSet(9, ecx1)
+	X86.HasFMA = isSet(12, ecx1)
+	X86.HasCX16 = isSet(13, ecx1)
+	X86.HasSSE41 = isSet(19, ecx1)
+	X86.HasSSE42 = isSet(20, ecx1)
+	X86.HasPOPCNT = isSet(23, ecx1)
+	X86.HasAES = isSet(25, ecx1)
+	X86.HasOSXSAVE = isSet(27, ecx1)
+	X86.HasRDRAND = isSet(30, ecx1)
+
+	var osSupportsAVX, osSupportsAVX512 bool
+	// For XGETBV, OSXSAVE bit is required and sufficient.
+	if X86.HasOSXSAVE {
+		eax, _ := xgetbv()
+		// Check if XMM and YMM registers have OS support.
+		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+
+		if runtime.GOOS == "darwin" {
+			// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
+			osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
+		} else {
+			// Check if OPMASK and ZMM registers have OS support.
+			osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+		}
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasRDSEED = isSet(18, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+
+	X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
+	if X86.HasAVX512 {
+		X86.HasAVX512F = true
+		X86.HasAVX512CD = isSet(28, ebx7)
+		X86.HasAVX512ER = isSet(27, ebx7)
+		X86.HasAVX512PF = isSet(26, ebx7)
+		X86.HasAVX512VL = isSet(31, ebx7)
+		X86.HasAVX512BW = isSet(30, ebx7)
+		X86.HasAVX512DQ = isSet(17, ebx7)
+		X86.HasAVX512IFMA = isSet(21, ebx7)
+		X86.HasAVX512VBMI = isSet(1, ecx7)
+		X86.HasAVX5124VNNIW = isSet(2, edx7)
+		X86.HasAVX5124FMAPS = isSet(3, edx7)
+		X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
+		X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
+		X86.HasAVX512VNNI = isSet(11, ecx7)
+		X86.HasAVX512GFNI = isSet(8, ecx7)
+		X86.HasAVX512VAES = isSet(9, ecx7)
+		X86.HasAVX512VBMI2 = isSet(6, ecx7)
+		X86.HasAVX512BITALG = isSet(12, ecx7)
+	}
+
+	X86.HasAMXTile = isSet(24, edx7)
+	X86.HasAMXInt8 = isSet(25, edx7)
+	X86.HasAMXBF16 = isSet(22, edx7)
+
+	// These features depend on the second level of extended features.
+	if eax7 >= 1 {
+		eax71, _, _, edx71 := cpuid(7, 1)
+		if X86.HasAVX512 {
+			X86.HasAVX512BF16 = isSet(5, eax71)
+		}
+		if X86.HasAVX {
+			X86.HasAVXIFMA = isSet(23, eax71)
+			X86.HasAVXVNNI = isSet(4, eax71)
+			X86.HasAVXVNNIInt8 = isSet(4, edx71)
+		}
+	}
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go
@@ -0,0 +1,71 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"os"
+)
+
+const (
+	_AT_HWCAP  = 16
+	_AT_HWCAP2 = 26
+
+	procAuxv = "/proc/self/auxv"
+
+	uintSize = int(32 << (^uint(0) >> 63))
+)
+
+// For those platforms that don't have a 'cpuid' equivalent we use HWCAP/HWCAP2.
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func readHWCAP() error {
+	// For Go 1.21+, get auxv from the Go runtime.
+	if a := getAuxv(); len(a) > 0 {
+		for len(a) >= 2 {
+			tag, val := a[0], uint(a[1])
+			a = a[2:]
+			switch tag {
+			case _AT_HWCAP:
+				hwCap = val
+			case _AT_HWCAP2:
+				hwCap2 = val
+			}
+		}
+		return nil
+	}
+
+	buf, err := os.ReadFile(procAuxv)
+	if err != nil {
+		// e.g. on android /proc/self/auxv is not accessible, so silently
+		// ignore the error and leave Initialized = false. On some
+		// architectures (e.g. arm64) doinit() implements a fallback
+		// readout and will set Initialized = true again.
+		return err
+	}
+	bo := hostByteOrder()
+	for len(buf) >= 2*(uintSize/8) {
+		var tag, val uint
+		switch uintSize {
+		case 32:
+			tag = uint(bo.Uint32(buf[0:]))
+			val = uint(bo.Uint32(buf[4:]))
+			buf = buf[8:]
+		case 64:
+			tag = uint(bo.Uint64(buf[0:]))
+			val = uint(bo.Uint64(buf[8:]))
+			buf = buf[16:]
+		}
+		switch tag {
+		case _AT_HWCAP:
+			hwCap = val
+		case _AT_HWCAP2:
+			hwCap2 = val
+		}
+	}
+	return nil
+}
diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go
new file mode 100644
index 0000000..56a7e1a
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/parse.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import "strconv"
+
+// parseRelease parses a dot-separated version number. It follows the semver
+// syntax, but allows the minor and patch versions to be elided.
+// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := range len(rel) { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := range len(rel) { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 0000000..4cd64c7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 0000000..5f92ac9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 0000000..4c9788e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
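A few worked examples of the lenient parsing that parseRelease in parse.go implements (the inputs are illustrative, and the example function is hypothetical):

// exampleParseRelease is a hypothetical helper, not part of the vendored file.
func exampleParseRelease() {
	// parseRelease("6.4")   -> 6, 4, 0, true  (patch elided)
	// parseRelease("6")     -> 6, 0, 0, true  (minor and patch elided)
	// parseRelease("linux") -> ok == false
	major, minor, patch, ok := parseRelease("6.4.0-rc3") // suffix after '-' stripped
	_ = ok && major == 6 && minor == 4 && patch == 0     // true
}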
+
+//go:build go1.21
+
+package cpu
+
+import (
+	_ "unsafe" // for linkname
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+func init() {
+	getAuxvFn = runtime_getAuxv
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
new file mode 100644
index 0000000..1b9ccb0
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Recreate a getsystemcfg syscall handler instead of
+// using the one provided by x/sys/unix to avoid having
+// a dependency between them. (See golang.org/issue/32102)
+// Moreover, this file will be used during the building of
+// gccgo's libgo and thus must not use a CGo method.
+
+//go:build aix && gccgo
+
+package cpu
+
+import (
+	"syscall"
+)
+
+//extern getsystemcfg
+func gccgoGetsystemcfg(label uint32) (r uint64)
+
+func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) {
+	r1 = uintptr(gccgoGetsystemcfg(uint32(label)))
+	e1 = syscall.GetErrno()
+	return
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
new file mode 100644
index 0000000..e8b6cdb
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Minimal copy of x/sys/unix so the cpu package can make a
+// system call on AIX without depending on x/sys/unix.
+// (See golang.org/issue/32102)
+
+//go:build aix && ppc64 && gc
+
+package cpu
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o"
+
+//go:linkname libc_getsystemcfg libc_getsystemcfg
+
+type syscallFunc uintptr
+
+var libc_getsystemcfg syscallFunc
+
+type errno = syscall.Errno
+
+// Implemented in runtime/syscall_aix.go.
+func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
+
+func callgetsystemcfg(label int) (r1 uintptr, e1 errno) {
+	r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
+	return
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go
new file mode 100644
index 0000000..4d0888b
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go
@@ -0,0 +1,98 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Minimal copy of x/sys/unix so the cpu package can make a
+// system call on Darwin without depending on x/sys/unix.
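For context on the auxv plumbing above: the slice returned by runtime_getAuxv is a flat sequence of tag/value pairs, which is why readHWCAP in hwcap_linux.go walks it two elements at a time. A small sketch (tag numbers from that file, the values made up, and the example function hypothetical):

// exampleAuxvWalk is a hypothetical helper, not part of the vendored file.
func exampleAuxvWalk() {
	// Layout: [tag, value, tag, value, ...]; 16 is _AT_HWCAP, 26 is _AT_HWCAP2.
	a := []uintptr{16, 0x8ff, 26, 0x0}
	for len(a) >= 2 {
		tag, val := a[0], uint(a[1])
		a = a[2:]
		_, _ = tag, val // dispatch on tag exactly as readHWCAP does
	}
}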
+ +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. + +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/modules.txt b/vendor/modules.txt index 2f0e94f..82f5b5c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -25,11 +25,17 @@ github.com/jackc/pgx/v5/pgconn/ctxwatch github.com/jackc/pgx/v5/pgconn/internal/bgreader github.com/jackc/pgx/v5/pgproto3 github.com/jackc/pgx/v5/pgtype +# github.com/jinzhu/inflection v1.0.0 +## explicit +github.com/jinzhu/inflection # github.com/kr/pretty v0.3.1 ## explicit; go 1.12 # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib +# github.com/puzpuzpuz/xsync/v3 v3.5.1 +## explicit; go 1.18 +github.com/puzpuzpuz/xsync/v3 # github.com/rogpeppe/go-internal v1.14.1 ## explicit; go 1.23 # github.com/spf13/cobra v1.10.2 @@ -43,9 +49,35 @@ github.com/spf13/pflag github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require +# github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc +## explicit +github.com/tmthrgd/go-hex +# github.com/uptrace/bun v1.2.16 +## explicit; go 1.24.0 +github.com/uptrace/bun +github.com/uptrace/bun/dialect +github.com/uptrace/bun/dialect/feature +github.com/uptrace/bun/dialect/sqltype +github.com/uptrace/bun/extra/bunjson 
+github.com/uptrace/bun/internal +github.com/uptrace/bun/internal/parser +github.com/uptrace/bun/internal/tagparser +github.com/uptrace/bun/schema +# github.com/vmihailenco/msgpack/v5 v5.4.1 +## explicit; go 1.19 +github.com/vmihailenco/msgpack/v5 +github.com/vmihailenco/msgpack/v5/msgpcode +# github.com/vmihailenco/tagparser/v2 v2.0.0 +## explicit; go 1.15 +github.com/vmihailenco/tagparser/v2 +github.com/vmihailenco/tagparser/v2/internal +github.com/vmihailenco/tagparser/v2/internal/parser # golang.org/x/crypto v0.41.0 ## explicit; go 1.23.0 golang.org/x/crypto/pbkdf2 +# golang.org/x/sys v0.38.0 +## explicit; go 1.24.0 +golang.org/x/sys/cpu # golang.org/x/text v0.28.0 ## explicit; go 1.23.0 golang.org/x/text/cases