Vendor packages update

This commit is contained in:
2025-12-19 22:27:20 +02:00
parent 79effe6921
commit d9225a7310
196 changed files with 28595 additions and 1 deletions

9
go.mod
View File

@@ -1,12 +1,13 @@
module git.warky.dev/wdevs/relspecgo
go 1.24
go 1.24.0
require (
github.com/google/uuid v1.6.0
github.com/jackc/pgx/v5 v5.7.6
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
github.com/uptrace/bun v1.2.16
gopkg.in/yaml.v3 v3.0.1
)
@@ -15,10 +16,16 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
golang.org/x/crypto v0.41.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/text v0.28.0 // indirect
)

14
go.sum
View File

@@ -15,6 +15,8 @@ github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -22,6 +24,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
@@ -36,11 +40,21 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
github.com/uptrace/bun v1.2.16 h1:QlObi6ZIK5Ao7kAALnh91HWYNZUBbVwye52fmlQM9kc=
github.com/uptrace/bun v1.2.16/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM=
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

21
vendor/github.com/jinzhu/inflection/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 - Jinzhu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

55
vendor/github.com/jinzhu/inflection/README.md generated vendored Normal file
View File

@@ -0,0 +1,55 @@
# Inflection
Inflection pluralizes and singularizes English nouns
[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)
## Basic Usage
```go
inflection.Plural("person") => "people"
inflection.Plural("Person") => "People"
inflection.Plural("PERSON") => "PEOPLE"
inflection.Plural("bus") => "buses"
inflection.Plural("BUS") => "BUSES"
inflection.Plural("Bus") => "Buses"
inflection.Singular("people") => "person"
inflection.Singular("People") => "Person"
inflection.Singular("PEOPLE") => "PERSON"
inflection.Singular("buses") => "bus"
inflection.Singular("BUSES") => "BUS"
inflection.Singular("Buses") => "Bus"
inflection.Plural("FancyPerson") => "FancyPeople"
inflection.Singular("FancyPeople") => "FancyPerson"
```
## Register Rules
Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
If you want to register more rules, follow:
```
inflection.AddUncountable("fish")
inflection.AddIrregular("person", "people")
inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
```
## Contributing
You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
## Author
**jinzhu**
* <http://github.com/jinzhu>
* <wosmvp@gmail.com>
* <http://twitter.com/zhangjinzhu>
## License
Released under the [MIT License](http://www.opensource.org/licenses/MIT).

273
vendor/github.com/jinzhu/inflection/inflections.go generated vendored Normal file
View File

@@ -0,0 +1,273 @@
/*
Package inflection pluralizes and singularizes English nouns.
inflection.Plural("person") => "people"
inflection.Plural("Person") => "People"
inflection.Plural("PERSON") => "PEOPLE"
inflection.Singular("people") => "person"
inflection.Singular("People") => "Person"
inflection.Singular("PEOPLE") => "PERSON"
inflection.Plural("FancyPerson") => "FancyPeople"
inflection.Singular("FancyPeople") => "FancyPerson"
Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
If you want to register more rules, follow:
inflection.AddUncountable("fish")
inflection.AddIrregular("person", "people")
inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
*/
package inflection
import (
"regexp"
"strings"
)
// inflection is one compiled rewrite rule: a pattern to match against a
// word and the replacement template to apply on a match.
type inflection struct {
	regexp  *regexp.Regexp // pattern matched against the input word
	replace string         // replacement template; may reference ${n} capture groups
}

// Regular is a regexp find replace inflection
type Regular struct {
	find    string // regexp pattern, compiled by compile()
	replace string // replacement template
}

// Irregular is a hard replace inflection,
// containing both singular and plural forms
type Irregular struct {
	singular string
	plural   string
}

// RegularSlice is a slice of Regular inflections
type RegularSlice []Regular

// IrregularSlice is a slice of Irregular inflections
type IrregularSlice []Irregular
// pluralInflections holds the regexp rules that turn a singular word into
// its plural form. compile() walks this slice in reverse, so rules that
// appear LATER in this slice are tried first by Plural (first match wins);
// the catch-all "([a-z])$" at the top is the rule of last resort.
var pluralInflections = RegularSlice{
	{"([a-z])$", "${1}s"},
	{"s$", "s"},
	{"^(ax|test)is$", "${1}es"},
	{"(octop|vir)us$", "${1}i"},
	{"(octop|vir)i$", "${1}i"},
	{"(alias|status)$", "${1}es"},
	{"(bu)s$", "${1}ses"},
	{"(buffal|tomat)o$", "${1}oes"},
	{"([ti])um$", "${1}a"},
	{"([ti])a$", "${1}a"},
	{"sis$", "ses"},
	{"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
	{"(hive)$", "${1}s"},
	{"([^aeiouy]|qu)y$", "${1}ies"},
	{"(x|ch|ss|sh)$", "${1}es"},
	{"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
	{"^(m|l)ouse$", "${1}ice"},
	{"^(m|l)ice$", "${1}ice"},
	{"^(ox)$", "${1}en"},
	{"^(oxen)$", "${1}"},
	{"(quiz)$", "${1}zes"},
}
// singularInflections holds the regexp rules that turn a plural word into
// its singular form. As with pluralInflections, compile() reverses the
// order, so later entries here take precedence over earlier ones; the
// bare "s$" stripper at the top is the fallback.
var singularInflections = RegularSlice{
	{"s$", ""},
	{"(ss)$", "${1}"},
	{"(n)ews$", "${1}ews"},
	{"([ti])a$", "${1}um"},
	{"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
	{"(^analy)(sis|ses)$", "${1}sis"},
	{"([^f])ves$", "${1}fe"},
	{"(hive)s$", "${1}"},
	{"(tive)s$", "${1}"},
	{"([lr])ves$", "${1}f"},
	{"([^aeiouy]|qu)ies$", "${1}y"},
	{"(s)eries$", "${1}eries"},
	{"(m)ovies$", "${1}ovie"},
	{"(c)ookies$", "${1}ookie"},
	{"(x|ch|ss|sh)es$", "${1}"},
	{"^(m|l)ice$", "${1}ouse"},
	{"(bus)(es)?$", "${1}"},
	{"(o)es$", "${1}"},
	{"(shoe)s$", "${1}"},
	{"(cris|test)(is|es)$", "${1}is"},
	{"^(a)x[ie]s$", "${1}xis"},
	{"(octop|vir)(us|i)$", "${1}us"},
	{"(alias|status)(es)?$", "${1}"},
	{"^(ox)en", "${1}"},
	{"(vert|ind)ices$", "${1}ex"},
	{"(matr)ices$", "${1}ix"},
	{"(quiz)zes$", "${1}"},
	{"(database)s$", "${1}"},
}
// irregularInflections lists word pairs that follow no regexp rule; each
// pair is registered verbatim by compile() in upper, title and lower case.
var irregularInflections = IrregularSlice{
	{"person", "people"},
	{"man", "men"},
	{"child", "children"},
	{"sex", "sexes"},
	{"move", "moves"},
	// Fixed: was {"mombie", "mombies"} — "mombie" is not an English word;
	// the upstream jinzhu/inflection rule set pairs zombie/zombies.
	{"zombie", "zombies"},
}
// uncountableInflections lists words whose singular and plural forms are
// identical; compile() registers them as case-insensitive identity rules
// in both directions.
var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}

// compiledPluralMaps and compiledSingularMaps are the flattened rule
// tables built by compile(); Plural and Singular scan them in order and
// apply the first rule whose pattern matches.
var compiledPluralMaps []inflection
var compiledSingularMaps []inflection
// compile rebuilds the compiled plural and singular rule tables from the
// currently registered uncountable, irregular and regular inflections.
// Registration order is significant because Plural and Singular use the
// first matching rule: uncountables go in first (highest priority), then
// irregulars, then the regular rules in reverse declaration order so that
// later-declared regular rules win over earlier ones.
func compile() {
	compiledPluralMaps = []inflection{}
	compiledSingularMaps = []inflection{}

	// Uncountable words map to themselves, case-insensitively, in both
	// directions.
	for _, word := range uncountableInflections {
		rule := inflection{
			regexp:  regexp.MustCompile("^(?i)(" + word + ")$"),
			replace: "${1}",
		}
		compiledPluralMaps = append(compiledPluralMaps, rule)
		compiledSingularMaps = append(compiledSingularMaps, rule)
	}

	// Irregular pairs get three case variants each: UPPER, Title, lower.
	// NOTE(review): strings.Title is deprecated since Go 1.18; it is kept
	// here to preserve the original behavior for these ASCII words.
	for _, pair := range irregularInflections {
		compiledPluralMaps = append(compiledPluralMaps,
			inflection{regexp: regexp.MustCompile(strings.ToUpper(pair.singular) + "$"), replace: strings.ToUpper(pair.plural)},
			inflection{regexp: regexp.MustCompile(strings.Title(pair.singular) + "$"), replace: strings.Title(pair.plural)},
			inflection{regexp: regexp.MustCompile(pair.singular + "$"), replace: pair.plural},
		)
	}
	for _, pair := range irregularInflections {
		compiledSingularMaps = append(compiledSingularMaps,
			inflection{regexp: regexp.MustCompile(strings.ToUpper(pair.plural) + "$"), replace: strings.ToUpper(pair.singular)},
			inflection{regexp: regexp.MustCompile(strings.Title(pair.plural) + "$"), replace: strings.Title(pair.singular)},
			inflection{regexp: regexp.MustCompile(pair.plural + "$"), replace: pair.singular},
		)
	}

	// Regular rules, reversed so the last-declared rule is tried first.
	// Each rule is compiled as exact-UPPER, exact-case, and
	// case-insensitive variants, in that priority order.
	for i := len(pluralInflections) - 1; i >= 0; i-- {
		rule := pluralInflections[i]
		compiledPluralMaps = append(compiledPluralMaps,
			inflection{regexp: regexp.MustCompile(strings.ToUpper(rule.find)), replace: strings.ToUpper(rule.replace)},
			inflection{regexp: regexp.MustCompile(rule.find), replace: rule.replace},
			inflection{regexp: regexp.MustCompile("(?i)" + rule.find), replace: rule.replace},
		)
	}
	for i := len(singularInflections) - 1; i >= 0; i-- {
		rule := singularInflections[i]
		compiledSingularMaps = append(compiledSingularMaps,
			inflection{regexp: regexp.MustCompile(strings.ToUpper(rule.find)), replace: strings.ToUpper(rule.replace)},
			inflection{regexp: regexp.MustCompile(rule.find), replace: rule.replace},
			inflection{regexp: regexp.MustCompile("(?i)" + rule.find), replace: rule.replace},
		)
	}
}
// init compiles the default rule tables so the package is usable without
// any explicit setup by the caller.
func init() {
	compile()
}
// AddPlural adds a plural inflection
func AddPlural(find, replace string) {
pluralInflections = append(pluralInflections, Regular{find, replace})
compile()
}
// AddSingular adds a singular inflection
func AddSingular(find, replace string) {
singularInflections = append(singularInflections, Regular{find, replace})
compile()
}
// AddIrregular adds an irregular inflection
func AddIrregular(singular, plural string) {
irregularInflections = append(irregularInflections, Irregular{singular, plural})
compile()
}
// AddUncountable adds one or more uncountable inflections and rebuilds
// the compiled rule tables.
func AddUncountable(values ...string) {
	uncountableInflections = append(uncountableInflections, values...)
	compile()
}
// GetPlural retrieves the plural inflection values as a defensive copy,
// so callers cannot mutate the registered rules.
func GetPlural() RegularSlice {
	out := make(RegularSlice, 0, len(pluralInflections))
	return append(out, pluralInflections...)
}
// GetSingular retrieves the singular inflection values as a defensive
// copy, so callers cannot mutate the registered rules.
func GetSingular() RegularSlice {
	out := make(RegularSlice, 0, len(singularInflections))
	return append(out, singularInflections...)
}
// GetIrregular retrieves the irregular inflection values as a defensive
// copy, so callers cannot mutate the registered rules.
func GetIrregular() IrregularSlice {
	out := make(IrregularSlice, 0, len(irregularInflections))
	return append(out, irregularInflections...)
}
// GetUncountable retrieves the uncountable inflection values as a
// defensive copy, so callers cannot mutate the registered words.
func GetUncountable() []string {
	out := make([]string, 0, len(uncountableInflections))
	return append(out, uncountableInflections...)
}
// SetPlural sets the plural inflections slice.
// The registry takes ownership of the slice (it is not copied) and the
// compiled rule tables are rebuilt immediately.
func SetPlural(inflections RegularSlice) {
	pluralInflections = inflections
	compile()
}
// SetSingular sets the singular inflections slice.
// The registry takes ownership of the slice (it is not copied) and the
// compiled rule tables are rebuilt immediately.
func SetSingular(inflections RegularSlice) {
	singularInflections = inflections
	compile()
}
// SetIrregular sets the irregular inflections slice.
// The registry takes ownership of the slice (it is not copied) and the
// compiled rule tables are rebuilt immediately.
func SetIrregular(inflections IrregularSlice) {
	irregularInflections = inflections
	compile()
}
// SetUncountable sets the uncountable inflections slice.
// The registry takes ownership of the slice (it is not copied) and the
// compiled rule tables are rebuilt immediately.
func SetUncountable(inflections []string) {
	uncountableInflections = inflections
	compile()
}
// Plural converts a word to its plural form by applying the first
// compiled plural rule whose pattern matches; words matching no rule are
// returned unchanged.
func Plural(str string) string {
	for i := range compiledPluralMaps {
		rule := &compiledPluralMaps[i]
		if rule.regexp.MatchString(str) {
			return rule.regexp.ReplaceAllString(str, rule.replace)
		}
	}
	return str
}
// Singular converts a word to its singular form by applying the first
// compiled singular rule whose pattern matches; words matching no rule
// are returned unchanged.
func Singular(str string) string {
	for i := range compiledSingularMaps {
		rule := &compiledSingularMaps[i]
		if rule.regexp.MatchString(str) {
			return rule.regexp.ReplaceAllString(str, rule.replace)
		}
	}
	return str
}

23
vendor/github.com/jinzhu/inflection/wercker.yml generated vendored Normal file
View File

@@ -0,0 +1,23 @@
box: golang
build:
steps:
- setup-go-workspace
# Gets the dependencies
- script:
name: go get
code: |
go get
# Build the project
- script:
name: go build
code: |
go build ./...
# Test the project
- script:
name: go test
code: |
go test ./...

15
vendor/github.com/puzpuzpuz/xsync/v3/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,15 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/

133
vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md generated vendored Normal file
View File

@@ -0,0 +1,133 @@
# xsync benchmarks
If you're interested in `MapOf` comparison with some of the popular concurrent hash maps written in Go, check [this](https://github.com/cornelk/hashmap/pull/70) and [this](https://github.com/alphadose/haxmap/pull/22) PRs.
The below results were obtained for xsync v2.3.1 on a c6g.metal EC2 instance (64 CPU, 128GB RAM) running Linux and Go 1.19.3. I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks.
The following commands were used to run the benchmarks:
```bash
$ go test -run='^$' -cpu=1,2,4,8,16,32,64 -bench . -count=30 -timeout=0 | tee bench.txt
$ benchstat bench.txt | tee benchstat.txt
```
The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output.
Please note that `MapOf` got a number of optimizations since v2.3.1, so the current result is likely to be different.
### Counter vs. atomic int64
```
name time/op
Counter 27.3ns ± 1%
Counter-2 27.2ns ±11%
Counter-4 15.3ns ± 8%
Counter-8 7.43ns ± 7%
Counter-16 3.70ns ±10%
Counter-32 1.77ns ± 3%
Counter-64 0.96ns ±10%
AtomicInt64 7.60ns ± 0%
AtomicInt64-2 12.6ns ±13%
AtomicInt64-4 13.5ns ±14%
AtomicInt64-8 12.7ns ± 9%
AtomicInt64-16 12.8ns ± 8%
AtomicInt64-32 13.0ns ± 6%
AtomicInt64-64 12.9ns ± 7%
```
Here `time/op` stands for average time spent on operation. If you divide `10^9` by the result in nanoseconds per operation, you'd get the throughput in operations per second. Thus, the ideal theoretical scalability of a concurrent data structure implies that the reported `time/op` decreases proportionally with the increased number of CPU cores. On the contrary, if the measured time per operation increases when run on more cores, it means performance degradation.
### MapOf vs. sync.Map
1,000 `[int, int]` entries with a warm-up, 100% Loads:
```
IntegerMapOf_WarmUp/reads=100% 24.0ns ± 0%
IntegerMapOf_WarmUp/reads=100%-2 12.0ns ± 0%
IntegerMapOf_WarmUp/reads=100%-4 6.02ns ± 0%
IntegerMapOf_WarmUp/reads=100%-8 3.01ns ± 0%
IntegerMapOf_WarmUp/reads=100%-16 1.50ns ± 0%
IntegerMapOf_WarmUp/reads=100%-32 0.75ns ± 0%
IntegerMapOf_WarmUp/reads=100%-64 0.38ns ± 0%
IntegerMapStandard_WarmUp/reads=100% 55.3ns ± 0%
IntegerMapStandard_WarmUp/reads=100%-2 27.6ns ± 0%
IntegerMapStandard_WarmUp/reads=100%-4 16.1ns ± 3%
IntegerMapStandard_WarmUp/reads=100%-8 8.35ns ± 7%
IntegerMapStandard_WarmUp/reads=100%-16 4.24ns ± 7%
IntegerMapStandard_WarmUp/reads=100%-32 2.18ns ± 6%
IntegerMapStandard_WarmUp/reads=100%-64 1.11ns ± 3%
```
1,000 `[int, int]` entries with a warm-up, 99% Loads, 0.5% Stores, 0.5% Deletes:
```
IntegerMapOf_WarmUp/reads=99% 31.0ns ± 0%
IntegerMapOf_WarmUp/reads=99%-2 16.4ns ± 1%
IntegerMapOf_WarmUp/reads=99%-4 8.42ns ± 0%
IntegerMapOf_WarmUp/reads=99%-8 4.41ns ± 0%
IntegerMapOf_WarmUp/reads=99%-16 2.38ns ± 2%
IntegerMapOf_WarmUp/reads=99%-32 1.37ns ± 4%
IntegerMapOf_WarmUp/reads=99%-64 0.85ns ± 2%
IntegerMapStandard_WarmUp/reads=99% 121ns ± 1%
IntegerMapStandard_WarmUp/reads=99%-2 109ns ± 3%
IntegerMapStandard_WarmUp/reads=99%-4 115ns ± 4%
IntegerMapStandard_WarmUp/reads=99%-8 114ns ± 2%
IntegerMapStandard_WarmUp/reads=99%-16 105ns ± 2%
IntegerMapStandard_WarmUp/reads=99%-32 97.0ns ± 3%
IntegerMapStandard_WarmUp/reads=99%-64 98.0ns ± 2%
```
1,000 `[int, int]` entries with a warm-up, 75% Loads, 12.5% Stores, 12.5% Deletes:
```
IntegerMapOf_WarmUp/reads=75%-reads 46.2ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-2 36.7ns ± 2%
IntegerMapOf_WarmUp/reads=75%-reads-4 22.0ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-8 12.8ns ± 2%
IntegerMapOf_WarmUp/reads=75%-reads-16 7.69ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-32 5.16ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-64 4.91ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads 156ns ± 0%
IntegerMapStandard_WarmUp/reads=75%-reads-2 177ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-4 197ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-8 221ns ± 2%
IntegerMapStandard_WarmUp/reads=75%-reads-16 242ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-32 258ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-64 264ns ± 1%
```
### MPMCQueue vs. Go channels
Concurrent producers and consumers (1:1), queue/channel size 1,000, some work done by both producers and consumers:
```
QueueProdConsWork100 252ns ± 0%
QueueProdConsWork100-2 206ns ± 5%
QueueProdConsWork100-4 136ns ±12%
QueueProdConsWork100-8 110ns ± 6%
QueueProdConsWork100-16 108ns ± 2%
QueueProdConsWork100-32 102ns ± 2%
QueueProdConsWork100-64 101ns ± 0%
ChanProdConsWork100 283ns ± 0%
ChanProdConsWork100-2 406ns ±21%
ChanProdConsWork100-4 549ns ± 7%
ChanProdConsWork100-8 754ns ± 7%
ChanProdConsWork100-16 828ns ± 7%
ChanProdConsWork100-32 810ns ± 8%
ChanProdConsWork100-64 832ns ± 4%
```
### RBMutex vs. sync.RWMutex
The writer locks on each 100,000 iteration with some work in the critical section for both readers and the writer:
```
RBMutexWorkWrite100000 146ns ± 0%
RBMutexWorkWrite100000-2 73.3ns ± 0%
RBMutexWorkWrite100000-4 36.7ns ± 0%
RBMutexWorkWrite100000-8 18.6ns ± 0%
RBMutexWorkWrite100000-16 9.83ns ± 3%
RBMutexWorkWrite100000-32 5.53ns ± 0%
RBMutexWorkWrite100000-64 4.04ns ± 3%
RWMutexWorkWrite100000 121ns ± 0%
RWMutexWorkWrite100000-2 128ns ± 1%
RWMutexWorkWrite100000-4 124ns ± 2%
RWMutexWorkWrite100000-8 101ns ± 1%
RWMutexWorkWrite100000-16 92.9ns ± 1%
RWMutexWorkWrite100000-32 89.9ns ± 1%
RWMutexWorkWrite100000-64 88.4ns ± 1%
```

201
vendor/github.com/puzpuzpuz/xsync/v3/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

195
vendor/github.com/puzpuzpuz/xsync/v3/README.md generated vendored Normal file
View File

@@ -0,0 +1,195 @@
[![GoDoc reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v3)
[![GoReport](https://goreportcard.com/badge/github.com/puzpuzpuz/xsync/v3)](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v3)
[![codecov](https://codecov.io/gh/puzpuzpuz/xsync/branch/main/graph/badge.svg)](https://codecov.io/gh/puzpuzpuz/xsync)
# xsync
Concurrent data structures for Go. Aims to provide more scalable alternatives for some of the data structures from the standard `sync` package, but not only.
Covered with tests following the approach described [here](https://puzpuzpuz.dev/testing-concurrent-code-for-fun-and-profit).
## Benchmarks
Benchmark results may be found [here](BENCHMARKS.md). I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks on a beefy multicore machine.
Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashMap](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ConcurrentHashMap.html) and `xsync.MapOf` is available [here](https://puzpuzpuz.dev/concurrent-map-in-go-vs-java-yet-another-meaningless-benchmark).
## Usage
The latest xsync major version is v3, so `/v3` suffix should be used when importing the library:
```go
import (
"github.com/puzpuzpuz/xsync/v3"
)
```
*Note for pre-v3 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial.
### Counter
A `Counter` is a striped `int64` counter inspired by the `j.u.c.a.LongAdder` class from the Java standard library.
```go
c := xsync.NewCounter()
// increment and decrement the counter
c.Inc()
c.Dec()
// read the current value
v := c.Value()
```
Works better in comparison with a single atomically updated `int64` counter in high contention scenarios.
### Map
A `Map` is like a concurrent hash table-based map. It follows the interface of `sync.Map` with a number of valuable extensions like `Compute` or `Size`.
```go
m := xsync.NewMap()
m.Store("foo", "bar")
v, ok := m.Load("foo")
s := m.Size()
```
`Map` uses a modified version of Cache-Line Hash Table (CLHT) data structure: https://github.com/LPD-EPFL/CLHT
CLHT is built around the idea of organizing the hash table in cache-line-sized buckets, so that on all modern CPUs update operations complete with minimal cache-line transfer. Also, `Get` operations are obstruction-free and involve no writes to shared memory, hence no mutexes or any other sort of locks. Due to this design, in all considered scenarios `Map` outperforms `sync.Map`.
One important difference with `sync.Map` is that only string keys are supported. That's because Golang standard library does not expose the built-in hash functions for `interface{}` values.
`MapOf[K, V]` is an implementation with parametrized key and value types. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, less GC pressure and fewer atomic operations on reads.
```go
m := xsync.NewMapOf[string, string]()
m.Store("foo", "bar")
v, ok := m.Load("foo")
```
Apart from CLHT, `MapOf` borrows ideas from Java's `j.u.c.ConcurrentHashMap` (immutable K/V pair structs instead of atomic snapshots) and C++'s `absl::flat_hash_map` (meta memory and SWAR-based lookups). It also has more dense memory layout when compared with `Map`. Long story short, `MapOf` should be preferred over `Map` when possible.
An important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types:
```go
type Point struct {
x int32
y int32
}
m := NewMapOf[Point, int]()
m.Store(Point{42, 42}, 42)
v, ok := m.Load(Point{42, 42})
```
Apart from `Range` method available for map iteration, there are also `ToPlainMap`/`ToPlainMapOf` utility functions to convert a `Map`/`MapOf` to a built-in Go's `map`:
```go
m := xsync.NewMapOf[int, int]()
m.Store(42, 42)
pm := xsync.ToPlainMapOf(m)
```
Both `Map` and `MapOf` use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers:
```go
m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
h := uint64(i)
h = (h ^ (h >> 33)) * 0xff51afd7ed558ccd
h = (h ^ (h >> 33)) * 0xc4ceb9fe1a85ec53
return h ^ (h >> 33)
})
```
When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into the consideration.
### SPSCQueue
A `SPSCQueue` is a bounded single-producer single-consumer concurrent queue. This means that not more than a single goroutine must be publishing items to the queue while not more than a single goroutine must be consuming those items.
```go
q := xsync.NewSPSCQueue(1024)
// producer inserts an item into the queue
// optimistic insertion attempt; doesn't block
inserted := q.TryEnqueue("bar")
// consumer obtains an item from the queue
// optimistic obtain attempt; doesn't block
item, ok := q.TryDequeue() // interface{} pointing to a string
```
`SPSCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.
```go
q := xsync.NewSPSCQueueOf[string](1024)
inserted := q.TryEnqueue("foo")
item, ok := q.TryDequeue() // string
```
The queue is based on the data structure from this [article](https://rigtorp.se/ringbuffer). The idea is to reduce the CPU cache coherency traffic by keeping cached copies of read and write indexes used by producer and consumer respectively.
### MPMCQueue
A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue.
```go
q := xsync.NewMPMCQueue(1024)
// producer optimistically inserts an item into the queue
// optimistic insertion attempt; doesn't block
inserted := q.TryEnqueue("bar")
// consumer obtains an item from the queue
// optimistic obtain attempt; doesn't block
item, ok := q.TryDequeue() // interface{} pointing to a string
```
`MPMCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.
```go
q := xsync.NewMPMCQueueOf[string](1024)
inserted := q.TryEnqueue("foo")
item, ok := q.TryDequeue() // string
```
The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in its turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, has blocking producers and consumers.
The idea of the algorithm is to allow parallelism for concurrent producers and consumers by introducing the notion of tickets, i.e. values of two counters, one per producers/consumers. An atomic increment of one of those counters is the only noticeable contention point in queue operations. The rest of the operation avoids contention on writes thanks to the turn-based read/write access for each of the queue items.
In essence, `MPMCQueue` is a specialized queue for scenarios where there are multiple concurrent producers and consumers of a single queue running on a large multicore machine.
To get the optimal performance, you may want to set the queue size to be large enough, say, an order of magnitude greater than the number of producers/consumers, to allow producers and consumers to progress with their queue operations in parallel most of the time.
### RBMutex
A `RBMutex` is a reader-biased reader/writer mutual exclusion lock. The lock can be held by many readers or a single writer.
```go
mu := xsync.NewRBMutex()
// reader lock calls return a token
t := mu.RLock()
// the token must be later used to unlock the mutex
mu.RUnlock(t)
// writer locks are the same as in sync.RWMutex
mu.Lock()
mu.Unlock()
```
`RBMutex` is based on a modified version of BRAVO (Biased Locking for Reader-Writer Locks) algorithm: https://arxiv.org/pdf/1810.01553.pdf
The idea of the algorithm is to build on top of an existing reader-writer mutex and introduce a fast path for readers. On the fast path, reader lock attempts are sharded over an internal array based on the reader identity (a token in the case of Golang). This means that readers do not contend over a single atomic counter like it's done in, say, `sync.RWMutex` allowing for better scalability in terms of cores.
Hence, by the design `RBMutex` is a specialized mutex for scenarios, such as caches, where the vast majority of locks are acquired by readers and write lock acquire attempts are infrequent. In such scenarios, `RBMutex` should perform better than the `sync.RWMutex` on large multicore machines.
`RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods.
Apart from blocking methods, `RBMutex` also has methods for optimistic locking:
```go
mu := xsync.NewRBMutex()
if locked, t := mu.TryRLock(); locked {
// critical reader section...
mu.RUnlock(t)
}
if mu.TryLock() {
// critical writer section...
mu.Unlock()
}
```
## License
Licensed under MIT.

99
vendor/github.com/puzpuzpuz/xsync/v3/counter.go generated vendored Normal file
View File

@@ -0,0 +1,99 @@
package xsync
import (
"sync"
"sync/atomic"
)
// pool of reusable P tokens; avoids allocating a token on every Add call
var ptokenPool sync.Pool

// a P token is used to point at the current OS thread (P)
// on which the goroutine is run; exact identity of the thread,
// as well as P migration tolerance, is not important since
// it's used as a best-effort mechanism for assigning
// concurrent operations (goroutines) to different stripes of
// the counter
type ptoken struct {
	// index of the stripe this goroutine last hit
	idx uint32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}

// A Counter is a striped int64 counter.
//
// Should be preferred over a single atomically updated int64
// counter in high contention scenarios.
//
// A Counter must not be copied after first use.
type Counter struct {
	stripes []cstripe
	// len(stripes)-1; stripes length is a power of two, so this
	// mask maps any token index onto a valid stripe
	mask uint32
}

// cstripe is a single counter stripe, padded to a full cache line
// so that neighboring stripes do not false-share.
type cstripe struct {
	c int64
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 8]byte
}
// NewCounter creates a new Counter instance.
//
// The number of stripes is the nearest power of two at or above the
// available parallelism.
func NewCounter() *Counter {
	n := nextPowOf2(parallelism())
	return &Counter{
		stripes: make([]cstripe, n),
		mask:    n - 1,
	}
}
// Inc adds 1 to the counter.
func (c *Counter) Inc() { c.Add(1) }
// Dec subtracts 1 from the counter.
func (c *Counter) Dec() { c.Add(-1) }
// Add adds the delta to the counter.
//
// A pooled ptoken remembers which stripe this goroutine hit last, so
// successive calls tend to stay on one stripe; on CAS failure a new
// random stripe is chosen to spread contention.
func (c *Counter) Add(delta int64) {
	t, ok := ptokenPool.Get().(*ptoken)
	if !ok {
		// No pooled token available: start at a random stripe.
		t = new(ptoken)
		t.idx = runtime_fastrand()
	}
	for {
		stripe := &c.stripes[t.idx&c.mask]
		cnt := atomic.LoadInt64(&stripe.c)
		if atomic.CompareAndSwapInt64(&stripe.c, cnt, cnt+delta) {
			break
		}
		// Give a try with another randomly selected stripe.
		t.idx = runtime_fastrand()
	}
	ptokenPool.Put(t)
}
// Value returns the current counter value.
// The returned value may not include all of the latest operations in
// presence of concurrent modifications of the counter.
func (c *Counter) Value() int64 {
	var total int64
	for i := range c.stripes {
		total += atomic.LoadInt64(&c.stripes[i].c)
	}
	return total
}
// Reset resets the counter to zero.
// This method should only be used when it is known that there are
// no concurrent modifications of the counter.
func (c *Counter) Reset() {
	for i := range c.stripes {
		atomic.StoreInt64(&c.stripes[i].c, 0)
	}
}

917
vendor/github.com/puzpuzpuz/xsync/v3/map.go generated vendored Normal file
View File

@@ -0,0 +1,917 @@
package xsync
import (
"fmt"
"math"
"runtime"
"strings"
"sync"
"sync/atomic"
"unsafe"
)
// mapResizeHint tells resize which table transition to perform.
type mapResizeHint int

const (
	mapGrowHint   mapResizeHint = 0
	mapShrinkHint mapResizeHint = 1
	mapClearHint  mapResizeHint = 2
)

const (
	// number of Map entries per bucket; 3 entries lead to size of 64B
	// (one cache line) on 64-bit machines
	entriesPerMapBucket = 3
	// threshold fraction of table occupation to start a table shrinking
	// when deleting the last entry in a bucket chain
	mapShrinkFraction = 128
	// map load factor to trigger a table resize during insertion;
	// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
	// key-value pairs (this is a soft limit)
	mapLoadFactor = 0.75
	// minimal table size, i.e. number of buckets; thus, minimal map
	// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
	defaultMinMapTableLen = 32
	// minimum counter stripes to use
	minMapCounterLen = 8
	// maximum counter stripes to use; stands for around 4KB of memory
	maxMapCounterLen = 32
)

var (
	// topHashMask selects the top 20 bits (MSBs) of a 64-bit hash code;
	// see bucket.topHashMutex for the packed layout it is used with
	topHashMask = uint64((1<<20)-1) << 44
	// topHashEntryMasks[i] selects the packed top hash of entry i
	// inside bucket.topHashMutex
	topHashEntryMasks = [3]uint64{
		topHashMask,
		topHashMask >> 20,
		topHashMask >> 40,
	}
)
// Map is like a Go map[string]interface{} but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A Map must not be copied after first use.
//
// Map uses a modified version of Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around idea to organize the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no write to memory, as well as no
// mutexes or any other sort of locks. Due to this design, in all
// considered scenarios Map outperforms sync.Map.
//
// One important difference with sync.Map is that only string keys
// are supported. That's because Golang standard library does not
// expose the built-in hash functions for interface{} values.
type Map struct {
	totalGrowths int64
	totalShrinks int64
	resizing     int64          // resize in progress flag; updated atomically
	resizeMu     sync.Mutex     // only used along with resizeCond
	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapTable; the current table generation
	minTableLen  int            // table never shrinks below this many buckets
	growOnly     bool           // if set, shrinking is disabled (except via Clear)
}
// mapTable is one generation of the hash table; resize builds a new
// one and atomically swaps it into Map.table.
type mapTable struct {
	buckets []bucketPadded
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	// per-table hash seed fed into hashString
	seed uint64
}

// counterStripe is one stripe of the table size counter, padded to a
// cache line to prevent false sharing between stripes.
type counterStripe struct {
	c int64
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 8]byte
}

// bucketPadded wraps bucket with leading padding.
type bucketPadded struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
	bucket
}

type bucket struct {
	next   unsafe.Pointer // *bucketPadded; overflow chain
	keys   [entriesPerMapBucket]unsafe.Pointer
	values [entriesPerMapBucket]unsafe.Pointer
	// topHashMutex is a 2-in-1 value.
	//
	// It contains packed top 20 bits (20 MSBs) of hash codes for keys
	// stored in the bucket:
	// | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex |
	// |      20 bits     |      20 bits     |      20 bits     |     3 bits      | 1 bit |
	//
	// The least significant bit is used for the mutex (TTAS spinlock).
	topHashMutex uint64
}

// rangeEntry is a copied key/value pointer pair; Range collects these
// under the bucket lock and calls the user function after unlocking.
type rangeEntry struct {
	key   unsafe.Pointer
	value unsafe.Pointer
}

// MapConfig defines configurable Map/MapOf options.
type MapConfig struct {
	sizeHint int  // minimal capacity hint, in entries
	growOnly bool // disables table shrinking when set
}
// WithPresize configures new Map/MapOf instance with capacity enough
// to hold sizeHint entries. The capacity is treated as the minimal
// capacity meaning that the underlying hash table will never shrink
// to a smaller capacity. If sizeHint is zero or negative, the value
// is ignored.
func WithPresize(sizeHint int) func(*MapConfig) {
	return func(cfg *MapConfig) {
		cfg.sizeHint = sizeHint
	}
}
// WithGrowOnly configures new Map/MapOf instance to be grow-only.
// This means that the underlying hash table grows in capacity when
// new keys are added, but does not shrink when keys are deleted.
// The only exception to this rule is the Clear method which
// shrinks the hash table back to the initial capacity.
func WithGrowOnly() func(*MapConfig) {
	return func(cfg *MapConfig) {
		cfg.growOnly = true
	}
}
// NewMap creates a new Map instance configured with the given
// options.
func NewMap(options ...func(*MapConfig)) *Map {
	// Defaults match the minimal table; options may override them.
	c := &MapConfig{
		sizeHint: defaultMinMapTableLen * entriesPerMapBucket,
	}
	for _, o := range options {
		o(c)
	}
	m := &Map{}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	var table *mapTable
	if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
		table = newMapTable(defaultMinMapTableLen)
	} else {
		// Smallest power-of-two table length that keeps sizeHint
		// entries under the load factor.
		tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor))
		table = newMapTable(int(tableLen))
	}
	m.minTableLen = len(table.buckets)
	m.growOnly = c.growOnly
	// Publish the initial table last, once the Map is fully set up.
	atomic.StorePointer(&m.table, unsafe.Pointer(table))
	return m
}
// NewMapPresized creates a new Map instance with capacity enough to hold
// sizeHint entries. The capacity is treated as the minimal capacity
// meaning that the underlying hash table will never shrink to
// a smaller capacity. If sizeHint is zero or negative, the value
// is ignored.
//
// Deprecated: use NewMap in combination with WithPresize.
func NewMapPresized(sizeHint int) *Map {
	opt := WithPresize(sizeHint)
	return NewMap(opt)
}
// newMapTable allocates a table with minTableLen buckets and a striped
// size counter whose stripe count is buckets/1024 clamped to
// [minMapCounterLen, maxMapCounterLen].
func newMapTable(minTableLen int) *mapTable {
	counterLen := minTableLen >> 10
	switch {
	case counterLen < minMapCounterLen:
		counterLen = minMapCounterLen
	case counterLen > maxMapCounterLen:
		counterLen = maxMapCounterLen
	}
	return &mapTable{
		buckets: make([]bucketPadded, minTableLen),
		size:    make([]counterStripe, counterLen),
		seed:    makeSeed(),
	}
}
// ToPlainMap returns a native map with a copy of xsync Map's
// contents. The copied xsync Map should not be modified while
// this call is made. If the copied Map is modified, the copying
// behavior is the same as in the Range method.
func ToPlainMap(m *Map) map[string]interface{} {
	result := make(map[string]interface{})
	if m == nil {
		return result
	}
	m.Range(func(k string, v interface{}) bool {
		result[k] = v
		return true
	})
	return result
}
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
//
// Load is lock-free: it walks the bucket chain and validates each hit
// via the atomic snapshot protocol (value read, key read, value
// re-read) instead of taking the bucket lock.
func (m *Map) Load(key string) (value interface{}, ok bool) {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	hash := hashString(key, table.seed)
	bidx := uint64(len(table.buckets)-1) & hash
	b := &table.buckets[bidx]
	for {
		topHashes := atomic.LoadUint64(&b.topHashMutex)
		for i := 0; i < entriesPerMapBucket; i++ {
			// Cheap pre-filter on the packed top hash bits before
			// dereferencing and comparing the full key.
			if !topHashMatch(hash, topHashes, i) {
				continue
			}
		atomic_snapshot:
			// Start atomic snapshot.
			vp := atomic.LoadPointer(&b.values[i])
			kp := atomic.LoadPointer(&b.keys[i])
			if kp != nil && vp != nil {
				if key == derefKey(kp) {
					if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) {
						// Atomic snapshot succeeded.
						return derefValue(vp), true
					}
					// Concurrent update/remove. Go for another spin.
					goto atomic_snapshot
				}
			}
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketPadded)(bptr)
	}
}
// Store sets the value for a key.
func (m *Map) Store(key string, value interface{}) {
	setFn := func(interface{}, bool) (interface{}, bool) {
		return value, false
	}
	m.doCompute(key, setFn, false, false)
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
	setFn := func(interface{}, bool) (interface{}, bool) {
		return value, false
	}
	return m.doCompute(key, setFn, true, false)
}
// LoadAndStore returns the existing value for the key if present,
// while setting the new value for the key.
// It stores the new value and returns the existing one, if present.
// The loaded result is true if the existing value was loaded,
// false otherwise.
func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) {
	setFn := func(interface{}, bool) (interface{}, bool) {
		return value, false
	}
	return m.doCompute(key, setFn, false, false)
}
// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function, and
// then stores and returns the computed value. The loaded result is
// true if the value was loaded, false if computed.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
	wrapped := func(interface{}, bool) (interface{}, bool) {
		return valueFn(), false
	}
	return m.doCompute(key, wrapped, true, false)
}
// LoadOrTryCompute returns the existing value for the key if present.
// Otherwise, it tries to compute the value using the provided function
// and, if successful, stores and returns the computed value. The loaded
// result is true if the value was loaded, or false if computed (whether
// successfully or not). If the compute attempt was cancelled (due to an
// error, for example), a nil value will be returned.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *Map) LoadOrTryCompute(
	key string,
	valueFn func() (newValue interface{}, cancel bool),
) (value interface{}, loaded bool) {
	wrapped := func(interface{}, bool) (interface{}, bool) {
		if nv, cancel := valueFn(); !cancel {
			return nv, false
		}
		// Cancelled: report a delete so nothing is stored.
		return nil, true
	}
	return m.doCompute(key, wrapped, true, false)
}
// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete
// is set to false, the value is updated to the newValue.
// The ok result indicates whether value was computed and stored, thus, is
// present in the map. The actual result contains the new value in cases where
// the value was computed and stored. See the example for a few use cases.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *Map) Compute(
	key string,
	valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
) (actual interface{}, ok bool) {
	const loadIfExists, computeOnly = false, true
	return m.doCompute(key, valueFn, loadIfExists, computeOnly)
}
// LoadAndDelete deletes the value for a key, returning the previous
// value if any. The loaded result reports whether the key was
// present.
func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) {
	deleteFn := func(old interface{}, _ bool) (interface{}, bool) {
		return old, true
	}
	return m.doCompute(key, deleteFn, false, false)
}
// Delete deletes the value for a key.
func (m *Map) Delete(key string) {
	deleteFn := func(old interface{}, _ bool) (interface{}, bool) {
		return old, true
	}
	m.doCompute(key, deleteFn, false, false)
}
// doCompute is the single write path backing Store, LoadOrStore,
// LoadAndStore, LoadOrCompute, LoadOrTryCompute, Compute, LoadAndDelete
// and Delete.
//
// valueFn receives the old value (and whether it was loaded) and
// returns the new value plus a delete flag. loadIfExists short-circuits
// through the lock-free Load when the key is already present;
// computeOnly selects whether the new or the old value is returned.
//
// NOTE(review): the order of atomic stores below (value before key on
// insert, hash/value/key on delete) is what makes Load's atomic
// snapshot correct — do not reorder.
func (m *Map) doCompute(
	key string,
	valueFn func(oldValue interface{}, loaded bool) (interface{}, bool),
	loadIfExists, computeOnly bool,
) (interface{}, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb       *bucketPadded
			emptyidx     int
			hintNonEmpty int
		)
		table := (*mapTable)(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := hashString(key, table.seed)
		bidx := uint64(len(table.buckets)-1) & hash
		rootb := &table.buckets[bidx]
		lockBucket(&rootb.topHashMutex)
		// The following two checks must go in reverse to what's
		// in the resize method.
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			m.waitForResize()
			goto compute_attempt
		}
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			goto compute_attempt
		}
		b := rootb
		for {
			topHashes := atomic.LoadUint64(&b.topHashMutex)
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] == nil {
					if emptyb == nil {
						// Remember the first free slot for a later insert.
						emptyb = b
						emptyidx = i
					}
					continue
				}
				if !topHashMatch(hash, topHashes, i) {
					hintNonEmpty++
					continue
				}
				if key == derefKey(b.keys[i]) {
					vp := b.values[i]
					if loadIfExists {
						unlockBucket(&rootb.topHashMutex)
						return derefValue(vp), !computeOnly
					}
					// In-place update/delete.
					// We get a copy of the value via an interface{} on each call,
					// thus the live value pointers are unique. Otherwise atomic
					// snapshot won't be correct in case of multiple Store calls
					// using the same value.
					oldValue := derefValue(vp)
					newValue, del := valueFn(oldValue, true)
					if del {
						// Deletion.
						// First we update the value, then the key.
						// This is important for atomic snapshot states.
						atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i))
						atomic.StorePointer(&b.values[i], nil)
						atomic.StorePointer(&b.keys[i], nil)
						leftEmpty := false
						if hintNonEmpty == 0 {
							leftEmpty = isEmptyBucket(b)
						}
						unlockBucket(&rootb.topHashMutex)
						table.addSize(bidx, -1)
						// Might need to shrink the table.
						if leftEmpty {
							m.resize(table, mapShrinkHint)
						}
						return oldValue, !computeOnly
					}
					nvp := unsafe.Pointer(&newValue)
					if assertionsEnabled && vp == nvp {
						panic("non-unique value pointer")
					}
					atomic.StorePointer(&b.values[i], nvp)
					unlockBucket(&rootb.topHashMutex)
					if computeOnly {
						// Compute expects the new value to be returned.
						return newValue, true
					}
					// LoadAndStore expects the old value to be returned.
					return oldValue, true
				}
				hintNonEmpty++
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroV interface{}
					newValue, del := valueFn(zeroV, false)
					if del {
						unlockBucket(&rootb.topHashMutex)
						return zeroV, false
					}
					// First we update the value, then the key.
					// This is important for atomic snapshot states.
					topHashes = atomic.LoadUint64(&emptyb.topHashMutex)
					atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx))
					atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue))
					atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key))
					unlockBucket(&rootb.topHashMutex)
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					unlockBucket(&rootb.topHashMutex)
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroV interface{}
				newValue, del := valueFn(zeroV, false)
				if del {
					unlockBucket(&rootb.topHashMutex)
					return newValue, false
				}
				// Create and append a bucket.
				newb := new(bucketPadded)
				newb.keys[0] = unsafe.Pointer(&key)
				newb.values[0] = unsafe.Pointer(&newValue)
				newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				unlockBucket(&rootb.topHashMutex)
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketPadded)(b.next)
		}
	}
}
// newerTableExists reports whether m.table no longer points at the
// given table, i.e. a resize has published a newer generation.
func (m *Map) newerTableExists(table *mapTable) bool {
	cur := atomic.LoadPointer(&m.table)
	return cur != unsafe.Pointer(table)
}
// resizeInProgress reports whether a table resize is currently running
// (m.resizing is set to 1 by the resize winner).
func (m *Map) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}
// waitForResize blocks the caller until the in-flight resize (if any)
// completes. The condition is re-checked in a loop, as required by
// sync.Cond semantics.
func (m *Map) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}
// resize replaces m.table with a grown, shrunk or fresh table according
// to the hint and migrates all entries unless clearing. Concurrent
// callers race on the m.resizing CAS; losers wait on resizeCond.
func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		if m.growOnly ||
			m.minTableLen == knownTableLen ||
			knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapTable
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table with factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapTable(tableLen << 1)
	case mapShrinkHint:
		// Re-check the shrink condition against the current table:
		// it may have changed since the fast-path check above.
		shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table with factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapTable(tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapTable(m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucket(&table.buckets[i], newTable)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}
// copyBucket re-hashes every entry of the given root bucket chain into
// destTable and returns the number of copied entries. The source root
// bucket stays locked for the whole copy.
func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) {
	rootb := b
	lockBucket(&rootb.topHashMutex)
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] != nil {
				k := derefKey(b.keys[i])
				// Keys must be re-hashed with the destination table's seed.
				hash := hashString(k, destTable.seed)
				bidx := uint64(len(destTable.buckets)-1) & hash
				destb := &destTable.buckets[bidx]
				appendToBucket(hash, b.keys[i], b.values[i], destb)
				copied++
			}
		}
		if b.next == nil {
			unlockBucket(&rootb.topHashMutex)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}
// appendToBucket stores the key/value pointer pair in the first free
// slot of the bucket chain, appending a new overflow bucket when the
// chain is full. Plain (non-atomic) writes are used; the only visible
// caller is copyBucket, which writes into a destination table that has
// not been published yet.
func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) {
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] == nil {
				b.keys[i] = keyPtr
				b.values[i] = valPtr
				b.topHashMutex = storeTopHash(hash, b.topHashMutex, i)
				return
			}
		}
		if b.next == nil {
			newb := new(bucketPadded)
			newb.keys[0] = keyPtr
			newb.values[0] = valPtr
			newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
			b.next = unsafe.Pointer(newb)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}
// isEmptyBucket reports whether the root bucket and every bucket
// chained to it hold no keys.
func isEmptyBucket(rootb *bucketPadded) bool {
	for b := rootb; b != nil; b = (*bucketPadded)(b.next) {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] != nil {
				return false
			}
		}
	}
	return true
}
// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rule applies, i.e. the changes may be not reflected
// in the subsequently iterated entries.
func (m *Map) Range(f func(key string, value interface{}) bool) {
	var zeroEntry rangeEntry
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapTable)(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		lockBucket(&rootb.topHashMutex)
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] != nil {
					bentries = append(bentries, rangeEntry{
						key:   b.keys[i],
						value: b.values[i],
					})
				}
			}
			if b.next == nil {
				unlockBucket(&rootb.topHashMutex)
				break
			}
			b = (*bucketPadded)(b.next)
		}
		// Call the function for all copied entries. The bucket lock is
		// already released here, so f may safely modify the map.
		for j := range bentries {
			k := derefKey(bentries[j].key)
			v := derefValue(bentries[j].value)
			if !f(k, v) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroEntry
		}
		bentries = bentries[:0]
	}
}
// Clear deletes all keys and values currently stored in the map.
func (m *Map) Clear() {
	m.resize((*mapTable)(atomic.LoadPointer(&m.table)), mapClearHint)
}
// Size returns current size of the map.
func (m *Map) Size() int {
	t := (*mapTable)(atomic.LoadPointer(&m.table))
	return int(t.sumSize())
}
func derefKey(keyPtr unsafe.Pointer) string {
return *(*string)(keyPtr)
}
func derefValue(valuePtr unsafe.Pointer) interface{} {
return *(*interface{})(valuePtr)
}
// lockBucket acquires the bucket spinlock stored in the least
// significant bit of the topHashMutex word, yielding the processor
// between attempts.
func lockBucket(mu *uint64) {
	for {
		v := atomic.LoadUint64(mu)
		if v&1 == 0 && atomic.CompareAndSwapUint64(mu, v, v|1) {
			return
		}
		runtime.Gosched()
	}
}
// unlockBucket releases the bucket spinlock by clearing the lock bit
// (the least significant bit of the topHashMutex word).
func unlockBucket(mu *uint64) {
	unlocked := atomic.LoadUint64(mu) &^ 1
	atomic.StoreUint64(mu, unlocked)
}
// topHashMatch reports whether the entry at idx is present and its
// stored top hash bits equal the top 20 bits of the given hash.
// The layout mirrors the one produced by storeTopHash: presence flag
// at bit idx+1, hash bits shifted down by 20*idx from the top.
func topHashMatch(hash, topHashes uint64, idx int) bool {
	if topHashes&(1<<(idx+1)) == 0 {
		// Entry is not present.
		return false
	}
	// Keep only the top 20 bits of the probed hash.
	hash = hash & topHashMask
	// Extract the entry's stored bits and shift them back to the top
	// for comparison.
	topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
	return hash == topHashes
}
// storeTopHash records the top 20 bits of hash in the slot reserved
// for entry idx within topHashes and sets the entry's presence bit
// (bit idx+1). The inverse extraction lives in topHashMatch.
func storeTopHash(hash, topHashes uint64, idx int) uint64 {
	// Zero out top hash at idx.
	topHashes = topHashes &^ topHashEntryMasks[idx]
	// Chop top 20 MSBs of the given hash and position them at idx.
	hash = (hash & topHashMask) >> (20 * idx)
	// Store the MSBs.
	topHashes = topHashes | hash
	// Mark the entry as present.
	return topHashes | (1 << (idx + 1))
}
// eraseTopHash clears the presence bit of the entry at idx, marking
// the slot as empty while leaving the stored hash bits untouched.
func eraseTopHash(topHashes uint64, idx int) uint64 {
	presenceBit := uint64(1) << (idx + 1)
	return topHashes &^ presenceBit
}
// addSize atomically adds delta to the counter stripe associated with
// the given bucket index.
func (table *mapTable) addSize(bucketIdx uint64, delta int) {
	idx := bucketIdx & uint64(len(table.size)-1)
	atomic.AddInt64(&table.size[idx].c, int64(delta))
}
// addSizePlain adds delta to the counter stripe for the given bucket
// index without atomics; only safe while the table is not yet shared.
func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
	idx := bucketIdx & uint64(len(table.size)-1)
	table.size[idx].c += int64(delta)
}
// sumSize returns the sum of all counter stripes, i.e. the table's
// approximate entry count under concurrent modification.
func (table *mapTable) sumSize() int64 {
	var total int64
	for i := range table.size {
		total += atomic.LoadInt64(&table.size[i].c)
	}
	return total
}
// MapStats is Map/MapOf statistics.
//
// Warning: map statistics are intended to be used for diagnostic
// purposes, not for production code. This means that breaking changes
// may be introduced into this struct even between minor releases.
type MapStats struct {
	// RootBuckets is the number of root buckets in the hash table.
	// Each bucket holds a few entries.
	RootBuckets int
	// TotalBuckets is the total number of buckets in the hash table,
	// including root and their chained buckets. Each bucket holds
	// a few entries.
	TotalBuckets int
	// EmptyBuckets is the number of buckets that hold no entries.
	EmptyBuckets int
	// Capacity is the Map/MapOf capacity, i.e. the total number of
	// entries that all buckets can physically hold. This number
	// does not consider the load factor.
	Capacity int
	// Size is the exact number of entries stored in the map.
	Size int
	// Counter is the number of entries stored in the map according
	// to the internal atomic counter. In case of concurrent map
	// modifications this number may be different from Size.
	Counter int
	// CounterLen is the number of internal atomic counter stripes.
	// This number may grow with the map capacity to improve
	// multithreaded scalability.
	CounterLen int
	// MinEntries is the minimum number of entries per a chain of
	// buckets, i.e. a root bucket and its chained buckets.
	MinEntries int
	// MaxEntries is the maximum number of entries per a chain of
	// buckets, i.e. a root bucket and its chained buckets.
	MaxEntries int
	// TotalGrowths is the number of times the hash table grew.
	TotalGrowths int64
	// TotalShrinks is the number of times the hash table shrank.
	TotalShrinks int64
}
// ToString returns string representation of map stats.
func (s *MapStats) ToString() string {
	var b strings.Builder
	b.WriteString("MapStats{\n")
	fmt.Fprintf(&b, "RootBuckets: %d\n", s.RootBuckets)
	fmt.Fprintf(&b, "TotalBuckets: %d\n", s.TotalBuckets)
	fmt.Fprintf(&b, "EmptyBuckets: %d\n", s.EmptyBuckets)
	fmt.Fprintf(&b, "Capacity: %d\n", s.Capacity)
	fmt.Fprintf(&b, "Size: %d\n", s.Size)
	fmt.Fprintf(&b, "Counter: %d\n", s.Counter)
	fmt.Fprintf(&b, "CounterLen: %d\n", s.CounterLen)
	fmt.Fprintf(&b, "MinEntries: %d\n", s.MinEntries)
	fmt.Fprintf(&b, "MaxEntries: %d\n", s.MaxEntries)
	fmt.Fprintf(&b, "TotalGrowths: %d\n", s.TotalGrowths)
	fmt.Fprintf(&b, "TotalShrinks: %d\n", s.TotalShrinks)
	b.WriteString("}\n")
	return b.String()
}
// Stats returns statistics for the Map. Just like other map
// methods, this one is thread-safe. Yet it's an O(N) operation,
// so it should be used only for diagnostics or debugging purposes.
func (m *Map) Stats() MapStats {
	stats := MapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapBucket
			for i := 0; i < entriesPerMapBucket; i++ {
				// Atomic reads without holding the bucket lock, so the
				// reported counts are best-effort under concurrent writes.
				if atomic.LoadPointer(&b.keys[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketPadded)(atomic.LoadPointer(&b.next))
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}

738
vendor/github.com/puzpuzpuz/xsync/v3/mapof.go generated vendored Normal file
View File

@@ -0,0 +1,738 @@
package xsync
import (
"fmt"
"math"
"sync"
"sync/atomic"
"unsafe"
)
const (
	// number of MapOf entries per bucket; 5 entries lead to size of 64B
	// (one cache line) on 64-bit machines
	entriesPerMapOfBucket = 5
	// meta word with every byte set to emptyMetaSlot, i.e. all
	// five slots free
	defaultMeta uint64 = 0x8080808080808080
	// masks the low 5 meta bytes, one byte per bucket entry
	metaMask uint64 = 0xffffffffff
	// defaultMeta restricted to the 5 meaningful bytes
	defaultMetaMasked uint64 = defaultMeta & metaMask
	// meta byte of a free slot; occupied slots hold a 7-bit h2
	// value, so their top bit is always clear
	emptyMetaSlot uint8 = 0x80
)
// MapOf is like a Go map[K]V but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A MapOf must not be copied after first use.
//
// MapOf uses a modified version of Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around idea to organize the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no write to memory, as well as no
// mutexes or any other sort of locks. Due to this design, in all
// considered scenarios MapOf outperforms sync.Map.
//
// MapOf also borrows ideas from Java's j.u.c.ConcurrentHashMap
// (immutable K/V pair structs instead of atomic snapshots)
// and C++'s absl::flat_hash_map (meta memory and SWAR-based
// lookups).
type MapOf[K comparable, V any] struct {
	totalGrowths int64      // number of grow resizes; updated atomically
	totalShrinks int64      // number of shrink resizes; updated atomically
	resizing     int64      // resize in progress flag; updated atomically
	resizeMu     sync.Mutex // only used along with resizeCond
	resizeCond   sync.Cond  // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapOfTable
	hasher       func(K, uint64) uint64 // maps (key, table seed) to a 64-bit hash
	minTableLen  int  // initial table length; the table never shrinks below it
	growOnly     bool // when true, shrink hints are ignored
}
// mapOfTable is a single immutable-length hash table generation;
// a resize allocates a fresh table and republishes MapOf.table.
type mapOfTable[K comparable, V any] struct {
	buckets []bucketOfPadded
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	seed uint64 // per-table hash seed; a fresh one is drawn for each table
}
// bucketOfPadded is a CL-sized map bucket holding up to
// entriesPerMapOfBucket entries. The embedded bucketOf carries the
// actual state; the pad keeps neighbouring buckets on distinct
// cache lines.
type bucketOfPadded struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
	bucketOf
}
// bucketOf holds the entries of one bucket in the chain.
type bucketOf struct {
	meta uint64 // SWAR metadata: one byte per entry; emptyMetaSlot marks a free slot
	entries [entriesPerMapOfBucket]unsafe.Pointer // *entryOf
	next    unsafe.Pointer                        // *bucketOfPadded
	mu sync.Mutex // serializes writers; only the root bucket's mu is ever locked
}
// entryOf is an immutable map entry. Updates allocate a replacement
// entry rather than mutating it, so readers may safely dereference
// a snapshot without synchronization.
type entryOf[K comparable, V any] struct {
	key   K
	value V
}
// NewMapOf creates a new MapOf instance configured with the given
// options, using the built-in hash function for K.
func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] {
	hasher := defaultHasher[K]()
	return NewMapOfWithHasher[K, V](hasher, options...)
}
// NewMapOfWithHasher creates a new MapOf instance configured with
// the given hasher and options. The hash function is used instead
// of the built-in hash function configured when a map is created
// with the NewMapOf function.
func NewMapOfWithHasher[K comparable, V any](
	hasher func(K, uint64) uint64,
	options ...func(*MapConfig),
) *MapOf[K, V] {
	cfg := &MapConfig{
		sizeHint: defaultMinMapTableLen * entriesPerMapOfBucket,
	}
	for _, opt := range options {
		opt(cfg)
	}
	m := &MapOf[K, V]{}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	m.hasher = hasher
	// Pick the initial table length from the size hint, rounded up to
	// a power of two and adjusted for the load factor.
	tableLen := defaultMinMapTableLen
	if cfg.sizeHint > defaultMinMapTableLen*entriesPerMapOfBucket {
		tableLen = int(nextPowOf2(uint32((float64(cfg.sizeHint) / entriesPerMapOfBucket) / mapLoadFactor)))
	}
	table := newMapOfTable[K, V](tableLen)
	m.minTableLen = len(table.buckets)
	m.growOnly = cfg.growOnly
	atomic.StorePointer(&m.table, unsafe.Pointer(table))
	return m
}
// NewMapOfPresized creates a new MapOf instance with capacity enough
// to hold sizeHint entries. The capacity is treated as the minimal capacity
// meaning that the underlying hash table will never shrink to
// a smaller capacity. If sizeHint is zero or negative, the value
// is ignored.
//
// Deprecated: use NewMapOf in combination with WithPresize.
func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
	opt := WithPresize(sizeHint)
	return NewMapOf[K, V](opt)
}
// newMapOfTable allocates a table with minTableLen buckets, all slots
// marked empty, plus an appropriately sized striped counter and a
// fresh hash seed.
func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
	bkts := make([]bucketOfPadded, minTableLen)
	for i := range bkts {
		bkts[i].meta = defaultMeta
	}
	// One counter stripe per 1024 buckets, clamped to the allowed range.
	stripes := minTableLen >> 10
	switch {
	case stripes < minMapCounterLen:
		stripes = minMapCounterLen
	case stripes > maxMapCounterLen:
		stripes = maxMapCounterLen
	}
	return &mapOfTable[K, V]{
		buckets: bkts,
		size:    make([]counterStripe, stripes),
		seed:    makeSeed(),
	}
}
// ToPlainMapOf returns a native map with a copy of xsync Map's
// contents. The copied xsync Map should not be modified while
// this call is made. If the copied Map is modified, the copying
// behavior is the same as in the Range method.
func ToPlainMapOf[K comparable, V any](m *MapOf[K, V]) map[K]V {
	result := make(map[K]V)
	if m == nil {
		return result
	}
	m.Range(func(k K, v V) bool {
		result[k] = v
		return true
	})
	return result
}
// Load returns the value stored in the map for a key, or zero value
// of type V if no value is present.
// The ok result indicates whether value was found in the map.
//
// The lookup is lock-free: it relies on atomic loads of the meta word
// and entry pointers, plus the immutability of entryOf values.
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	hash := m.hasher(key, table.seed)
	h1 := h1(hash)
	h2w := broadcast(h2(hash))
	bidx := uint64(len(table.buckets)-1) & h1
	b := &table.buckets[bidx]
	for {
		metaw := atomic.LoadUint64(&b.meta)
		// SWAR probe: mark every meta byte equal to our h2 byte, then
		// visit only the marked candidate slots.
		markedw := markZeroBytes(metaw^h2w) & metaMask
		for markedw != 0 {
			idx := firstMarkedByteIndex(markedw)
			eptr := atomic.LoadPointer(&b.entries[idx])
			if eptr != nil {
				e := (*entryOf[K, V])(eptr)
				if e.key == key {
					return e.value, true
				}
			}
			// Clear the lowest marked byte and continue with the next one.
			markedw &= markedw - 1
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketOfPadded)(bptr)
	}
}
// Store sets the value for a key.
func (m *MapOf[K, V]) Store(key K, value V) {
	fn := func(V, bool) (V, bool) {
		return value, false
	}
	m.doCompute(key, fn, false, false)
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	fn := func(V, bool) (V, bool) {
		return value, false
	}
	return m.doCompute(key, fn, true, false)
}
// LoadAndStore returns the existing value for the key if present,
// while setting the new value for the key.
// It stores the new value and returns the existing one, if present.
// The loaded result is true if the existing value was loaded,
// false otherwise.
func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
	fn := func(V, bool) (V, bool) {
		return value, false
	}
	return m.doCompute(key, fn, false, false)
}
// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function, and
// then stores and returns the computed value. The loaded result is
// true if the value was loaded, false if computed.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
	fn := func(V, bool) (V, bool) {
		return valueFn(), false
	}
	return m.doCompute(key, fn, true, false)
}
// LoadOrTryCompute returns the existing value for the key if present.
// Otherwise, it tries to compute the value using the provided function
// and, if successful, stores and returns the computed value. The loaded
// result is true if the value was loaded, or false if computed (whether
// successfully or not). If the compute attempt was cancelled (due to an
// error, for example), a zero value of type V will be returned.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) LoadOrTryCompute(
	key K,
	valueFn func() (newValue V, cancel bool),
) (value V, loaded bool) {
	fn := func(V, bool) (V, bool) {
		// A true cancel flag maps onto doCompute's delete flag, in
		// which case the computed value is discarded.
		nv, cancel := valueFn()
		return nv, cancel
	}
	return m.doCompute(key, fn, true, false)
}
// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete
// is set to false, the value is updated to the newValue.
// The ok result indicates whether value was computed and stored, thus, is
// present in the map. The actual result contains the new value in cases where
// the value was computed and stored. See the example for a few use cases.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) Compute(
	key K,
	valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
) (actual V, ok bool) {
	return m.doCompute(key, valueFn, false, true) // loadIfExists=false, computeOnly=true
}
// LoadAndDelete deletes the value for a key, returning the previous
// value if any. The loaded result reports whether the key was
// present.
func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	fn := func(old V, _ bool) (V, bool) {
		return old, true
	}
	return m.doCompute(key, fn, false, false)
}
// Delete deletes the value for a key.
func (m *MapOf[K, V]) Delete(key K) {
	fn := func(old V, _ bool) (V, bool) {
		return old, true
	}
	m.doCompute(key, fn, false, false)
}
// doCompute is the single write-path implementation behind Store,
// LoadOrStore, Compute, Delete and friends. valueFn receives the old
// value (or zero) and a loaded flag, and returns the new value plus a
// delete flag. loadIfExists short-circuits to a plain Load when the
// key is already present; computeOnly selects which value (new vs.
// old) and which bool the caller expects back.
func (m *MapOf[K, V]) doCompute(
	key K,
	valueFn func(oldValue V, loaded bool) (V, bool),
	loadIfExists, computeOnly bool,
) (V, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb   *bucketOfPadded
			emptyidx int
		)
		table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := m.hasher(key, table.seed)
		h1 := h1(hash)
		h2 := h2(hash)
		h2w := broadcast(h2)
		bidx := uint64(len(table.buckets)-1) & h1
		rootb := &table.buckets[bidx]
		rootb.mu.Lock()
		// The following two checks must go in reverse to what's
		// in the resize method.
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			rootb.mu.Unlock()
			m.waitForResize()
			goto compute_attempt
		}
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			rootb.mu.Unlock()
			goto compute_attempt
		}
		b := rootb
		for {
			// Plain (non-atomic) reads are fine here: we hold the root
			// bucket lock, which serializes all writers of this chain.
			metaw := b.meta
			markedw := markZeroBytes(metaw^h2w) & metaMask
			for markedw != 0 {
				idx := firstMarkedByteIndex(markedw)
				eptr := b.entries[idx]
				if eptr != nil {
					e := (*entryOf[K, V])(eptr)
					if e.key == key {
						if loadIfExists {
							rootb.mu.Unlock()
							return e.value, !computeOnly
						}
						// In-place update/delete.
						// We get a copy of the value via an interface{} on each call,
						// thus the live value pointers are unique. Otherwise atomic
						// snapshot won't be correct in case of multiple Store calls
						// using the same value.
						oldv := e.value
						newv, del := valueFn(oldv, true)
						if del {
							// Deletion.
							// First we update the hash, then the entry.
							newmetaw := setByte(metaw, emptyMetaSlot, idx)
							atomic.StoreUint64(&b.meta, newmetaw)
							atomic.StorePointer(&b.entries[idx], nil)
							rootb.mu.Unlock()
							table.addSize(bidx, -1)
							// Might need to shrink the table if we left bucket empty.
							if newmetaw == defaultMeta {
								m.resize(table, mapShrinkHint)
							}
							return oldv, !computeOnly
						}
						// Publish a fresh immutable entry; concurrent
						// readers see either the old or the new one.
						newe := new(entryOf[K, V])
						newe.key = key
						newe.value = newv
						atomic.StorePointer(&b.entries[idx], unsafe.Pointer(newe))
						rootb.mu.Unlock()
						if computeOnly {
							// Compute expects the new value to be returned.
							return newv, true
						}
						// LoadAndStore expects the old value to be returned.
						return oldv, true
					}
				}
				markedw &= markedw - 1
			}
			if emptyb == nil {
				// Search for empty entries (up to 5 per bucket).
				emptyw := metaw & defaultMetaMasked
				if emptyw != 0 {
					idx := firstMarkedByteIndex(emptyw)
					emptyb = b
					emptyidx = idx
				}
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroV V
					newValue, del := valueFn(zeroV, false)
					if del {
						rootb.mu.Unlock()
						return zeroV, false
					}
					newe := new(entryOf[K, V])
					newe.key = key
					newe.value = newValue
					// First we update meta, then the entry.
					atomic.StoreUint64(&emptyb.meta, setByte(emptyb.meta, h2, emptyidx))
					atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
					rootb.mu.Unlock()
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapOfBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					rootb.mu.Unlock()
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroV V
				newValue, del := valueFn(zeroV, false)
				if del {
					rootb.mu.Unlock()
					return newValue, false
				}
				// Create and append a bucket.
				newb := new(bucketOfPadded)
				newb.meta = setByte(defaultMeta, h2, 0)
				newe := new(entryOf[K, V])
				newe.key = key
				newe.value = newValue
				newb.entries[0] = unsafe.Pointer(newe)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				rootb.mu.Unlock()
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketOfPadded)(b.next)
		}
	}
}
// newerTableExists reports whether the map's current table differs
// from the given snapshot, i.e. a resize has been published since the
// snapshot was taken.
func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
	return atomic.LoadPointer(&m.table) != unsafe.Pointer(table)
}
// resizeInProgress reports whether a table resize is currently
// running; the flag is set via CAS in resize and cleared under
// resizeMu before broadcasting to waiters.
func (m *MapOf[K, V]) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}
// waitForResize blocks the caller until the in-flight resize (if any)
// completes. The condition variable loop guards against spurious
// wakeups by re-checking the resizing flag.
func (m *MapOf[K, V]) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}
// resize grows, shrinks or clears the hash table according to hint.
// Only one resize runs at a time (guarded by the resizing flag); all
// other writers wait via waitForResize.
func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		if m.growOnly ||
			m.minTableLen == knownTableLen ||
			knownTable.sumSize() > int64((knownTableLen*entriesPerMapOfBucket)/mapShrinkFraction) {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapOfTable[K, V]
	// Re-read the current table: it may be newer than knownTable.
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table with factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapOfTable[K, V](tableLen << 1)
	case mapShrinkHint:
		shrinkThreshold := int64((tableLen * entriesPerMapOfBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table with factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapOfTable[K, V](tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapOfTable[K, V](m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}
// copyBucketOf moves every entry of the bucket chain rooted at b into
// destTable, rehashing each key with the destination table's seed.
// The source root bucket stays locked for the whole copy so writers
// cannot mutate the chain mid-transfer. Returns the number of copied
// entries.
func copyBucketOf[K comparable, V any](
	b *bucketOfPadded,
	destTable *mapOfTable[K, V],
	hasher func(K, uint64) uint64,
) (copied int) {
	rootb := b
	rootb.mu.Lock()
	for {
		for i := 0; i < entriesPerMapOfBucket; i++ {
			if b.entries[i] != nil {
				e := (*entryOf[K, V])(b.entries[i])
				// Rehash with the destination seed: bucket placement
				// differs between table generations.
				hash := hasher(e.key, destTable.seed)
				bidx := uint64(len(destTable.buckets)-1) & h1(hash)
				destb := &destTable.buckets[bidx]
				appendToBucketOf(h2(hash), b.entries[i], destb)
				copied++
			}
		}
		if b.next == nil {
			rootb.mu.Unlock()
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}
// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rules apply, i.e. the changes may be not reflected
// in the subsequently iterated entries.
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
	var zeroPtr unsafe.Pointer
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapOfBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapOfTable[K, V])(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		rootb.mu.Lock()
		for {
			for i := 0; i < entriesPerMapOfBucket; i++ {
				if b.entries[i] != nil {
					bentries = append(bentries, b.entries[i])
				}
			}
			if b.next == nil {
				rootb.mu.Unlock()
				break
			}
			b = (*bucketOfPadded)(b.next)
		}
		// Call the function for all copied entries. Note that f runs
		// after the bucket lock is released, so it may safely modify
		// the map without deadlocking.
		for j := range bentries {
			entry := (*entryOf[K, V])(bentries[j])
			if !f(entry.key, entry.value) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroPtr
		}
		bentries = bentries[:0]
	}
}
// Clear deletes all keys and values currently stored in the map.
func (m *MapOf[K, V]) Clear() {
	m.resize((*mapOfTable[K, V])(atomic.LoadPointer(&m.table)), mapClearHint)
}
// Size returns current size of the map.
func (m *MapOf[K, V]) Size() int {
	t := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	return int(t.sumSize())
}
// appendToBucketOf places the given entry pointer into the first free
// slot of the bucket chain, appending a fresh bucket when the chain is
// full. Writes are plain (non-atomic): the destination table is not
// yet published when this runs (see copyBucketOf).
func appendToBucketOf(h2 uint8, entryPtr unsafe.Pointer, b *bucketOfPadded) {
	for {
		free := -1
		for i := 0; i < entriesPerMapOfBucket; i++ {
			if b.entries[i] == nil {
				free = i
				break
			}
		}
		if free >= 0 {
			b.meta = setByte(b.meta, h2, free)
			b.entries[free] = entryPtr
			return
		}
		if b.next == nil {
			nb := new(bucketOfPadded)
			nb.meta = setByte(defaultMeta, h2, 0)
			nb.entries[0] = entryPtr
			b.next = unsafe.Pointer(nb)
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}
// addSize atomically adds delta to the counter stripe associated with
// the given bucket index.
func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
	idx := bucketIdx & uint64(len(table.size)-1)
	atomic.AddInt64(&table.size[idx].c, int64(delta))
}
// addSizePlain adds delta to the counter stripe for the given bucket
// index without atomics; only safe while the table is not yet shared.
func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
	idx := bucketIdx & uint64(len(table.size)-1)
	table.size[idx].c += int64(delta)
}
// sumSize returns the sum of all counter stripes, i.e. the table's
// approximate entry count under concurrent modification.
func (table *mapOfTable[K, V]) sumSize() int64 {
	var total int64
	for i := range table.size {
		total += atomic.LoadInt64(&table.size[i].c)
	}
	return total
}
// h1 extracts the bucket-selection part of a 64-bit hash: everything
// above the 7 bits consumed by h2.
func h1(h uint64) uint64 {
	const h2BitLen = 7
	return h >> h2BitLen
}
// h2 extracts the low 7 bits of a 64-bit hash, used as the per-entry
// meta byte; the top bit stays clear, distinguishing occupied slots
// from emptyMetaSlot.
func h2(h uint64) uint8 {
	const h2Mask = 0x7f
	return uint8(h & h2Mask)
}
// Stats returns statistics for the MapOf. Just like other map
// methods, this one is thread-safe. Yet it's an O(N) operation,
// so it should be used only for diagnostics or debugging purposes.
func (m *MapOf[K, V]) Stats() MapStats {
	stats := MapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapOfBucket
			for i := 0; i < entriesPerMapOfBucket; i++ {
				// Atomic reads without holding the bucket lock, so the
				// reported counts are best-effort under concurrent writes.
				if atomic.LoadPointer(&b.entries[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketOfPadded)(atomic.LoadPointer(&b.next))
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}

125
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go generated vendored Normal file
View File

@@ -0,0 +1,125 @@
package xsync
import (
"runtime"
"sync/atomic"
"unsafe"
)
// A MPMCQueue is a bounded multi-producer multi-consumer concurrent
// queue.
//
// MPMCQueue instances must be created with NewMPMCQueue function.
// A MPMCQueue must not be copied after first use.
//
// Based on the data structure from the following C++ library:
// https://github.com/rigtorp/MPMCQueue
type MPMCQueue struct {
	cap  uint64
	head uint64 // producer ticket counter; incremented per enqueue
	//lint:ignore U1000 prevents false sharing
	hpad [cacheLineSize - 8]byte
	tail uint64 // consumer ticket counter; incremented per dequeue
	//lint:ignore U1000 prevents false sharing
	tpad [cacheLineSize - 8]byte
	slots []slotPadded // ring buffer of cap slots
}
// slotPadded is a slot padded out to a full cache line so that
// adjacent slots do not share one.
type slotPadded struct {
	slot
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - unsafe.Sizeof(slot{})]byte
}
// slot is a single ring-buffer cell.
type slot struct {
	// turn is the slot's sequence number: producers wait for the even
	// value 2*lap, consumers for the odd value 2*lap+1 (see Enqueue
	// and Dequeue).
	turn uint64
	item interface{}
}
// NewMPMCQueue creates a new MPMCQueue instance with the given
// capacity.
func NewMPMCQueue(capacity int) *MPMCQueue {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	q := &MPMCQueue{cap: uint64(capacity)}
	q.slots = make([]slotPadded, capacity)
	return q
}
// Enqueue inserts the given item into the queue.
// Blocks, if the queue is full.
//
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
func (q *MPMCQueue) Enqueue(item interface{}) {
	// Claim a producer ticket; the slot and lap derive from it.
	head := atomic.AddUint64(&q.head, 1) - 1
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	// Spin until the slot is free on our lap (even turn value).
	for atomic.LoadUint64(&slot.turn) != turn {
		runtime.Gosched()
	}
	slot.item = item
	// Publish: the odd turn value makes the item visible to the
	// matching consumer.
	atomic.StoreUint64(&slot.turn, turn+1)
}
// Dequeue retrieves and removes the item from the head of the queue.
// Blocks, if the queue is empty.
//
// Deprecated: use TryDequeue in combination with runtime.Gosched().
func (q *MPMCQueue) Dequeue() interface{} {
	// Claim a consumer ticket; the slot and lap derive from it.
	tail := atomic.AddUint64(&q.tail, 1) - 1
	slot := &q.slots[q.idx(tail)]
	turn := q.turn(tail)*2 + 1
	// Spin until the slot is filled on our lap (odd turn value).
	for atomic.LoadUint64(&slot.turn) != turn {
		runtime.Gosched()
	}
	item := slot.item
	// Clear the reference so the item can be GCed, then hand the slot
	// back to producers of the next lap.
	slot.item = nil
	atomic.StoreUint64(&slot.turn, turn+1)
	return item
}
// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
	head := atomic.LoadUint64(&q.head)
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	// The slot is writable only when its turn matches our lap.
	if atomic.LoadUint64(&slot.turn) == turn {
		// Claim the ticket; on CAS failure another producer won it.
		if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
			slot.item = item
			atomic.StoreUint64(&slot.turn, turn+1)
			return true
		}
	}
	return false
}
// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) {
	tail := atomic.LoadUint64(&q.tail)
	slot := &q.slots[q.idx(tail)]
	turn := q.turn(tail)*2 + 1
	// The slot is readable only when its turn matches our lap.
	if atomic.LoadUint64(&slot.turn) == turn {
		// Claim the ticket; on CAS failure another consumer won it.
		if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
			item = slot.item
			ok = true
			slot.item = nil
			atomic.StoreUint64(&slot.turn, turn+1)
			return
		}
	}
	return
}
// idx maps a ticket number to its slot index in the ring buffer.
func (q *MPMCQueue) idx(i uint64) uint64 {
	return i % q.cap
}
// turn returns the lap number of a ticket, i.e. how many full passes
// over the ring buffer precede it.
func (q *MPMCQueue) turn(i uint64) uint64 {
	return i / q.cap
}

138
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go generated vendored Normal file
View File

@@ -0,0 +1,138 @@
//go:build go1.19
// +build go1.19
package xsync
import (
"runtime"
"sync/atomic"
"unsafe"
)
// A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent
// queue. It's a generic version of MPMCQueue.
//
// MPMCQueueOf instances must be created with NewMPMCQueueOf function.
// A MPMCQueueOf must not be copied after first use.
//
// Based on the data structure from the following C++ library:
// https://github.com/rigtorp/MPMCQueue
type MPMCQueueOf[I any] struct {
	cap  uint64
	head uint64 // producer ticket counter; incremented per enqueue
	//lint:ignore U1000 prevents false sharing
	hpad [cacheLineSize - 8]byte
	tail uint64 // consumer ticket counter; incremented per dequeue
	//lint:ignore U1000 prevents false sharing
	tpad [cacheLineSize - 8]byte
	slots []slotOfPadded[I] // ring buffer of cap slots
}
// slotOfPadded is a slot with best-effort padding against false
// sharing between adjacent slots.
type slotOfPadded[I any] struct {
	slotOf[I]
	// Unfortunately, proper padding like the below one:
	//
	// pad [cacheLineSize - (unsafe.Sizeof(slotOf[I]{}) % cacheLineSize)]byte
	//
	// won't compile, so here we add a best-effort padding for items up to
	// 56 bytes size.
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte
}
// slotOf is a single ring-buffer cell.
type slotOf[I any] struct {
	// atomic.Uint64 is used here to get proper 8 byte alignment on
	// 32-bit archs. The sequence protocol matches slot: producers wait
	// for the even value 2*lap, consumers for the odd value 2*lap+1.
	turn atomic.Uint64
	item I
}
// NewMPMCQueueOf creates a new MPMCQueueOf instance with the given
// capacity.
func NewMPMCQueueOf[I any](capacity int) *MPMCQueueOf[I] {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	q := &MPMCQueueOf[I]{cap: uint64(capacity)}
	q.slots = make([]slotOfPadded[I], capacity)
	return q
}
// Enqueue inserts the given item into the queue.
// Blocks, if the queue is full.
//
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
func (q *MPMCQueueOf[I]) Enqueue(item I) {
	// Claim a producer ticket; the slot and lap derive from it.
	head := atomic.AddUint64(&q.head, 1) - 1
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	// Spin until the slot is free on our lap (even turn value).
	for slot.turn.Load() != turn {
		runtime.Gosched()
	}
	slot.item = item
	// Publish: the odd turn value makes the item visible to the
	// matching consumer.
	slot.turn.Store(turn + 1)
}
// Dequeue retrieves and removes the item from the head of the queue.
// Blocks, if the queue is empty.
//
// Deprecated: use TryDequeue in combination with runtime.Gosched().
func (q *MPMCQueueOf[I]) Dequeue() I {
	var zeroI I
	// Claim a consumer ticket; the slot and lap derive from it.
	tail := atomic.AddUint64(&q.tail, 1) - 1
	slot := &q.slots[q.idx(tail)]
	turn := q.turn(tail)*2 + 1
	// Spin until the slot is filled on our lap (odd turn value).
	for slot.turn.Load() != turn {
		runtime.Gosched()
	}
	item := slot.item
	// Zero the slot so the item can be GCed, then hand the slot back
	// to producers of the next lap.
	slot.item = zeroI
	slot.turn.Store(turn + 1)
	return item
}
// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool {
	head := atomic.LoadUint64(&q.head)
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	// The slot is writable only when its turn matches this round.
	if slot.turn.Load() == turn {
		// CAS on head serializes competing producers; on failure another
		// producer claimed this ticket and false is reported.
		if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
			slot.item = item
			slot.turn.Store(turn + 1)
			return true
		}
	}
	return false
}
// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) {
	tail := atomic.LoadUint64(&q.tail)
	slot := &q.slots[q.idx(tail)]
	// An odd turn means the slot currently holds an item for this round.
	turn := q.turn(tail)*2 + 1
	if slot.turn.Load() == turn {
		// CAS on tail serializes competing consumers.
		if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
			var zeroI I
			item = slot.item
			ok = true
			// Clear the slot so the queue does not retain the value.
			slot.item = zeroI
			slot.turn.Store(turn + 1)
			return
		}
	}
	return
}
// idx maps a monotonically increasing ticket to a ring slot index.
func (q *MPMCQueueOf[I]) idx(i uint64) uint64 {
	return i % q.cap
}
// turn returns the round number for the given ticket, i.e. how many
// times the ring has been fully traversed before this ticket.
func (q *MPMCQueueOf[I]) turn(i uint64) uint64 {
	return i / q.cap
}

188
vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go generated vendored Normal file
View File

@@ -0,0 +1,188 @@
package xsync
import (
"runtime"
"sync"
"sync/atomic"
"time"
)
// nslowdown is the slow-down guard: after a writer has had to wait for
// readers to drain, reader bias stays disabled for nslowdown times the
// drain duration (see Lock).
const nslowdown = 7

// rtokenPool recycles RToken values handed out by the reader fast path.
var rtokenPool sync.Pool
// RToken is a reader lock token.
type RToken struct {
	// slot is the index of the rslot counter this reader incremented.
	slot uint32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}
// A RBMutex is a reader biased reader/writer mutual exclusion lock.
// The lock can be held by any number of readers or a single writer.
// The zero value for a RBMutex is an unlocked mutex.
//
// A RBMutex must not be copied after first use.
//
// RBMutex is based on a modified version of BRAVO
// (Biased Locking for Reader-Writer Locks) algorithm:
// https://arxiv.org/pdf/1810.01553.pdf
//
// RBMutex is a specialized mutex for scenarios, such as caches,
// where the vast majority of locks are acquired by readers and write
// lock acquire attempts are infrequent. In such scenarios, RBMutex
// performs better than sync.RWMutex on large multicore machines.
//
// RBMutex extends sync.RWMutex internally and uses it as the "reader
// bias disabled" fallback, so the same semantics apply. The only
// noticeable difference is in reader tokens returned from the
// RLock/RUnlock methods.
type RBMutex struct {
	// rslots holds per-reader counters; its length is a power of two
	// (see NewRBMutex).
	rslots []rslot
	// rmask is len(rslots) - 1, used to map a token to a slot.
	rmask uint32
	// rbias is 1 while the reader fast path is enabled, 0 otherwise.
	rbias int32
	// inhibitUntil delays re-enabling reader bias after a writer had
	// to drain readers (see Lock).
	inhibitUntil time.Time
	rw sync.RWMutex
}
// rslot is a per-slot reader counter, padded towards its own cache line.
type rslot struct {
	// mu counts fast-path readers currently holding this slot.
	mu int32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}
// NewRBMutex creates a new RBMutex instance with reader bias enabled
// and one reader slot per unit of available parallelism.
func NewRBMutex() *RBMutex {
	nslots := nextPowOf2(parallelism())
	return &RBMutex{
		rslots: make([]rslot, nslots),
		rmask:  nslots - 1,
		rbias:  1,
	}
}
// TryRLock tries to lock m for reading without blocking.
// When TryRLock succeeds, it returns true and a reader token.
// In case of a failure, a false is returned.
func (mu *RBMutex) TryRLock() (bool, *RToken) {
	// Reader-biased fast path: only a slot counter is incremented.
	if t := mu.fastRlock(); t != nil {
		return true, t
	}
	// Optimistic slow path.
	if mu.rw.TryRLock() {
		// Re-enable reader bias once the writer-imposed cool-down has passed.
		if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
			atomic.StoreInt32(&mu.rbias, 1)
		}
		// A nil token signals the slow (RWMutex) path to RUnlock.
		return true, nil
	}
	return false, nil
}
// RLock locks m for reading and returns a reader token. The
// token must be used in the later RUnlock call.
//
// Should not be used for recursive read locking; a blocked Lock
// call excludes new readers from acquiring the lock.
func (mu *RBMutex) RLock() *RToken {
	// Reader-biased fast path: only a slot counter is incremented.
	if t := mu.fastRlock(); t != nil {
		return t
	}
	// Slow path.
	mu.rw.RLock()
	// Re-enable reader bias once the writer-imposed cool-down has passed.
	if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
		atomic.StoreInt32(&mu.rbias, 1)
	}
	// A nil token signals the slow (RWMutex) path to RUnlock.
	return nil
}
// fastRlock attempts the reader-biased fast path: while rbias == 1, a
// reader only increments a per-slot counter instead of acquiring the
// underlying RWMutex. Returns nil when the fast path is unavailable.
func (mu *RBMutex) fastRlock() *RToken {
	if atomic.LoadInt32(&mu.rbias) == 1 {
		t, ok := rtokenPool.Get().(*RToken)
		if !ok {
			t = new(RToken)
			t.slot = runtime_fastrand()
		}
		// Try all available slots to distribute reader threads to slots.
		for i := 0; i < len(mu.rslots); i++ {
			slot := t.slot + uint32(i)
			rslot := &mu.rslots[slot&mu.rmask]
			rslotmu := atomic.LoadInt32(&rslot.mu)
			if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
				// Re-check bias: a writer may have disabled it between
				// the first load and the slot increment.
				if atomic.LoadInt32(&mu.rbias) == 1 {
					// Hot path succeeded.
					t.slot = slot
					return t
				}
				// The mutex is no longer reader biased. Roll back.
				atomic.AddInt32(&rslot.mu, -1)
				rtokenPool.Put(t)
				return nil
			}
			// Contention detected. Give a try with the next slot.
		}
	}
	return nil
}
// RUnlock undoes a single RLock call. A reader token obtained from
// the RLock call must be provided. RUnlock does not affect other
// simultaneous readers. A panic is raised if m is not locked for
// reading on entry to RUnlock.
func (mu *RBMutex) RUnlock(t *RToken) {
	// A nil token means the read lock was taken on the slow path.
	if t == nil {
		mu.rw.RUnlock()
		return
	}
	// Release the fast-path slot; a negative counter means an
	// unmatched RUnlock.
	if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
		panic("invalid reader state detected")
	}
	rtokenPool.Put(t)
}
// TryLock tries to lock m for writing without blocking.
func (mu *RBMutex) TryLock() bool {
	if mu.rw.TryLock() {
		if atomic.LoadInt32(&mu.rbias) == 1 {
			// Disable reader bias, then verify no fast-path reader
			// currently holds a slot.
			atomic.StoreInt32(&mu.rbias, 0)
			for i := 0; i < len(mu.rslots); i++ {
				if atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
					// There is a reader. Roll back.
					atomic.StoreInt32(&mu.rbias, 1)
					mu.rw.Unlock()
					return false
				}
			}
		}
		return true
	}
	return false
}
// Lock locks m for writing. If the lock is already locked for
// reading or writing, Lock blocks until the lock is available.
func (mu *RBMutex) Lock() {
	mu.rw.Lock()
	if atomic.LoadInt32(&mu.rbias) == 1 {
		// Disable reader bias and wait for all fast-path readers to
		// drain their slots.
		atomic.StoreInt32(&mu.rbias, 0)
		start := time.Now()
		for i := 0; i < len(mu.rslots); i++ {
			for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
				runtime.Gosched()
			}
		}
		// Keep reader bias disabled for nslowdown times the drain
		// duration to avoid thrashing under frequent writes.
		mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
	}
}
// Unlock unlocks m for writing. A panic is raised if m is not locked
// for writing on entry to Unlock.
//
// As with RWMutex, a locked RBMutex is not associated with a
// particular goroutine. One goroutine may RLock (Lock) a RBMutex and
// then arrange for another goroutine to RUnlock (Unlock) it.
func (mu *RBMutex) Unlock() {
	// Reader bias, if disabled by Lock, is restored lazily by
	// subsequent readers (see RLock/TryRLock).
	mu.rw.Unlock()
}

92
vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go generated vendored Normal file
View File

@@ -0,0 +1,92 @@
package xsync
import (
"sync/atomic"
)
// A SPSCQueue is a bounded single-producer single-consumer concurrent
// queue. This means that not more than a single goroutine must be
// publishing items to the queue while not more than a single goroutine
// must be consuming those items.
//
// SPSCQueue instances must be created with NewSPSCQueue function.
// A SPSCQueue must not be copied after first use.
//
// Based on the data structure from the following article:
// https://rigtorp.se/ringbuffer/
type SPSCQueue struct {
	// cap is the requested capacity + 1; one cell is kept empty to
	// distinguish a full ring from an empty one (see NewSPSCQueue).
	cap uint64
	// pidx is the producer (write) index.
	pidx uint64
	//lint:ignore U1000 prevents false sharing
	pad0 [cacheLineSize - 8]byte
	// pcachedIdx is the consumer's cached copy of pidx.
	pcachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad1 [cacheLineSize - 8]byte
	// cidx is the consumer (read) index.
	cidx uint64
	//lint:ignore U1000 prevents false sharing
	pad2 [cacheLineSize - 8]byte
	// ccachedIdx is the producer's cached copy of cidx.
	ccachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad3 [cacheLineSize - 8]byte
	items []interface{}
}
// NewSPSCQueue creates a new SPSCQueue instance with the given
// capacity. Panics when capacity is less than one.
func NewSPSCQueue(capacity int) *SPSCQueue {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	// One extra cell distinguishes a full ring from an empty one.
	size := capacity + 1
	return &SPSCQueue{
		cap:   uint64(size),
		items: make([]interface{}, size),
	}
}
// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *SPSCQueue) TryEnqueue(item interface{}) bool {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.pidx)
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0
	}
	// Consult the cached consumer index first to avoid an atomic load
	// on every call; refresh it only when the queue looks full.
	cachedIdx := q.ccachedIdx
	if nextIdx == cachedIdx {
		cachedIdx = atomic.LoadUint64(&q.cidx)
		q.ccachedIdx = cachedIdx
		if nextIdx == cachedIdx {
			return false
		}
	}
	q.items[idx] = item
	// Publish the item by advancing the producer index.
	atomic.StoreUint64(&q.pidx, nextIdx)
	return true
}
// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *SPSCQueue) TryDequeue() (item interface{}, ok bool) {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.cidx)
	// Consult the cached producer index first to avoid an atomic load
	// on every call; refresh it only when the queue looks empty.
	cachedIdx := q.pcachedIdx
	if idx == cachedIdx {
		cachedIdx = atomic.LoadUint64(&q.pidx)
		q.pcachedIdx = cachedIdx
		if idx == cachedIdx {
			return
		}
	}
	item = q.items[idx]
	// Clear the slot so the queue does not retain the dequeued value.
	q.items[idx] = nil
	ok = true
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0
	}
	// Release the slot by advancing the consumer index.
	atomic.StoreUint64(&q.cidx, nextIdx)
	return
}

96
vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go generated vendored Normal file
View File

@@ -0,0 +1,96 @@
//go:build go1.19
// +build go1.19
package xsync
import (
"sync/atomic"
)
// A SPSCQueueOf is a bounded single-producer single-consumer concurrent
// queue. This means that not more than a single goroutine must be
// publishing items to the queue while not more than a single goroutine
// must be consuming those items.
//
// SPSCQueueOf instances must be created with NewSPSCQueueOf function.
// A SPSCQueueOf must not be copied after first use.
//
// Based on the data structure from the following article:
// https://rigtorp.se/ringbuffer/
type SPSCQueueOf[I any] struct {
	// cap is the requested capacity + 1; one cell is kept empty to
	// distinguish a full ring from an empty one (see NewSPSCQueueOf).
	cap uint64
	// pidx is the producer (write) index.
	pidx uint64
	//lint:ignore U1000 prevents false sharing
	pad0 [cacheLineSize - 8]byte
	// pcachedIdx is the consumer's cached copy of pidx.
	pcachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad1 [cacheLineSize - 8]byte
	// cidx is the consumer (read) index.
	cidx uint64
	//lint:ignore U1000 prevents false sharing
	pad2 [cacheLineSize - 8]byte
	// ccachedIdx is the producer's cached copy of cidx.
	ccachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad3 [cacheLineSize - 8]byte
	items []I
}
// NewSPSCQueueOf creates a new SPSCQueueOf instance with the given
// capacity. Panics when capacity is less than one.
func NewSPSCQueueOf[I any](capacity int) *SPSCQueueOf[I] {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	// One extra cell distinguishes a full ring from an empty one.
	size := capacity + 1
	return &SPSCQueueOf[I]{
		cap:   uint64(size),
		items: make([]I, size),
	}
}
// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *SPSCQueueOf[I]) TryEnqueue(item I) bool {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.pidx)
	// Locals renamed from snake_case to MixedCaps to follow Go naming
	// conventions and match the non-generic SPSCQueue implementation.
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0
	}
	// Consult the cached consumer index first to avoid an atomic load
	// on every call; refresh it only when the queue looks full.
	cachedIdx := q.ccachedIdx
	if nextIdx == cachedIdx {
		cachedIdx = atomic.LoadUint64(&q.cidx)
		q.ccachedIdx = cachedIdx
		if nextIdx == cachedIdx {
			return false
		}
	}
	q.items[idx] = item
	// Publish the item by advancing the producer index.
	atomic.StoreUint64(&q.pidx, nextIdx)
	return true
}
// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *SPSCQueueOf[I]) TryDequeue() (item I, ok bool) {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.cidx)
	// Locals renamed from snake_case to MixedCaps to follow Go naming
	// conventions and match the non-generic SPSCQueue implementation.
	// Consult the cached producer index first to avoid an atomic load
	// on every call; refresh it only when the queue looks empty.
	cachedIdx := q.pcachedIdx
	if idx == cachedIdx {
		cachedIdx = atomic.LoadUint64(&q.pidx)
		q.pcachedIdx = cachedIdx
		if idx == cachedIdx {
			return
		}
	}
	var zeroI I
	item = q.items[idx]
	// Clear the slot so the queue does not retain the dequeued value.
	q.items[idx] = zeroI
	ok = true
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0
	}
	// Release the slot by advancing the consumer index.
	atomic.StoreUint64(&q.cidx, nextIdx)
	return
}

66
vendor/github.com/puzpuzpuz/xsync/v3/util.go generated vendored Normal file
View File

@@ -0,0 +1,66 @@
package xsync
import (
"math/bits"
"runtime"
_ "unsafe"
)
// assertionsEnabled is a test-only assert()-like flag.
var assertionsEnabled = false

const (
	// cacheLineSize is used in paddings to prevent false sharing;
	// 64B are used instead of 128B as a compromise between
	// memory footprint and performance; 128B usage may give ~30%
	// improvement on NUMA machines.
	cacheLineSize = 64
)
// nextPowOf2 computes the next highest power of 2 of 32-bit v,
// returning 1 for inputs 0 and 1. For v greater than 1<<31 the result
// wraps to 0, matching the classic bit-smearing implementation.
func nextPowOf2(v uint32) uint32 {
	if v < 2 {
		return 1
	}
	// The smallest power of two >= v has bits.Len32(v-1) as its exponent.
	return uint32(1) << bits.Len32(v-1)
}
// parallelism returns the degree of available CPU parallelism: the
// minimum of GOMAXPROCS and the number of logical CPUs.
func parallelism() uint32 {
	p := uint32(runtime.GOMAXPROCS(0))
	if n := uint32(runtime.NumCPU()); n < p {
		return n
	}
	return p
}
// runtime_fastrand returns a pseudo-random uint32 from the Go
// runtime's fast random source, accessed via go:linkname.
//
//go:noescape
//go:linkname runtime_fastrand runtime.fastrand
func runtime_fastrand() uint32
// broadcast replicates byte b into all eight bytes of a uint64.
func broadcast(b uint8) uint64 {
	w := uint64(b)
	w |= w << 8
	w |= w << 16
	w |= w << 32
	return w
}
// firstMarkedByteIndex returns the index of the lowest byte of w with
// any bit set (8 when w is zero).
func firstMarkedByteIndex(w uint64) int {
	return bits.TrailingZeros64(w) / 8
}
// markZeroBytes sets the top bit of every byte of w that is zero.
// SWAR byte search: may produce false positives, e.g. for 0x0100,
// so make sure to double-check bytes found by this function.
func markZeroBytes(w uint64) uint64 {
	const (
		lsb = 0x0101010101010101 // 0x01 in every byte
		msb = 0x8080808080808080 // 0x80 in every byte
	)
	return (w - lsb) &^ w & msb
}
// setByte returns w with byte idx (0 = least significant) replaced by b.
func setByte(w uint64, b uint8, idx int) uint64 {
	shift := idx * 8
	mask := uint64(0xff) << shift
	return (w &^ mask) | uint64(b)<<shift
}

77
vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go generated vendored Normal file
View File

@@ -0,0 +1,77 @@
package xsync
import (
"reflect"
"unsafe"
)
// makeSeed creates a random 64-bit seed whose high 32 bits are
// guaranteed to be non-zero.
func makeSeed() uint64 {
	var s1 uint32
	for s1 == 0 {
		// Zero is reserved to mean an uninitialized seed/hash, so
		// retry until a non-zero value is drawn.
		s1 = runtime_fastrand()
	}
	s2 := runtime_fastrand()
	return uint64(s1)<<32 | uint64(s2)
}
// hashString calculates a hash of s with the given seed by feeding the
// string's bytes to the runtime's memhash without copying them.
func hashString(s string, seed uint64) uint64 {
	if s == "" {
		return seed
	}
	// NOTE(review): reflect.StringHeader is deprecated in recent Go
	// versions; this vendored code still relies on it for zero-copy
	// access to the string's data pointer and length.
	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return uint64(runtime_memhash(unsafe.Pointer(strh.Data), uintptr(seed), uintptr(strh.Len)))
}
// runtime_memhash hashes s bytes at p with seed h using the runtime's
// memory hash, accessed via go:linkname.
//
//go:noescape
//go:linkname runtime_memhash runtime.memhash
func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr
// defaultHasher creates a fast hash function for the given comparable type.
// The only limitation is that the type should not contain interfaces inside
// based on runtime.typehash.
func defaultHasher[T comparable]() func(T, uint64) uint64 {
	var zero T
	if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface {
		// Interface-typed T: the dynamic type varies per value, so the
		// iface header must be unpacked on every call.
		return func(value T, seed uint64) uint64 {
			iValue := any(value)
			i := (*iface)(unsafe.Pointer(&iValue))
			return runtime_typehash64(i.typ, i.word, seed)
		}
	} else {
		// Concrete T: the type descriptor is fixed, so capture it once
		// and hash each value in place.
		var iZero any = zero
		i := (*iface)(unsafe.Pointer(&iZero))
		return func(value T, seed uint64) uint64 {
			return runtime_typehash64(i.typ, unsafe.Pointer(&value), seed)
		}
	}
}
// iface mirrors how an interface value is represented in memory:
// a type descriptor followed by a pointer to the data word.
type iface struct {
	typ uintptr
	word unsafe.Pointer
}
// runtime_typehash64 is the same as runtime_typehash, but always returns
// a uint64; see the maphash.rthash function for details.
func runtime_typehash64(t uintptr, p unsafe.Pointer, seed uint64) uint64 {
	if unsafe.Sizeof(uintptr(0)) == 8 {
		// 64-bit platform: a single typehash call covers the full width.
		return uint64(runtime_typehash(t, p, uintptr(seed)))
	}
	// 32-bit platform: combine two 32-bit hashes seeded with the low
	// and high halves of the seed.
	lo := runtime_typehash(t, p, uintptr(seed))
	hi := runtime_typehash(t, p, uintptr(seed>>32))
	return uint64(hi)<<32 | uint64(lo)
}
// runtime_typehash hashes the value at p of type descriptor t with seed
// h using the runtime's type-aware hash, accessed via go:linkname.
//
//go:noescape
//go:linkname runtime_typehash runtime.typehash
func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr

11
vendor/github.com/tmthrgd/go-hex/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,11 @@
language: go
go:
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- tip
matrix:
fast_finish: true
allow_failures:
- go: tip

82
vendor/github.com/tmthrgd/go-hex/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,82 @@
Copyright (c) 2016, Tom Thorogood.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Tom Thorogood nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---- Portions of the source code are also covered by the following license: ----
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---- Portions of the source code are also covered by the following license: ----
Copyright (c) 2005-2016, Wojciech Muła
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

108
vendor/github.com/tmthrgd/go-hex/README.md generated vendored Normal file
View File

@@ -0,0 +1,108 @@
# go-hex
[![GoDoc](https://godoc.org/github.com/tmthrgd/go-hex?status.svg)](https://godoc.org/github.com/tmthrgd/go-hex)
[![Build Status](https://travis-ci.org/tmthrgd/go-hex.svg?branch=master)](https://travis-ci.org/tmthrgd/go-hex)
An efficient hexadecimal implementation for Golang.
go-hex provides hex encoding and decoding using SSE/AVX instructions on x86-64.
## Download
```
go get github.com/tmthrgd/go-hex
```
## Benchmark
go-hex:
```
BenchmarkEncode/15-8 100000000 17.4 ns/op 863.43 MB/s
BenchmarkEncode/32-8 100000000 11.9 ns/op 2690.43 MB/s
BenchmarkEncode/128-8 100000000 21.4 ns/op 5982.92 MB/s
BenchmarkEncode/1k-8 20000000 88.5 ns/op 11572.80 MB/s
BenchmarkEncode/16k-8 1000000 1254 ns/op 13058.10 MB/s
BenchmarkEncode/128k-8 100000 12965 ns/op 10109.53 MB/s
BenchmarkEncode/1M-8 10000 119465 ns/op 8777.23 MB/s
BenchmarkEncode/16M-8 500 3530380 ns/op 4752.24 MB/s
BenchmarkEncode/128M-8 50 28001913 ns/op 4793.16 MB/s
BenchmarkDecode/14-8 100000000 12.6 ns/op 1110.01 MB/s
BenchmarkDecode/32-8 100000000 12.5 ns/op 2558.10 MB/s
BenchmarkDecode/128-8 50000000 27.2 ns/op 4697.66 MB/s
BenchmarkDecode/1k-8 10000000 168 ns/op 6093.43 MB/s
BenchmarkDecode/16k-8 500000 2543 ns/op 6442.09 MB/s
BenchmarkDecode/128k-8 100000 20339 ns/op 6444.24 MB/s
BenchmarkDecode/1M-8 10000 164313 ns/op 6381.57 MB/s
BenchmarkDecode/16M-8 500 3099822 ns/op 5412.31 MB/s
BenchmarkDecode/128M-8 50 24865822 ns/op 5397.68 MB/s
```
[encoding/hex](https://golang.org/pkg/encoding/hex/):
```
BenchmarkRefEncode/15-8 50000000 36.1 ns/op 415.07 MB/s
BenchmarkRefEncode/32-8 20000000 72.9 ns/op 439.14 MB/s
BenchmarkRefEncode/128-8 5000000 289 ns/op 441.54 MB/s
BenchmarkRefEncode/1k-8 1000000 2268 ns/op 451.49 MB/s
BenchmarkRefEncode/16k-8 30000 39110 ns/op 418.91 MB/s
BenchmarkRefEncode/128k-8 5000 291260 ns/op 450.02 MB/s
BenchmarkRefEncode/1M-8 1000 2277578 ns/op 460.39 MB/s
BenchmarkRefEncode/16M-8 30 37087543 ns/op 452.37 MB/s
BenchmarkRefEncode/128M-8 5 293611713 ns/op 457.13 MB/s
BenchmarkRefDecode/14-8 30000000 53.7 ns/op 260.49 MB/s
BenchmarkRefDecode/32-8 10000000 128 ns/op 248.44 MB/s
BenchmarkRefDecode/128-8 3000000 481 ns/op 265.95 MB/s
BenchmarkRefDecode/1k-8 300000 4172 ns/op 245.43 MB/s
BenchmarkRefDecode/16k-8 10000 111989 ns/op 146.30 MB/s
BenchmarkRefDecode/128k-8 2000 909077 ns/op 144.18 MB/s
BenchmarkRefDecode/1M-8 200 7275779 ns/op 144.12 MB/s
BenchmarkRefDecode/16M-8 10 116574839 ns/op 143.92 MB/s
BenchmarkRefDecode/128M-8 2 933871637 ns/op 143.72 MB/s
```
[encoding/hex](https://golang.org/pkg/encoding/hex/) -> go-hex:
```
benchmark old ns/op new ns/op delta
BenchmarkEncode/15-8 36.1 17.4 -51.80%
BenchmarkEncode/32-8 72.9 11.9 -83.68%
BenchmarkEncode/128-8 289 21.4 -92.60%
BenchmarkEncode/1k-8 2268 88.5 -96.10%
BenchmarkEncode/16k-8 39110 1254 -96.79%
BenchmarkEncode/128k-8 291260 12965 -95.55%
BenchmarkEncode/1M-8 2277578 119465 -94.75%
BenchmarkEncode/16M-8 37087543 3530380 -90.48%
BenchmarkEncode/128M-8 293611713 28001913 -90.46%
BenchmarkDecode/14-8 53.7 12.6 -76.54%
BenchmarkDecode/32-8 128 12.5 -90.23%
BenchmarkDecode/128-8 481 27.2 -94.35%
BenchmarkDecode/1k-8 4172 168 -95.97%
BenchmarkDecode/16k-8 111989 2543 -97.73%
BenchmarkDecode/128k-8 909077 20339 -97.76%
BenchmarkDecode/1M-8 7275779 164313 -97.74%
BenchmarkDecode/16M-8 116574839 3099822 -97.34%
BenchmarkDecode/128M-8 933871637 24865822 -97.34%
benchmark old MB/s new MB/s speedup
BenchmarkEncode/15-8 415.07 863.43 2.08x
BenchmarkEncode/32-8 439.14 2690.43 6.13x
BenchmarkEncode/128-8 441.54 5982.92 13.55x
BenchmarkEncode/1k-8 451.49 11572.80 25.63x
BenchmarkEncode/16k-8 418.91 13058.10 31.17x
BenchmarkEncode/128k-8 450.02 10109.53 22.46x
BenchmarkEncode/1M-8 460.39 8777.23 19.06x
BenchmarkEncode/16M-8 452.37 4752.24 10.51x
BenchmarkEncode/128M-8 457.13 4793.16 10.49x
BenchmarkDecode/14-8 260.49 1110.01 4.26x
BenchmarkDecode/32-8 248.44 2558.10 10.30x
BenchmarkDecode/128-8 265.95 4697.66 17.66x
BenchmarkDecode/1k-8 245.43 6093.43 24.83x
BenchmarkDecode/16k-8 146.30 6442.09 44.03x
BenchmarkDecode/128k-8 144.18 6444.24 44.70x
BenchmarkDecode/1M-8 144.12 6381.57 44.28x
BenchmarkDecode/16M-8 143.92 5412.31 37.61x
BenchmarkDecode/128M-8 143.72 5397.68 37.56x
```
## License
Unless otherwise noted, the go-hex source files are distributed under the Modified BSD License
found in the LICENSE file.

137
vendor/github.com/tmthrgd/go-hex/hex.go generated vendored Normal file
View File

@@ -0,0 +1,137 @@
// Copyright 2016 Tom Thorogood. All rights reserved.
// Use of this source code is governed by a
// Modified BSD License license that can be found in
// the LICENSE file.
//
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hex is an efficient hexadecimal implementation for Golang.
package hex
import (
"errors"
"fmt"
)
// errLength is returned by Decode for input with an odd number of hex digits.
var errLength = errors.New("go-hex: odd length hex string")

// Encoding alphabets for lowercase and uppercase hexadecimal output.
var (
	lower = []byte("0123456789abcdef")
	upper = []byte("0123456789ABCDEF")
)
// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
type InvalidByteError byte

// Error formats the offending byte as a Unicode code point (%#U).
func (e InvalidByteError) Error() string {
	return fmt.Sprintf("go-hex: invalid byte: %#U", rune(e))
}
// EncodedLen returns the length of an encoding of n source bytes:
// two hex digits per input byte.
func EncodedLen(n int) int {
	return 2 * n
}
// DecodedLen returns the length of a decoding of n source bytes.
// For odd n the result is rounded down; Decode itself rejects
// odd-length input with errLength.
func DecodedLen(n int) int {
	return n / 2
}
// Encode encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// Encode implements lowercase hexadecimal encoding.
// It delegates to RawEncode with the lowercase alphabet.
func Encode(dst, src []byte) int {
	return RawEncode(dst, src, lower)
}
// EncodeUpper encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// EncodeUpper implements uppercase hexadecimal encoding.
// It delegates to RawEncode with the uppercase alphabet.
func EncodeUpper(dst, src []byte) int {
	return RawEncode(dst, src, upper)
}
// EncodeToString returns the lowercase hexadecimal encoding of src
// as a freshly allocated string.
func EncodeToString(src []byte) string {
	return RawEncodeToString(src, lower)
}
// EncodeUpperToString returns the uppercase hexadecimal encoding of src
// as a freshly allocated string.
func EncodeUpperToString(src []byte) string {
	return RawEncodeToString(src, upper)
}
// RawEncodeToString returns the hexadecimal encoding of src for a given
// alphabet.
func RawEncodeToString(src, alpha []byte) string {
	buf := make([]byte, EncodedLen(len(src)))
	RawEncode(buf, src, alpha)
	return string(buf)
}
// DecodeString returns the bytes represented by the hexadecimal string s,
// or an error if s is not valid hex input.
func DecodeString(s string) ([]byte, error) {
	src := []byte(s)
	dst := make([]byte, DecodedLen(len(src)))
	_, err := Decode(dst, src)
	if err != nil {
		return nil, err
	}
	return dst, nil
}
// MustDecodeString is like DecodeString but panics if the string cannot be
// parsed. It simplifies safe initialization of global variables holding
// binary data.
func MustDecodeString(str string) []byte {
	dst, err := DecodeString(str)
	if err == nil {
		return dst
	}
	panic(err)
}
// encodeGeneric is the portable (non-SIMD) hex encoder: each source
// byte becomes two alphabet characters in dst.
func encodeGeneric(dst, src, alpha []byte) {
	j := 0
	for _, v := range src {
		dst[j] = alpha[v>>4]
		dst[j+1] = alpha[v&0x0f]
		j += 2
	}
}
// decodeGeneric is the portable (non-SIMD) hex decoder. On success it
// returns (0, true); on failure it returns the index of the first
// invalid source byte and false. A trailing odd byte is ignored.
func decodeGeneric(dst, src []byte) (uint64, bool) {
	for i, j := 0, 0; j+1 < len(src); i, j = i+1, j+2 {
		hi, ok := fromHexChar(src[j])
		if !ok {
			return uint64(j), false
		}
		lo, ok := fromHexChar(src[j+1])
		if !ok {
			return uint64(j + 1), false
		}
		dst[i] = hi<<4 | lo
	}
	return 0, true
}
// fromHexChar converts a hex character into its value and a success flag.
func fromHexChar(c byte) (byte, bool) {
	if c >= '0' && c <= '9' {
		return c - '0', true
	}
	if c >= 'a' && c <= 'f' {
		return c - 'a' + 10, true
	}
	if c >= 'A' && c <= 'F' {
		return c - 'A' + 10, true
	}
	return 0, false
}

94
vendor/github.com/tmthrgd/go-hex/hex_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,94 @@
// Copyright 2016 Tom Thorogood. All rights reserved.
// Use of this source code is governed by a
// Modified BSD License license that can be found in
// the LICENSE file.
// +build amd64,!gccgo,!appengine
package hex
import "golang.org/x/sys/cpu"
// RawEncode encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// RawEncode implements hexadecimal encoding for a given alphabet.
// Panics when alpha is not exactly 16 bytes or dst is too small.
func RawEncode(dst, src, alpha []byte) int {
	if len(alpha) != 16 {
		panic("invalid alphabet")
	}
	if len(dst) < len(src)*2 {
		panic("dst buffer is too small")
	}
	if len(src) == 0 {
		return 0
	}
	// Dispatch to the fastest available x86-64 implementation,
	// falling back to the portable Go loop.
	switch {
	case cpu.X86.HasAVX:
		encodeAVX(&dst[0], &src[0], uint64(len(src)), &alpha[0])
	case cpu.X86.HasSSE41:
		encodeSSE(&dst[0], &src[0], uint64(len(src)), &alpha[0])
	default:
		encodeGeneric(dst, src, alpha)
	}
	return len(src) * 2
}
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
// number of bytes written to dst.
//
// If Decode encounters invalid input, it returns an error describing the failure.
// Panics when dst is too small to hold the decoded output.
func Decode(dst, src []byte) (int, error) {
	if len(src)%2 != 0 {
		return 0, errLength
	}
	if len(dst) < len(src)/2 {
		panic("dst buffer is too small")
	}
	if len(src) == 0 {
		return 0, nil
	}
	var (
		n uint64
		ok bool
	)
	// Dispatch to the fastest available x86-64 implementation,
	// falling back to the portable Go loop.
	switch {
	case cpu.X86.HasAVX:
		n, ok = decodeAVX(&dst[0], &src[0], uint64(len(src)))
	case cpu.X86.HasSSE41:
		n, ok = decodeSSE(&dst[0], &src[0], uint64(len(src)))
	default:
		n, ok = decodeGeneric(dst, src)
	}
	if !ok {
		// n is the index of the first invalid source byte.
		return 0, InvalidByteError(src[n])
	}
	return len(src) / 2, nil
}
//go:generate go run asm_gen.go

// The declarations below are implemented in assembly. In the decode
// variants, n is the index of the first invalid byte when ok is false
// (see Decode above).

// This function is implemented in hex_encode_amd64.s
//go:noescape
func encodeAVX(dst *byte, src *byte, len uint64, alpha *byte)
// This function is implemented in hex_encode_amd64.s
//go:noescape
func encodeSSE(dst *byte, src *byte, len uint64, alpha *byte)
// This function is implemented in hex_decode_amd64.s
//go:noescape
func decodeAVX(dst *byte, src *byte, len uint64) (n uint64, ok bool)
// This function is implemented in hex_decode_amd64.s
//go:noescape
func decodeSSE(dst *byte, src *byte, len uint64) (n uint64, ok bool)

303
vendor/github.com/tmthrgd/go-hex/hex_decode_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,303 @@
// Copyright 2016 Tom Thorogood. All rights reserved.
// Use of this source code is governed by a
// Modified BSD License license that can be found in
// the LICENSE file.
//
// Copyright 2005-2016, Wojciech Muła. All rights reserved.
// Use of this source code is governed by a
// Simplified BSD License license that can be found in
// the LICENSE file.
//
// This file is auto-generated - do not modify
// +build amd64,!gccgo,!appengine
#include "textflag.h"
// Constant tables for the SIMD hex decoders.
// decodeBase: per-byte subtrahends. The first 16 bytes are '0' (0x30),
// subtracted from every digit; the next 16 bytes (0x27 = 'a'-'0'-10) are
// additionally subtracted from letter digits only.
DATA decodeBase<>+0x00(SB)/8, $0x3030303030303030
DATA decodeBase<>+0x08(SB)/8, $0x3030303030303030
DATA decodeBase<>+0x10(SB)/8, $0x2727272727272727
DATA decodeBase<>+0x18(SB)/8, $0x2727272727272727
GLOBL decodeBase<>(SB),RODATA,$32
// decodeToLower: OR-ing 0x20 into an ASCII letter lowercases it, so the
// decoders accept "A"-"F" as well as "a"-"f".
DATA decodeToLower<>+0x00(SB)/8, $0x2020202020202020
DATA decodeToLower<>+0x08(SB)/8, $0x2020202020202020
GLOBL decodeToLower<>(SB),RODATA,$16
// decodeHigh/decodeLow: PSHUFB index vectors gathering the even-indexed
// (high nibble) and odd-indexed (low nibble) digits; 0xff lanes are zeroed.
DATA decodeHigh<>+0x00(SB)/8, $0x0e0c0a0806040200
DATA decodeHigh<>+0x08(SB)/8, $0xffffffffffffffff
GLOBL decodeHigh<>(SB),RODATA,$16
DATA decodeLow<>+0x00(SB)/8, $0x0f0d0b0907050301
DATA decodeLow<>+0x08(SB)/8, $0xffffffffffffffff
GLOBL decodeLow<>(SB),RODATA,$16
// decodeValid: range bounds biased by 0x80 so unsigned ASCII values can be
// range-checked with the signed PCMPGTB: 0xb0/0xb9 are '0'/'9'+0x80 and
// 0xe1/0xe6 are 'a'/'f'+0x80.
DATA decodeValid<>+0x00(SB)/8, $0xb0b0b0b0b0b0b0b0
DATA decodeValid<>+0x08(SB)/8, $0xb0b0b0b0b0b0b0b0
DATA decodeValid<>+0x10(SB)/8, $0xb9b9b9b9b9b9b9b9
DATA decodeValid<>+0x18(SB)/8, $0xb9b9b9b9b9b9b9b9
DATA decodeValid<>+0x20(SB)/8, $0xe1e1e1e1e1e1e1e1
DATA decodeValid<>+0x28(SB)/8, $0xe1e1e1e1e1e1e1e1
DATA decodeValid<>+0x30(SB)/8, $0xe6e6e6e6e6e6e6e6
DATA decodeValid<>+0x38(SB)/8, $0xe6e6e6e6e6e6e6e6
GLOBL decodeValid<>(SB),RODATA,$64
// decodeToSigned: XOR with 0x80 maps unsigned bytes onto the signed range
// used by the PCMPGTB comparisons above.
DATA decodeToSigned<>+0x00(SB)/8, $0x8080808080808080
DATA decodeToSigned<>+0x08(SB)/8, $0x8080808080808080
GLOBL decodeToSigned<>(SB),RODATA,$16
// func decodeAVX(dst *byte, src *byte, len uint64) (n uint64, ok bool)
// Decodes len ASCII hex digits at src into len/2 bytes at dst using
// VEX-encoded 128-bit instructions, 16 input bytes per iteration.
TEXT ·decodeAVX(SB),NOSPLIT,$0
MOVQ dst+0(FP), DI
MOVQ src+8(FP), SI
MOVQ len+16(FP), BX
// R15 keeps the original src pointer so the index of an invalid byte
// can be recovered at the "invalid" label.
MOVQ SI, R15
MOVOU decodeValid<>(SB), X14
MOVOU decodeValid<>+0x20(SB), X15
// DX masks which of the 16 PMOVMSKB lanes are significant; shortened in tail.
MOVW $65535, DX
CMPQ BX, $16
JB tail
bigloop:
MOVOU (SI), X0
// Validate: bias to signed, lowercase a copy, then range-check against
// '0'..'9' and 'a'..'f'; any out-of-range lane sets a bit in AX.
VPXOR decodeToSigned<>(SB), X0, X1
POR decodeToLower<>(SB), X0
VPXOR decodeToSigned<>(SB), X0, X2
VPCMPGTB X1, X14, X3
PCMPGTB decodeValid<>+0x10(SB), X1
VPCMPGTB X2, X15, X4
PCMPGTB decodeValid<>+0x30(SB), X2
PAND X4, X1
POR X2, X3
POR X1, X3
PMOVMSKB X3, AX
TESTW AX, DX
JNZ invalid
// Convert digits to nibble values: subtract '0' everywhere plus an extra
// 0x27 for letter lanes (X4 flags the numeric lanes).
PSUBB decodeBase<>(SB), X0
PANDN decodeBase<>+0x10(SB), X4
PSUBB X4, X0
// Pack nibble pairs into bytes: gather odd/even lanes, shift, combine.
VPSHUFB decodeLow<>(SB), X0, X3
PSHUFB decodeHigh<>(SB), X0
PSLLW $4, X0
POR X3, X0
MOVQ X0, (DI)
SUBQ $16, BX
JZ ret
ADDQ $16, SI
ADDQ $8, DI
CMPQ BX, $16
JAE bigloop
tail:
// Fewer than 16 bytes remain: shrink the validity mask and load the
// remainder with word/quadword inserts.
MOVQ $16, CX
SUBQ BX, CX
SHRW CX, DX
CMPQ BX, $4
JB tail_in_2
JE tail_in_4
CMPQ BX, $8
JB tail_in_6
JE tail_in_8
CMPQ BX, $12
JB tail_in_10
JE tail_in_12
tail_in_14:
PINSRW $6, 12(SI), X0
tail_in_12:
PINSRW $5, 10(SI), X0
tail_in_10:
PINSRW $4, 8(SI), X0
tail_in_8:
PINSRQ $0, (SI), X0
JMP tail_conv
tail_in_6:
PINSRW $2, 4(SI), X0
tail_in_4:
PINSRW $1, 2(SI), X0
tail_in_2:
PINSRW $0, (SI), X0
tail_conv:
// Same validate-and-convert sequence as the main loop.
VPXOR decodeToSigned<>(SB), X0, X1
POR decodeToLower<>(SB), X0
VPXOR decodeToSigned<>(SB), X0, X2
VPCMPGTB X1, X14, X3
PCMPGTB decodeValid<>+0x10(SB), X1
VPCMPGTB X2, X15, X4
PCMPGTB decodeValid<>+0x30(SB), X2
PAND X4, X1
POR X2, X3
POR X1, X3
PMOVMSKB X3, AX
TESTW AX, DX
JNZ invalid
PSUBB decodeBase<>(SB), X0
PANDN decodeBase<>+0x10(SB), X4
PSUBB X4, X0
VPSHUFB decodeLow<>(SB), X0, X3
PSHUFB decodeHigh<>(SB), X0
PSLLW $4, X0
POR X3, X0
// Store only the bytes that were actually produced.
CMPQ BX, $4
JB tail_out_2
JE tail_out_4
CMPQ BX, $8
JB tail_out_6
JE tail_out_8
CMPQ BX, $12
JB tail_out_10
JE tail_out_12
tail_out_14:
PEXTRB $6, X0, 6(DI)
tail_out_12:
PEXTRB $5, X0, 5(DI)
tail_out_10:
PEXTRB $4, X0, 4(DI)
tail_out_8:
MOVL X0, (DI)
JMP ret
tail_out_6:
PEXTRB $2, X0, 2(DI)
tail_out_4:
PEXTRB $1, X0, 1(DI)
tail_out_2:
PEXTRB $0, X0, (DI)
ret:
MOVB $1, ok+32(FP)
RET
invalid:
// Compute n = (chunk offset) + (lane of first failing byte).
BSFW AX, AX
SUBQ R15, SI
ADDQ SI, AX
MOVQ AX, n+24(FP)
MOVB $0, ok+32(FP)
RET
// func decodeSSE(dst *byte, src *byte, len uint64) (n uint64, ok bool)
// Mirrors decodeAVX using only SSE instructions; the two-operand forms
// require explicit MOVOU copies into scratch registers where the AVX
// version uses three-operand VEX encodings.
TEXT ·decodeSSE(SB),NOSPLIT,$0
MOVQ dst+0(FP), DI
MOVQ src+8(FP), SI
MOVQ len+16(FP), BX
// R15 preserves src so "invalid" can report the failing byte's index.
MOVQ SI, R15
MOVOU decodeValid<>(SB), X14
MOVOU decodeValid<>+0x20(SB), X15
// DX masks the significant PMOVMSKB lanes; shortened for the tail.
MOVW $65535, DX
CMPQ BX, $16
JB tail
bigloop:
MOVOU (SI), X0
// Validate 16 digits against '0'..'9' and (lowercased) 'a'..'f'.
MOVOU X0, X1
PXOR decodeToSigned<>(SB), X1
POR decodeToLower<>(SB), X0
MOVOU X0, X2
PXOR decodeToSigned<>(SB), X2
MOVOU X14, X3
PCMPGTB X1, X3
PCMPGTB decodeValid<>+0x10(SB), X1
MOVOU X15, X4
PCMPGTB X2, X4
PCMPGTB decodeValid<>+0x30(SB), X2
PAND X4, X1
POR X2, X3
POR X1, X3
PMOVMSKB X3, AX
TESTW AX, DX
JNZ invalid
// Digits -> nibbles, then pack nibble pairs into 8 output bytes.
PSUBB decodeBase<>(SB), X0
PANDN decodeBase<>+0x10(SB), X4
PSUBB X4, X0
MOVOU X0, X3
PSHUFB decodeLow<>(SB), X3
PSHUFB decodeHigh<>(SB), X0
PSLLW $4, X0
POR X3, X0
MOVQ X0, (DI)
SUBQ $16, BX
JZ ret
ADDQ $16, SI
ADDQ $8, DI
CMPQ BX, $16
JAE bigloop
tail:
// Handle the final <16 bytes with partial loads and a shortened mask.
MOVQ $16, CX
SUBQ BX, CX
SHRW CX, DX
CMPQ BX, $4
JB tail_in_2
JE tail_in_4
CMPQ BX, $8
JB tail_in_6
JE tail_in_8
CMPQ BX, $12
JB tail_in_10
JE tail_in_12
tail_in_14:
PINSRW $6, 12(SI), X0
tail_in_12:
PINSRW $5, 10(SI), X0
tail_in_10:
PINSRW $4, 8(SI), X0
tail_in_8:
PINSRQ $0, (SI), X0
JMP tail_conv
tail_in_6:
PINSRW $2, 4(SI), X0
tail_in_4:
PINSRW $1, 2(SI), X0
tail_in_2:
PINSRW $0, (SI), X0
tail_conv:
// Same validate-and-convert sequence as the main loop.
MOVOU X0, X1
PXOR decodeToSigned<>(SB), X1
POR decodeToLower<>(SB), X0
MOVOU X0, X2
PXOR decodeToSigned<>(SB), X2
MOVOU X14, X3
PCMPGTB X1, X3
PCMPGTB decodeValid<>+0x10(SB), X1
MOVOU X15, X4
PCMPGTB X2, X4
PCMPGTB decodeValid<>+0x30(SB), X2
PAND X4, X1
POR X2, X3
POR X1, X3
PMOVMSKB X3, AX
TESTW AX, DX
JNZ invalid
PSUBB decodeBase<>(SB), X0
PANDN decodeBase<>+0x10(SB), X4
PSUBB X4, X0
MOVOU X0, X3
PSHUFB decodeLow<>(SB), X3
PSHUFB decodeHigh<>(SB), X0
PSLLW $4, X0
POR X3, X0
// Store only the bytes that were actually produced.
CMPQ BX, $4
JB tail_out_2
JE tail_out_4
CMPQ BX, $8
JB tail_out_6
JE tail_out_8
CMPQ BX, $12
JB tail_out_10
JE tail_out_12
tail_out_14:
PEXTRB $6, X0, 6(DI)
tail_out_12:
PEXTRB $5, X0, 5(DI)
tail_out_10:
PEXTRB $4, X0, 4(DI)
tail_out_8:
MOVL X0, (DI)
JMP ret
tail_out_6:
PEXTRB $2, X0, 2(DI)
tail_out_4:
PEXTRB $1, X0, 1(DI)
tail_out_2:
PEXTRB $0, X0, (DI)
ret:
MOVB $1, ok+32(FP)
RET
invalid:
// n = chunk offset + lane of the first failing byte.
BSFW AX, AX
SUBQ R15, SI
ADDQ SI, AX
MOVQ AX, n+24(FP)
MOVB $0, ok+32(FP)
RET

227
vendor/github.com/tmthrgd/go-hex/hex_encode_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,227 @@
// Copyright 2016 Tom Thorogood. All rights reserved.
// Use of this source code is governed by a
// Modified BSD License license that can be found in
// the LICENSE file.
//
// Copyright 2005-2016, Wojciech Muła. All rights reserved.
// Use of this source code is governed by a
// Simplified BSD License license that can be found in
// the LICENSE file.
//
// This file is auto-generated - do not modify
// +build amd64,!gccgo,!appengine
#include "textflag.h"
// encodeMask isolates the low nibble of every byte (AND with 0x0f).
DATA encodeMask<>+0x00(SB)/8, $0x0f0f0f0f0f0f0f0f
DATA encodeMask<>+0x08(SB)/8, $0x0f0f0f0f0f0f0f0f
GLOBL encodeMask<>(SB),RODATA,$16
// func encodeAVX(dst *byte, src *byte, len uint64, alpha *byte)
// Expands len bytes at src into 2*len hex digits at dst, translating nibble
// values through the 16-byte alphabet at alpha with PSHUFB. The main loop
// walks backwards (offsets via BX) so writes never overlap pending reads.
TEXT ·encodeAVX(SB),NOSPLIT,$0
MOVQ dst+0(FP), DI
MOVQ src+8(FP), SI
MOVQ len+16(FP), BX
MOVQ alpha+24(FP), DX
// X15 holds the alphabet as the PSHUFB lookup table.
MOVOU (DX), X15
CMPQ BX, $16
JB tail
bigloop:
MOVOU -16(SI)(BX*1), X0
// Split each byte into low (X1) and high (X0) nibbles, interleave them,
// then translate both halves through the alphabet.
VPAND encodeMask<>(SB), X0, X1
PSRLW $4, X0
PAND encodeMask<>(SB), X0
VPUNPCKHBW X1, X0, X3
PUNPCKLBW X1, X0
VPSHUFB X0, X15, X1
VPSHUFB X3, X15, X2
MOVOU X2, -16(DI)(BX*2)
MOVOU X1, -32(DI)(BX*2)
SUBQ $16, BX
JZ ret
CMPQ BX, $16
JAE bigloop
tail:
// Load the remaining <16 source bytes with the narrowest inserts possible.
CMPQ BX, $2
JB tail_in_1
JE tail_in_2
CMPQ BX, $4
JB tail_in_3
JE tail_in_4
CMPQ BX, $6
JB tail_in_5
JE tail_in_6
CMPQ BX, $8
JB tail_in_7
tail_in_8:
MOVQ (SI), X0
JMP tail_conv
tail_in_7:
PINSRB $6, 6(SI), X0
tail_in_6:
PINSRB $5, 5(SI), X0
tail_in_5:
PINSRB $4, 4(SI), X0
tail_in_4:
PINSRD $0, (SI), X0
JMP tail_conv
tail_in_3:
PINSRB $2, 2(SI), X0
tail_in_2:
PINSRB $1, 1(SI), X0
tail_in_1:
PINSRB $0, (SI), X0
tail_conv:
// Same nibble split/translate as the main loop, low 8 input bytes only.
VPAND encodeMask<>(SB), X0, X1
PSRLW $4, X0
PAND encodeMask<>(SB), X0
PUNPCKLBW X1, X0
VPSHUFB X0, X15, X1
// Store exactly 2*BX output digits.
CMPQ BX, $2
JB tail_out_1
JE tail_out_2
CMPQ BX, $4
JB tail_out_3
JE tail_out_4
CMPQ BX, $6
JB tail_out_5
JE tail_out_6
CMPQ BX, $8
JB tail_out_7
tail_out_8:
MOVOU X1, (DI)
SUBQ $8, BX
JZ ret
ADDQ $8, SI
ADDQ $16, DI
JMP tail
tail_out_7:
PEXTRB $13, X1, 13(DI)
PEXTRB $12, X1, 12(DI)
tail_out_6:
PEXTRB $11, X1, 11(DI)
PEXTRB $10, X1, 10(DI)
tail_out_5:
PEXTRB $9, X1, 9(DI)
PEXTRB $8, X1, 8(DI)
tail_out_4:
MOVQ X1, (DI)
RET
tail_out_3:
PEXTRB $5, X1, 5(DI)
PEXTRB $4, X1, 4(DI)
tail_out_2:
PEXTRB $3, X1, 3(DI)
PEXTRB $2, X1, 2(DI)
tail_out_1:
PEXTRB $1, X1, 1(DI)
PEXTRB $0, X1, (DI)
ret:
RET
// func encodeSSE(dst *byte, src *byte, len uint64, alpha *byte)
// Mirrors encodeAVX using only SSE instructions; two-operand forms require
// explicit MOVOU copies where the AVX version uses three-operand VEX ops.
TEXT ·encodeSSE(SB),NOSPLIT,$0
MOVQ dst+0(FP), DI
MOVQ src+8(FP), SI
MOVQ len+16(FP), BX
MOVQ alpha+24(FP), DX
// X15 holds the alphabet as the PSHUFB lookup table.
MOVOU (DX), X15
CMPQ BX, $16
JB tail
bigloop:
MOVOU -16(SI)(BX*1), X0
// Split bytes into nibbles, interleave, translate through the alphabet.
MOVOU X0, X1
PAND encodeMask<>(SB), X1
PSRLW $4, X0
PAND encodeMask<>(SB), X0
MOVOU X0, X3
PUNPCKHBW X1, X3
PUNPCKLBW X1, X0
MOVOU X15, X1
PSHUFB X0, X1
MOVOU X15, X2
PSHUFB X3, X2
MOVOU X2, -16(DI)(BX*2)
MOVOU X1, -32(DI)(BX*2)
SUBQ $16, BX
JZ ret
CMPQ BX, $16
JAE bigloop
tail:
// Load the remaining <16 source bytes with the narrowest inserts possible.
CMPQ BX, $2
JB tail_in_1
JE tail_in_2
CMPQ BX, $4
JB tail_in_3
JE tail_in_4
CMPQ BX, $6
JB tail_in_5
JE tail_in_6
CMPQ BX, $8
JB tail_in_7
tail_in_8:
MOVQ (SI), X0
JMP tail_conv
tail_in_7:
PINSRB $6, 6(SI), X0
tail_in_6:
PINSRB $5, 5(SI), X0
tail_in_5:
PINSRB $4, 4(SI), X0
tail_in_4:
PINSRD $0, (SI), X0
JMP tail_conv
tail_in_3:
PINSRB $2, 2(SI), X0
tail_in_2:
PINSRB $1, 1(SI), X0
tail_in_1:
PINSRB $0, (SI), X0
tail_conv:
// Same nibble split/translate as the main loop, low 8 input bytes only.
MOVOU X0, X1
PAND encodeMask<>(SB), X1
PSRLW $4, X0
PAND encodeMask<>(SB), X0
PUNPCKLBW X1, X0
MOVOU X15, X1
PSHUFB X0, X1
// Store exactly 2*BX output digits.
CMPQ BX, $2
JB tail_out_1
JE tail_out_2
CMPQ BX, $4
JB tail_out_3
JE tail_out_4
CMPQ BX, $6
JB tail_out_5
JE tail_out_6
CMPQ BX, $8
JB tail_out_7
tail_out_8:
MOVOU X1, (DI)
SUBQ $8, BX
JZ ret
ADDQ $8, SI
ADDQ $16, DI
JMP tail
tail_out_7:
PEXTRB $13, X1, 13(DI)
PEXTRB $12, X1, 12(DI)
tail_out_6:
PEXTRB $11, X1, 11(DI)
PEXTRB $10, X1, 10(DI)
tail_out_5:
PEXTRB $9, X1, 9(DI)
PEXTRB $8, X1, 8(DI)
tail_out_4:
MOVQ X1, (DI)
RET
tail_out_3:
PEXTRB $5, X1, 5(DI)
PEXTRB $4, X1, 4(DI)
tail_out_2:
PEXTRB $3, X1, 3(DI)
PEXTRB $2, X1, 2(DI)
tail_out_1:
PEXTRB $1, X1, 1(DI)
PEXTRB $0, X1, (DI)
ret:
RET

36
vendor/github.com/tmthrgd/go-hex/hex_other.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 gccgo appengine
package hex
// RawEncode encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// RawEncode implements hexadecimal encoding for a given alphabet.
// RawEncode encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// RawEncode implements hexadecimal encoding for a given alphabet.
func RawEncode(dst, src, alpha []byte) int {
	// A hex alphabet must supply exactly one digit per nibble value.
	if got := len(alpha); got != 16 {
		panic("invalid alphabet")
	}
	encodeGeneric(dst, src, alpha)
	return 2 * len(src)
}
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
// number of bytes written to dst.
//
// If Decode encounters invalid input, it returns an error describing the failure.
func Decode(dst, src []byte) (int, error) {
	// Hex input must contain an even number of digits.
	if len(src)&1 != 0 {
		return 0, errLength
	}
	n, ok := decodeGeneric(dst, src)
	if !ok {
		// n indexes the first byte that is not a valid hex digit.
		return 0, InvalidByteError(src[n])
	}
	return len(src) >> 1, nil
}

4
vendor/github.com/uptrace/bun/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,4 @@
# Patterns for files created by this project.
# For other files, use global gitignore.
*.s3db
.idea

6
vendor/github.com/uptrace/bun/.prettierrc.yml generated vendored Normal file
View File

@@ -0,0 +1,6 @@
trailingComma: all
tabWidth: 2
semi: false
singleQuote: true
proseWrap: always
printWidth: 100

1089
vendor/github.com/uptrace/bun/CHANGELOG.md generated vendored Normal file

File diff suppressed because it is too large Load Diff

34
vendor/github.com/uptrace/bun/CONTRIBUTING.md generated vendored Normal file
View File

@@ -0,0 +1,34 @@
## Running tests
To run the tests, you need Docker, which is used to start the PostgreSQL and MySQL servers:
```shell
cd internal/dbtest
./test.sh
```
To ease debugging, you can run tests and print all executed queries:
```shell
BUNDEBUG=2 TZ= go test -run=TestName
```
## Releasing
1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
```shell
TAG=v1.0.0 ./scripts/release.sh
```
2. Open a pull request and wait for the build to finish.
3. Merge the pull request and run `tag.sh` to create tags for packages:
```shell
TAG=v1.0.0 ./scripts/tag.sh
```
## Documentation
To contribute to the docs visit https://github.com/uptrace/bun-docs

24
vendor/github.com/uptrace/bun/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,24 @@
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

30
vendor/github.com/uptrace/bun/Makefile generated vendored Normal file
View File

@@ -0,0 +1,30 @@
# Every directory in the repo containing a go.mod file, sorted.
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
# Same, restricted to the example modules.
EXAMPLE_GO_MOD_DIRS := $(shell find ./example/ -type f -name 'go.mod' -exec dirname {} \; | sort)
# Run go test (plain, -race, and linux/386) plus go vet in every module.
test:
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go test in $${dir}"; \
(cd "$${dir}" && \
go test && \
go test -race && \
env GOOS=linux GOARCH=386 TZ= go test && \
go vet); \
done
# Upgrade dependencies and tidy go.mod/go.sum in every module.
go_mod_tidy:
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go mod tidy in $${dir}"; \
(cd "$${dir}" && \
go get -u ./... && \
go mod tidy); \
done
# Format the tree with gofmt -s and group local imports last.
fmt:
gofmt -w -s ./
goimports -w -local github.com/uptrace/bun ./
# Execute each example module with "go run .".
run-examples:
set -e; for dir in $(EXAMPLE_GO_MOD_DIRS); do \
echo "go run . in $${dir}"; \
(cd "$${dir}" && go run .); \
done

286
vendor/github.com/uptrace/bun/README.md generated vendored Normal file
View File

@@ -0,0 +1,286 @@
# Bun: SQL-first Golang ORM
[![build workflow](https://github.com/uptrace/bun/actions/workflows/build.yml/badge.svg)](https://github.com/uptrace/bun/actions)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/bun)](https://pkg.go.dev/github.com/uptrace/bun)
[![Documentation](https://img.shields.io/badge/bun-documentation-informational)](https://bun.uptrace.dev/)
[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
[![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20Bun%20Guru-006BFF)](https://gurubase.io/g/bun)
**Lightweight, SQL-first Golang ORM for PostgreSQL, MySQL, MSSQL, SQLite, and Oracle**
Bun is a modern ORM that embraces SQL rather than hiding it. Write complex queries in Go with type
safety, powerful scanning capabilities, and database-agnostic code that works across multiple SQL
databases.
## ✨ Key Features
- **SQL-first approach** - Write elegant, readable queries that feel like SQL
- **Multi-database support** - PostgreSQL, MySQL/MariaDB, MSSQL, SQLite, and Oracle
- **Type-safe operations** - Leverage Go's static typing for compile-time safety
- **Flexible scanning** - Query results into structs, maps, scalars, or slices
- **Performance optimized** - Built on `database/sql` with minimal overhead
- **Rich relationships** - Define complex table relationships with struct tags
- **Production ready** - Migrations, fixtures, soft deletes, and OpenTelemetry support
## 🚀 Quick Start
```bash
go get github.com/uptrace/bun
```
### Basic Example
```go
package main
import (
"context"
"database/sql"
"fmt"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect/sqlitedialect"
"github.com/uptrace/bun/driver/sqliteshim"
)
func main() {
ctx := context.Background()
// Open database
sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:")
if err != nil {
panic(err)
}
// Create Bun instance
db := bun.NewDB(sqldb, sqlitedialect.New())
// Define model
type User struct {
ID int64 `bun:",pk,autoincrement"`
Name string `bun:",notnull"`
}
// Create table
db.NewCreateTable().Model((*User)(nil)).Exec(ctx)
// Insert user
user := &User{Name: "John Doe"}
db.NewInsert().Model(user).Exec(ctx)
// Query user
err = db.NewSelect().Model(user).Where("id = ?", user.ID).Scan(ctx)
fmt.Printf("User: %+v\n", user)
}
```
## 🎯 Why Choose Bun?
### Elegant Complex Queries
Write sophisticated queries that remain readable and maintainable:
```go
regionalSales := db.NewSelect().
ColumnExpr("region").
ColumnExpr("SUM(amount) AS total_sales").
TableExpr("orders").
GroupExpr("region")
topRegions := db.NewSelect().
ColumnExpr("region").
TableExpr("regional_sales").
Where("total_sales > (SELECT SUM(total_sales) / 10 FROM regional_sales)")
var results []struct {
Region string `bun:"region"`
Product string `bun:"product"`
ProductUnits int `bun:"product_units"`
ProductSales int `bun:"product_sales"`
}
err := db.NewSelect().
With("regional_sales", regionalSales).
With("top_regions", topRegions).
ColumnExpr("region, product").
ColumnExpr("SUM(quantity) AS product_units").
ColumnExpr("SUM(amount) AS product_sales").
TableExpr("orders").
Where("region IN (SELECT region FROM top_regions)").
GroupExpr("region, product").
Scan(ctx, &results)
```
### Flexible Result Scanning
Scan query results into various Go types:
```go
// Into structs
var users []User
db.NewSelect().Model(&users).Scan(ctx)
// Into maps
var userMaps []map[string]interface{}
db.NewSelect().Table("users").Scan(ctx, &userMaps)
// Into scalars
var count int
db.NewSelect().Table("users").ColumnExpr("COUNT(*)").Scan(ctx, &count)
// Into individual variables
var id int64
var name string
db.NewSelect().Table("users").Column("id", "name").Limit(1).Scan(ctx, &id, &name)
```
## 📊 Database Support
| Database | Driver | Dialect |
| ------------- | ------------------------------------------ | --------------------- |
| PostgreSQL | `github.com/uptrace/bun/driver/pgdriver` | `pgdialect.New()` |
| MySQL/MariaDB | `github.com/go-sql-driver/mysql` | `mysqldialect.New()` |
| SQLite | `github.com/uptrace/bun/driver/sqliteshim` | `sqlitedialect.New()` |
| SQL Server | `github.com/denisenkom/go-mssqldb` | `mssqldialect.New()` |
| Oracle | `github.com/sijms/go-ora/v2` | `oracledialect.New()` |
## 🔧 Advanced Features
### Table Relationships
Define complex relationships with struct tags:
```go
type User struct {
ID int64 `bun:",pk,autoincrement"`
Name string `bun:",notnull"`
Posts []Post `bun:"rel:has-many,join:id=user_id"`
Profile Profile `bun:"rel:has-one,join:id=user_id"`
}
type Post struct {
ID int64 `bun:",pk,autoincrement"`
Title string
UserID int64
User *User `bun:"rel:belongs-to,join:user_id=id"`
}
// Load users with their posts
var users []User
err := db.NewSelect().
Model(&users).
Relation("Posts").
Scan(ctx)
```
### Bulk Operations
Efficient bulk operations for large datasets:
```go
// Bulk insert
users := []User{{Name: "John"}, {Name: "Jane"}, {Name: "Bob"}}
_, err := db.NewInsert().Model(&users).Exec(ctx)
// Bulk update with CTE
_, err = db.NewUpdate().
Model(&users).
Set("updated_at = NOW()").
Where("active = ?", true).
Exec(ctx)
// Bulk delete
_, err = db.NewDelete().
Model((*User)(nil)).
Where("created_at < ?", time.Now().AddDate(-1, 0, 0)).
Exec(ctx)
```
### Migrations
Version your database schema:
```go
import "github.com/uptrace/bun/migrate"
migrations := migrate.NewMigrations()
migrations.MustRegister(func(ctx context.Context, db *bun.DB) error {
_, err := db.NewCreateTable().Model((*User)(nil)).Exec(ctx)
return err
}, func(ctx context.Context, db *bun.DB) error {
_, err := db.NewDropTable().Model((*User)(nil)).Exec(ctx)
return err
})
migrator := migrate.NewMigrator(db, migrations)
err := migrator.Init(ctx)
err = migrator.Up(ctx)
```
## 📈 Monitoring & Observability
### Debug Queries
Enable query logging for development:
```go
import "github.com/uptrace/bun/extra/bundebug"
db.AddQueryHook(bundebug.NewQueryHook(
bundebug.WithVerbose(true),
))
```
### OpenTelemetry Integration
Production-ready observability with distributed tracing:
```go
import "github.com/uptrace/bun/extra/bunotel"
db.AddQueryHook(bunotel.NewQueryHook(
bunotel.WithDBName("myapp"),
))
```
> **Monitoring made easy**: Bun is brought to you by ⭐
> [**uptrace/uptrace**](https://github.com/uptrace/uptrace). Uptrace is an open-source APM tool that
> supports distributed tracing, metrics, and logs. You can use it to monitor applications and set up
> automatic alerts to receive notifications via email, Slack, Telegram, and others.
>
> See [OpenTelemetry example](example/opentelemetry) which demonstrates how you can use Uptrace to
> monitor Bun.
## 📚 Documentation & Resources
- **[Getting Started Guide](https://bun.uptrace.dev/guide/golang-orm.html)** - Comprehensive
tutorial
- **[API Reference](https://pkg.go.dev/github.com/uptrace/bun)** - Complete package documentation
- **[Examples](https://github.com/uptrace/bun/tree/master/example)** - Working code samples
- **[Starter Kit](https://github.com/go-bun/bun-starter-kit)** - Production-ready template
- **[Community Discussions](https://github.com/uptrace/bun/discussions)** - Get help and share ideas
## 🤝 Contributing
We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details on how to
get started.
**Thanks to all our contributors:**
<a href="https://github.com/uptrace/bun/graphs/contributors">
<img src="https://contributors-img.web.app/image?repo=uptrace/bun" alt="Contributors" />
</a>
## 🔗 Related Projects
- **[Golang HTTP router](https://github.com/uptrace/bunrouter)** - Fast and flexible HTTP router
- **[Golang msgpack](https://github.com/vmihailenco/msgpack)** - High-performance MessagePack
serialization
---
<div align="center">
<strong>Star ⭐ this repo if you find Bun useful!</strong><br>
<sub>Join our community on <a href="https://discord.gg/rWtp5Aj">Discord</a> • Follow updates on <a href="https://github.com/uptrace/bun">GitHub</a></sub>
</div>

98
vendor/github.com/uptrace/bun/bun.go generated vendored Normal file
View File

@@ -0,0 +1,98 @@
package bun
import (
"context"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// Aliases re-exported from the schema package so that bun users do not
// need to import schema directly.
type (
	Safe = schema.Safe
	Name = schema.Name
	Ident = schema.Ident
	Order = schema.Order
	NullTime = schema.NullTime
	BaseModel = schema.BaseModel
	Query = schema.Query
	BeforeAppendModelHook = schema.BeforeAppendModelHook
	BeforeScanRowHook = schema.BeforeScanRowHook
	AfterScanRowHook = schema.AfterScanRowHook
)
// Ordering constants re-exported from the schema package.
const (
	OrderAsc = schema.OrderAsc
	// Fixed: previously aliased schema.OrderDesc (copy/paste error), which
	// made bun.OrderAscNullsFirst sort descending instead of
	// ascending-with-NULLs-first.
	OrderAscNullsFirst = schema.OrderAscNullsFirst
	OrderAscNullsLast  = schema.OrderAscNullsLast
	OrderDesc          = schema.OrderDesc
	OrderDescNullsFirst = schema.OrderDescNullsFirst
	OrderDescNullsLast  = schema.OrderDescNullsLast
)
// SafeQuery wraps schema.SafeQuery, pairing a raw query string with its args.
func SafeQuery(query string, args ...any) schema.QueryWithArgs {
	return schema.SafeQuery(query, args)
}
// BeforeSelectHook is the model hook interface called before a SelectQuery runs.
type BeforeSelectHook interface {
	BeforeSelect(ctx context.Context, query *SelectQuery) error
}
// AfterSelectHook is called after a SelectQuery runs.
type AfterSelectHook interface {
	AfterSelect(ctx context.Context, query *SelectQuery) error
}
// BeforeInsertHook is called before an InsertQuery runs.
type BeforeInsertHook interface {
	BeforeInsert(ctx context.Context, query *InsertQuery) error
}
// AfterInsertHook is called after an InsertQuery runs.
type AfterInsertHook interface {
	AfterInsert(ctx context.Context, query *InsertQuery) error
}
// BeforeUpdateHook is called before an UpdateQuery runs.
type BeforeUpdateHook interface {
	BeforeUpdate(ctx context.Context, query *UpdateQuery) error
}
// AfterUpdateHook is called after an UpdateQuery runs.
type AfterUpdateHook interface {
	AfterUpdate(ctx context.Context, query *UpdateQuery) error
}
// BeforeDeleteHook is called before a DeleteQuery runs.
type BeforeDeleteHook interface {
	BeforeDelete(ctx context.Context, query *DeleteQuery) error
}
// AfterDeleteHook is called after a DeleteQuery runs.
type AfterDeleteHook interface {
	AfterDelete(ctx context.Context, query *DeleteQuery) error
}
// BeforeCreateTableHook is called before a CreateTableQuery runs.
type BeforeCreateTableHook interface {
	BeforeCreateTable(ctx context.Context, query *CreateTableQuery) error
}
// AfterCreateTableHook is called after a CreateTableQuery runs.
type AfterCreateTableHook interface {
	AfterCreateTable(ctx context.Context, query *CreateTableQuery) error
}
// BeforeDropTableHook is called before a DropTableQuery runs.
type BeforeDropTableHook interface {
	BeforeDropTable(ctx context.Context, query *DropTableQuery) error
}
// AfterDropTableHook is called after a DropTableQuery runs.
type AfterDropTableHook interface {
	AfterDropTable(ctx context.Context, query *DropTableQuery) error
}
// SetLogger overwrites default Bun logger.
func SetLogger(logger internal.Logging) {
	internal.SetLogger(logger)
}
// In wraps schema.In, producing an appender for an IN (...) list from slice.
func In(slice any) schema.QueryAppender {
	return schema.In(slice)
}
// NullZero wraps schema.NullZero for the given value.
func NullZero(value any) schema.QueryAppender {
	return schema.NullZero(value)
}

1
vendor/github.com/uptrace/bun/commitlint.config.js generated vendored Normal file
View File

@@ -0,0 +1 @@
module.exports = { extends: ['@commitlint/config-conventional'] }

778
vendor/github.com/uptrace/bun/db.go generated vendored Normal file
View File

@@ -0,0 +1,778 @@
package bun
import (
"context"
cryptorand "crypto/rand"
"database/sql"
"encoding/hex"
"fmt"
"reflect"
"strings"
"sync/atomic"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
const (
	// discardUnknownColumns makes scanning ignore result columns that have
	// no matching model field (see WithDiscardUnknownColumns).
	discardUnknownColumns internal.Flag = 1 << iota
)
// DBStats holds per-DB counters; read them atomically via DB.DBStats.
type DBStats struct {
	Queries uint32
	Errors uint32
}
// DBOption configures a DB during NewDB.
type DBOption func(db *DB)
// WithOptions combines several DBOptions into one.
func WithOptions(opts ...DBOption) DBOption {
	return func(db *DB) {
		for _, opt := range opts {
			opt(db)
		}
	}
}
// WithDiscardUnknownColumns sets the discardUnknownColumns flag on the DB.
func WithDiscardUnknownColumns() DBOption {
	return func(db *DB) {
		db.flags = db.flags.Set(discardUnknownColumns)
	}
}
// ConnResolver enables routing queries to multiple databases.
type ConnResolver interface {
	ResolveConn(ctx context.Context, query Query) IConn
	Close() error
}
// WithConnResolver installs a ConnResolver on the DB.
func WithConnResolver(resolver ConnResolver) DBOption {
	return func(db *DB) {
		db.resolver = resolver
	}
}
// DB wraps *sql.DB with a dialect-aware query generator and query hooks.
// It is safe to shallow-copy via clone(): mutable state lives behind
// noCopyState.
type DB struct {
	// Must be a pointer so we copy the whole state, not individual fields.
	*noCopyState
	gen schema.QueryGen
	queryHooks []QueryHook
}
// noCopyState contains DB fields that must not be copied on clone(),
// for example, it is forbidden to copy atomic.Pointer.
type noCopyState struct {
	*sql.DB
	dialect schema.Dialect
	resolver ConnResolver
	flags internal.Flag
	// closed guards Close so it runs at most once.
	closed atomic.Bool
	stats DBStats
}
// NewDB wraps sqldb in a bun DB using the given dialect, initializing the
// dialect with the connection and applying any options.
func NewDB(sqldb *sql.DB, dialect schema.Dialect, opts ...DBOption) *DB {
	dialect.Init(sqldb)
	db := &DB{
		noCopyState: &noCopyState{
			DB: sqldb,
			dialect: dialect,
		},
		gen: schema.NewQueryGen(dialect),
	}
	for _, opt := range opts {
		opt(db)
	}
	return db
}
// String returns a short description of the DB, e.g. "DB<dialect=pg>".
func (db *DB) String() string {
	var b strings.Builder
	b.WriteString("DB<dialect=")
	b.WriteString(db.dialect.Name().String())
	b.WriteString(">")
	return b.String()
}
// Close closes the underlying *sql.DB and, if set, the ConnResolver,
// returning the first error encountered. Subsequent calls are no-ops.
func (db *DB) Close() error {
	// Swap returns the previous value, so only the first caller proceeds.
	if db.closed.Swap(true) {
		return nil
	}
	firstErr := db.DB.Close()
	if db.resolver != nil {
		if err := db.resolver.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
// DBStats returns an atomically-read snapshot of the DB counters.
func (db *DB) DBStats() DBStats {
	return DBStats{
		Queries: atomic.LoadUint32(&db.stats.Queries),
		Errors: atomic.LoadUint32(&db.stats.Errors),
	}
}
// NewValues creates a ValuesQuery for model, bound to this DB.
func (db *DB) NewValues(model any) *ValuesQuery {
	return NewValuesQuery(db, model)
}
// NewMerge creates a MergeQuery bound to this DB.
func (db *DB) NewMerge() *MergeQuery {
	return NewMergeQuery(db)
}
// NewSelect creates a SelectQuery bound to this DB.
func (db *DB) NewSelect() *SelectQuery {
	return NewSelectQuery(db)
}
// NewInsert creates an InsertQuery bound to this DB.
func (db *DB) NewInsert() *InsertQuery {
	return NewInsertQuery(db)
}
// NewUpdate creates an UpdateQuery bound to this DB.
func (db *DB) NewUpdate() *UpdateQuery {
	return NewUpdateQuery(db)
}
// NewDelete creates a DeleteQuery bound to this DB.
func (db *DB) NewDelete() *DeleteQuery {
	return NewDeleteQuery(db)
}
// NewRaw creates a RawQuery from query and args, bound to this DB.
func (db *DB) NewRaw(query string, args ...any) *RawQuery {
	return NewRawQuery(db, query, args...)
}
// NewCreateTable creates a CreateTableQuery bound to this DB.
func (db *DB) NewCreateTable() *CreateTableQuery {
	return NewCreateTableQuery(db)
}
// NewDropTable creates a DropTableQuery bound to this DB.
func (db *DB) NewDropTable() *DropTableQuery {
	return NewDropTableQuery(db)
}
// NewCreateIndex creates a CreateIndexQuery bound to this DB.
func (db *DB) NewCreateIndex() *CreateIndexQuery {
	return NewCreateIndexQuery(db)
}
// NewDropIndex creates a DropIndexQuery bound to this DB.
func (db *DB) NewDropIndex() *DropIndexQuery {
	return NewDropIndexQuery(db)
}
// NewTruncateTable creates a TruncateTableQuery bound to this DB.
func (db *DB) NewTruncateTable() *TruncateTableQuery {
	return NewTruncateTableQuery(db)
}
// NewAddColumn creates an AddColumnQuery bound to this DB.
func (db *DB) NewAddColumn() *AddColumnQuery {
	return NewAddColumnQuery(db)
}
// NewDropColumn creates a DropColumnQuery bound to this DB.
func (db *DB) NewDropColumn() *DropColumnQuery {
	return NewDropColumnQuery(db)
}
// ResetModel drops (if it exists, cascading) and re-creates the table for
// each given model, stopping at the first error.
func (db *DB) ResetModel(ctx context.Context, models ...any) error {
	for _, model := range models {
		if _, err := db.NewDropTable().Model(model).IfExists().Cascade().Exec(ctx); err != nil {
			return err
		}
		if _, err := db.NewCreateTable().Model(model).Exec(ctx); err != nil {
			return err
		}
	}
	return nil
}
// Dialect returns the schema.Dialect this DB was created with.
func (db *DB) Dialect() schema.Dialect {
	return db.dialect
}
// ScanRows scans all rows into dest, closing rows before returning and
// reporting any iteration error via rows.Err.
func (db *DB) ScanRows(ctx context.Context, rows *sql.Rows, dest ...any) error {
	defer rows.Close()
	model, err := newModel(db, dest)
	if err != nil {
		return err
	}
	_, err = model.ScanRows(ctx, rows)
	if err != nil {
		return err
	}
	return rows.Err()
}
// ScanRow scans the current row into dest; the dest model must support
// single-row scanning. The caller remains responsible for rows.
func (db *DB) ScanRow(ctx context.Context, rows *sql.Rows, dest ...any) error {
	model, err := newModel(db, dest)
	if err != nil {
		return err
	}
	rs, ok := model.(rowScanner)
	if !ok {
		return fmt.Errorf("bun: %T does not support ScanRow", model)
	}
	return rs.ScanRow(ctx, rows)
}
// Table returns the table metadata registered for typ in the dialect's registry.
func (db *DB) Table(typ reflect.Type) *schema.Table {
	return db.dialect.Tables().Get(typ)
}
// RegisterModel registers models by name so they can be referenced in table relations
// and fixtures.
func (db *DB) RegisterModel(models ...any) {
	db.dialect.Tables().Register(models...)
}
// clone returns a shallow copy of db that shares noCopyState but owns its
// queryHooks slice header.
func (db *DB) clone() *DB {
	clone := *db
	// Full slice expression caps the slice so a later append in the clone
	// cannot overwrite the original's backing array.
	l := len(clone.queryHooks)
	clone.queryHooks = clone.queryHooks[:l:l]
	return &clone
}
// WithNamedArg returns a copy of the DB with an additional named argument
// bound into its query generator. Named arguments can later be referenced
// in SQL queries using placeholders (e.g. ?name). This method does not
// mutate the original DB instance but instead creates a cloned copy.
func (db *DB) WithNamedArg(name string, value any) *DB {
	clone := db.clone()
	clone.gen = clone.gen.WithNamedArg(name, value)
	return clone
}
// QueryGen returns the query generator used to format this DB's queries.
func (db *DB) QueryGen() schema.QueryGen {
	return db.gen
}
// queryHookIniter is implemented by hooks that need the DB at install time.
type queryHookIniter interface {
	Init(db *DB)
}
// WithQueryHook returns a copy of the DB with the provided query hook
// attached. A query hook allows inspection or modification of queries
// before/after execution (e.g. for logging, tracing, metrics).
// If the hook implements queryHookIniter, its Init method is invoked
// with the current DB before cloning. Like other modifiers, this
// method leaves the original DB unmodified.
func (db *DB) WithQueryHook(hook QueryHook) *DB {
if initer, ok := hook.(queryHookIniter); ok {
initer.Init(db)
}
clone := db.clone()
clone.queryHooks = append(clone.queryHooks, hook)
return clone
}
// AddQueryHook appends hook to this DB in place (unlike WithQueryHook,
// which clones).
//
// DEPRECATED: use WithQueryHook instead
func (db *DB) AddQueryHook(hook QueryHook) {
	if initer, ok := hook.(queryHookIniter); ok {
		initer.Init(db)
	}
	db.queryHooks = append(db.queryHooks, hook)
}
// ResetQueryHooks removes all query hooks from this DB in place.
//
// DEPRECATED: use WithQueryHook instead
func (db *DB) ResetQueryHooks() {
	// Nil out entries before dropping the slice so the hooks can be GC'd
	// even if something retains the old backing array.
	for i := range db.queryHooks {
		db.queryHooks[i] = nil
	}
	db.queryHooks = nil
}
// UpdateFQN returns a fully qualified column name. For MySQL, it returns the column name with
// the table alias. For other RDBMS, it returns just the column name.
func (db *DB) UpdateFQN(alias, column string) Ident {
if db.HasFeature(feature.UpdateMultiTable) {
return Ident(alias + "." + column)
}
return Ident(column)
}
// HasFeature uses feature package to report whether the underlying DBMS supports this feature.
func (db *DB) HasFeature(feat feature.Feature) bool {
return db.dialect.Features().Has(feat)
}
//------------------------------------------------------------------------------
// Exec is ExecContext with context.Background().
func (db *DB) Exec(query string, args ...any) (sql.Result, error) {
	return db.ExecContext(context.Background(), query, args...)
}
// ExecContext formats query with args via the query generator, runs the
// query hooks around execution, and executes it on the underlying *sql.DB.
func (db *DB) ExecContext(
	ctx context.Context, query string, args ...any,
) (sql.Result, error) {
	formattedQuery := db.format(query, args)
	ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	res, err := db.DB.ExecContext(ctx, formattedQuery)
	db.afterQuery(ctx, event, res, err)
	return res, err
}
// Query is QueryContext with context.Background().
func (db *DB) Query(query string, args ...any) (*sql.Rows, error) {
	return db.QueryContext(context.Background(), query, args...)
}
// QueryContext formats and executes query, wrapped in the query hooks.
func (db *DB) QueryContext(
	ctx context.Context, query string, args ...any,
) (*sql.Rows, error) {
	formattedQuery := db.format(query, args)
	ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	rows, err := db.DB.QueryContext(ctx, formattedQuery)
	db.afterQuery(ctx, event, nil, err)
	return rows, err
}
// QueryRow is QueryRowContext with context.Background().
func (db *DB) QueryRow(query string, args ...any) *sql.Row {
	return db.QueryRowContext(context.Background(), query, args...)
}
// QueryRowContext formats and executes query for a single row, wrapped in
// the query hooks; errors are deferred to row.Err/Scan as with database/sql.
func (db *DB) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row {
	formattedQuery := db.format(query, args)
	ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	row := db.DB.QueryRowContext(ctx, formattedQuery)
	db.afterQuery(ctx, event, nil, row.Err())
	return row
}
// format renders query with args through the DB's query generator.
func (db *DB) format(query string, args []any) string {
	return db.gen.FormatQuery(query, args...)
}
//------------------------------------------------------------------------------
// Conn wraps a dedicated *sql.Conn together with the owning DB so that
// queries run through it still go through bun's formatting and hooks.
type Conn struct {
	db *DB
	*sql.Conn
}
// Conn returns a single connection from the pool, wrapped in a bun Conn.
// The caller is responsible for returning it via (*sql.Conn).Close.
func (db *DB) Conn(ctx context.Context) (Conn, error) {
	conn, err := db.DB.Conn(ctx)
	if err != nil {
		return Conn{}, err
	}
	return Conn{
		db: db,
		Conn: conn,
	}, nil
}
// ExecContext mirrors DB.ExecContext but executes on this dedicated
// connection: format client-side, run hooks, execute.
func (c Conn) ExecContext(
	ctx context.Context, query string, args ...any,
) (sql.Result, error) {
	formattedQuery := c.db.format(query, args)
	ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	res, err := c.Conn.ExecContext(ctx, formattedQuery)
	c.db.afterQuery(ctx, event, res, err)
	return res, err
}
// QueryContext mirrors DB.QueryContext on this dedicated connection.
func (c Conn) QueryContext(
	ctx context.Context, query string, args ...any,
) (*sql.Rows, error) {
	formattedQuery := c.db.format(query, args)
	ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	rows, err := c.Conn.QueryContext(ctx, formattedQuery)
	c.db.afterQuery(ctx, event, nil, err)
	return rows, err
}
// QueryRowContext mirrors DB.QueryRowContext on this dedicated connection.
func (c Conn) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row {
	formattedQuery := c.db.format(query, args)
	ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	row := c.Conn.QueryRowContext(ctx, formattedQuery)
	c.db.afterQuery(ctx, event, nil, row.Err())
	return row
}
// Dialect returns the dialect of the owning DB.
func (c Conn) Dialect() schema.Dialect {
	return c.db.Dialect()
}
// The New* constructors below mirror the DB query builders but bind the
// produced query to this dedicated connection via .Conn(c).

func (c Conn) NewValues(model any) *ValuesQuery {
	return NewValuesQuery(c.db, model).Conn(c)
}
func (c Conn) NewMerge() *MergeQuery {
	return NewMergeQuery(c.db).Conn(c)
}
func (c Conn) NewSelect() *SelectQuery {
	return NewSelectQuery(c.db).Conn(c)
}
func (c Conn) NewInsert() *InsertQuery {
	return NewInsertQuery(c.db).Conn(c)
}
func (c Conn) NewUpdate() *UpdateQuery {
	return NewUpdateQuery(c.db).Conn(c)
}
func (c Conn) NewDelete() *DeleteQuery {
	return NewDeleteQuery(c.db).Conn(c)
}
func (c Conn) NewRaw(query string, args ...any) *RawQuery {
	return NewRawQuery(c.db, query, args...).Conn(c)
}
func (c Conn) NewCreateTable() *CreateTableQuery {
	return NewCreateTableQuery(c.db).Conn(c)
}
func (c Conn) NewDropTable() *DropTableQuery {
	return NewDropTableQuery(c.db).Conn(c)
}
func (c Conn) NewCreateIndex() *CreateIndexQuery {
	return NewCreateIndexQuery(c.db).Conn(c)
}
func (c Conn) NewDropIndex() *DropIndexQuery {
	return NewDropIndexQuery(c.db).Conn(c)
}
func (c Conn) NewTruncateTable() *TruncateTableQuery {
	return NewTruncateTableQuery(c.db).Conn(c)
}
func (c Conn) NewAddColumn() *AddColumnQuery {
	return NewAddColumnQuery(c.db).Conn(c)
}
func (c Conn) NewDropColumn() *DropColumnQuery {
	return NewDropColumnQuery(c.db).Conn(c)
}
// RunInTx runs the function in a transaction. If the function returns an error,
// the transaction is rolled back. Otherwise, the transaction is committed.
// The deferred rollback is a no-op once done is set, so a successful fn is
// committed exactly once; a panic in fn also triggers the rollback.
func (c Conn) RunInTx(
	ctx context.Context, opts *sql.TxOptions, fn func(ctx context.Context, tx Tx) error,
) error {
	tx, err := c.BeginTx(ctx, opts)
	if err != nil {
		return err
	}
	var done bool
	defer func() {
		if !done {
			_ = tx.Rollback()
		}
	}()
	if err := fn(ctx, tx); err != nil {
		return err
	}
	done = true
	return tx.Commit()
}
// BeginTx starts a transaction on this connection. The synthetic query
// "BEGIN" is reported to query hooks; the hook-modified context is stored
// in the returned Tx for later COMMIT/ROLLBACK reporting.
func (c Conn) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
	ctx, event := c.db.beforeQuery(ctx, nil, "BEGIN", nil, "BEGIN", nil)
	tx, err := c.Conn.BeginTx(ctx, opts)
	c.db.afterQuery(ctx, event, nil, err)
	if err != nil {
		return Tx{}, err
	}
	return Tx{
		ctx: ctx,
		db: c.db,
		Tx: tx,
	}, nil
}
//------------------------------------------------------------------------------
// Stmt is a thin wrapper over *sql.Stmt; prepared statements bypass bun's
// client-side formatting and hooks.
type Stmt struct {
	*sql.Stmt
}
// Prepare is PrepareContext with context.Background.
func (db *DB) Prepare(query string) (Stmt, error) {
	return db.PrepareContext(context.Background(), query)
}
// PrepareContext prepares the query on the underlying *sql.DB as-is
// (no bun formatting is applied).
func (db *DB) PrepareContext(ctx context.Context, query string) (Stmt, error) {
	stmt, err := db.DB.PrepareContext(ctx, query)
	if err != nil {
		return Stmt{}, err
	}
	return Stmt{Stmt: stmt}, nil
}
//------------------------------------------------------------------------------
// Tx wraps *sql.Tx with the owning DB, the context captured at BEGIN time
// (used for COMMIT/ROLLBACK hook reporting), and an optional savepoint name.
type Tx struct {
	ctx context.Context
	db *DB
	// name is the name of a savepoint
	name string
	*sql.Tx
}
// RunInTx runs the function in a transaction. If the function returns an error,
// the transaction is rolled back. Otherwise, the transaction is committed.
// A panic in fn also triggers the deferred rollback.
func (db *DB) RunInTx(
	ctx context.Context, opts *sql.TxOptions, fn func(ctx context.Context, tx Tx) error,
) error {
	tx, err := db.BeginTx(ctx, opts)
	if err != nil {
		return err
	}
	var done bool
	defer func() {
		if !done {
			_ = tx.Rollback()
		}
	}()
	if err := fn(ctx, tx); err != nil {
		return err
	}
	done = true
	return tx.Commit()
}
// Begin is BeginTx with context.Background and default options.
func (db *DB) Begin() (Tx, error) {
	return db.BeginTx(context.Background(), nil)
}
// BeginTx starts a transaction, reporting the synthetic "BEGIN" query to
// hooks. The hook-modified context is retained inside Tx so COMMIT and
// ROLLBACK are reported against the same context.
func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
	ctx, event := db.beforeQuery(ctx, nil, "BEGIN", nil, "BEGIN", nil)
	tx, err := db.DB.BeginTx(ctx, opts)
	db.afterQuery(ctx, event, nil, err)
	if err != nil {
		return Tx{}, err
	}
	return Tx{
		ctx: ctx,
		db: db,
		Tx: tx,
	}, nil
}
// Commit commits the real transaction, or releases the savepoint when this
// Tx wraps one (name != "").
func (tx Tx) Commit() error {
	if tx.name == "" {
		return tx.commitTX()
	}
	return tx.commitSP()
}
// commitTX commits the underlying *sql.Tx, reporting "COMMIT" to hooks.
func (tx Tx) commitTX() error {
	ctx, event := tx.db.beforeQuery(tx.ctx, nil, "COMMIT", nil, "COMMIT", nil)
	err := tx.Tx.Commit()
	tx.db.afterQuery(ctx, event, nil, err)
	return err
}
// commitSP releases the savepoint. MSSQL-style savepoints cannot be
// released, so committing one there is intentionally a no-op.
func (tx Tx) commitSP() error {
	if tx.db.HasFeature(feature.MSSavepoint) {
		return nil
	}
	query := "RELEASE SAVEPOINT " + tx.name
	_, err := tx.ExecContext(tx.ctx, query)
	return err
}
// Rollback rolls back the real transaction, or rolls back to the savepoint
// when this Tx wraps one.
func (tx Tx) Rollback() error {
	if tx.name == "" {
		return tx.rollbackTX()
	}
	return tx.rollbackSP()
}
// rollbackTX rolls back the underlying *sql.Tx, reporting "ROLLBACK" to hooks.
func (tx Tx) rollbackTX() error {
	ctx, event := tx.db.beforeQuery(tx.ctx, nil, "ROLLBACK", nil, "ROLLBACK", nil)
	err := tx.Tx.Rollback()
	tx.db.afterQuery(ctx, event, nil, err)
	return err
}
// rollbackSP rolls back to the savepoint, switching to MSSQL syntax when
// the dialect requires it.
func (tx Tx) rollbackSP() error {
	query := "ROLLBACK TO SAVEPOINT " + tx.name
	if tx.db.HasFeature(feature.MSSavepoint) {
		query = "ROLLBACK TRANSACTION " + tx.name
	}
	_, err := tx.ExecContext(tx.ctx, query)
	return err
}
// Exec is ExecContext with context.TODO (no caller-supplied context).
func (tx Tx) Exec(query string, args ...any) (sql.Result, error) {
	return tx.ExecContext(context.TODO(), query, args...)
}
// ExecContext mirrors DB.ExecContext inside the transaction: format
// client-side, run hooks, execute on the underlying *sql.Tx.
func (tx Tx) ExecContext(
	ctx context.Context, query string, args ...any,
) (sql.Result, error) {
	formattedQuery := tx.db.format(query, args)
	ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	res, err := tx.Tx.ExecContext(ctx, formattedQuery)
	tx.db.afterQuery(ctx, event, res, err)
	return res, err
}
// Query is QueryContext with context.TODO.
func (tx Tx) Query(query string, args ...any) (*sql.Rows, error) {
	return tx.QueryContext(context.TODO(), query, args...)
}
// QueryContext mirrors DB.QueryContext inside the transaction.
func (tx Tx) QueryContext(
	ctx context.Context, query string, args ...any,
) (*sql.Rows, error) {
	formattedQuery := tx.db.format(query, args)
	ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	rows, err := tx.Tx.QueryContext(ctx, formattedQuery)
	tx.db.afterQuery(ctx, event, nil, err)
	return rows, err
}
// QueryRow is QueryRowContext with context.TODO.
func (tx Tx) QueryRow(query string, args ...any) *sql.Row {
	return tx.QueryRowContext(context.TODO(), query, args...)
}
// QueryRowContext mirrors DB.QueryRowContext inside the transaction;
// hooks observe the row's deferred error.
func (tx Tx) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row {
	formattedQuery := tx.db.format(query, args)
	ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	row := tx.Tx.QueryRowContext(ctx, formattedQuery)
	tx.db.afterQuery(ctx, event, nil, row.Err())
	return row
}
//------------------------------------------------------------------------------
// Begin starts a savepoint using the transaction's stored context.
func (tx Tx) Begin() (Tx, error) {
	return tx.BeginTx(tx.ctx, nil)
}
// BeginTx will save a point in the running transaction.
// Options are ignored; the returned Tx shares the same underlying *sql.Tx
// and carries the generated savepoint name ("SP_" + 28 hex chars).
func (tx Tx) BeginTx(ctx context.Context, _ *sql.TxOptions) (Tx, error) {
	// mssql savepoint names are limited to 32 characters
	sp := make([]byte, 14)
	_, err := cryptorand.Read(sp)
	if err != nil {
		return Tx{}, err
	}
	qName := "SP_" + hex.EncodeToString(sp)
	query := "SAVEPOINT " + qName
	if tx.db.HasFeature(feature.MSSavepoint) {
		query = "SAVE TRANSACTION " + qName
	}
	_, err = tx.ExecContext(ctx, query)
	if err != nil {
		return Tx{}, err
	}
	return Tx{
		ctx: ctx,
		db: tx.db,
		Tx: tx.Tx,
		name: qName,
	}, nil
}
// RunInTx runs fn inside a savepoint: rolled back to on error, released
// (committed) on success. Options are ignored.
func (tx Tx) RunInTx(
	ctx context.Context, _ *sql.TxOptions, fn func(ctx context.Context, tx Tx) error,
) error {
	sp, err := tx.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	var done bool
	defer func() {
		if !done {
			_ = sp.Rollback()
		}
	}()
	if err := fn(ctx, sp); err != nil {
		return err
	}
	done = true
	return sp.Commit()
}
// Dialect returns the dialect of the owning DB.
func (tx Tx) Dialect() schema.Dialect {
	return tx.db.Dialect()
}

// The New* constructors below mirror the DB query builders but bind the
// produced query to this transaction via .Conn(tx).

func (tx Tx) NewValues(model any) *ValuesQuery {
	return NewValuesQuery(tx.db, model).Conn(tx)
}
func (tx Tx) NewMerge() *MergeQuery {
	return NewMergeQuery(tx.db).Conn(tx)
}
func (tx Tx) NewSelect() *SelectQuery {
	return NewSelectQuery(tx.db).Conn(tx)
}
func (tx Tx) NewInsert() *InsertQuery {
	return NewInsertQuery(tx.db).Conn(tx)
}
func (tx Tx) NewUpdate() *UpdateQuery {
	return NewUpdateQuery(tx.db).Conn(tx)
}
func (tx Tx) NewDelete() *DeleteQuery {
	return NewDeleteQuery(tx.db).Conn(tx)
}
func (tx Tx) NewRaw(query string, args ...any) *RawQuery {
	return NewRawQuery(tx.db, query, args...).Conn(tx)
}
func (tx Tx) NewCreateTable() *CreateTableQuery {
	return NewCreateTableQuery(tx.db).Conn(tx)
}
func (tx Tx) NewDropTable() *DropTableQuery {
	return NewDropTableQuery(tx.db).Conn(tx)
}
func (tx Tx) NewCreateIndex() *CreateIndexQuery {
	return NewCreateIndexQuery(tx.db).Conn(tx)
}
func (tx Tx) NewDropIndex() *DropIndexQuery {
	return NewDropIndexQuery(tx.db).Conn(tx)
}
func (tx Tx) NewTruncateTable() *TruncateTableQuery {
	return NewTruncateTableQuery(tx.db).Conn(tx)
}
func (tx Tx) NewAddColumn() *AddColumnQuery {
	return NewAddColumnQuery(tx.db).Conn(tx)
}
func (tx Tx) NewDropColumn() *DropColumnQuery {
	return NewDropColumnQuery(tx.db).Conn(tx)
}
// makeQueryBytes returns a fresh scratch buffer for building queries
// (zero length, pre-sized capacity; see internal.MakeQueryBytes).
func (db *DB) makeQueryBytes() []byte {
	return internal.MakeQueryBytes()
}

105
vendor/github.com/uptrace/bun/dialect/append.go generated vendored Normal file
View File

@@ -0,0 +1,105 @@
package dialect
import (
"math"
"strconv"
"github.com/uptrace/bun/internal"
)
// AppendError appends err rendered as the placeholder "?!(<message>)",
// used to surface formatting errors inside a generated query.
func AppendError(b []byte, err error) []byte {
	b = append(b, '?', '!', '(')
	b = append(b, err.Error()...)
	return append(b, ')')
}
// AppendNull appends the SQL NULL literal.
func AppendNull(b []byte) []byte {
	return append(b, 'N', 'U', 'L', 'L')
}

// AppendBool appends the SQL TRUE/FALSE literal for v.
func AppendBool(b []byte, v bool) []byte {
	lit := "FALSE"
	if v {
		lit = "TRUE"
	}
	return append(b, lit...)
}
// AppendFloat32 appends num as a SQL float literal (32-bit precision).
func AppendFloat32(b []byte, num float32) []byte {
	return appendFloat(b, float64(num), 32)
}

// AppendFloat64 appends num as a SQL float literal (64-bit precision).
func AppendFloat64(b []byte, num float64) []byte {
	return appendFloat(b, num, 64)
}

// appendFloat renders the non-finite values as quoted PostgreSQL-style
// literals and everything else via strconv with the shortest 'f' form.
func appendFloat(b []byte, num float64, bitSize int) []byte {
	if math.IsNaN(num) {
		return append(b, "'NaN'"...)
	}
	if math.IsInf(num, 1) {
		return append(b, "'Infinity'"...)
	}
	if math.IsInf(num, -1) {
		return append(b, "'-Infinity'"...)
	}
	return strconv.AppendFloat(b, num, 'f', -1, bitSize)
}
//------------------------------------------------------------------------------
// AppendName appends ident wrapped in the given quote character, doubling
// any embedded quotes (see appendName).
func AppendName(b []byte, ident string, quote byte) []byte {
	return appendName(b, internal.Bytes(ident), quote)
}
// appendName appends ident surrounded by quote characters; an embedded
// quote is escaped by doubling it, SQL-style.
func appendName(b, ident []byte, quote byte) []byte {
	b = append(b, quote)
	for _, ch := range ident {
		b = append(b, ch)
		if ch == quote {
			b = append(b, quote) // double the quote to escape it
		}
	}
	return append(b, quote)
}
// AppendIdent appends a possibly dotted identifier (e.g. "schema.table"),
// quoting each segment separately; see appendIdent.
func AppendIdent(b []byte, name string, quote byte) []byte {
	return appendIdent(b, internal.Bytes(name), quote)
}
// appendIdent quotes a dotted identifier segment by segment: '.' closes the
// current quoted segment and is emitted bare, '*' outside quotes is emitted
// bare (so "t.*" becomes `"t".*`), and embedded quote characters are
// doubled. Quoting is opened lazily on the first ordinary character.
func appendIdent(b, name []byte, quote byte) []byte {
	var quoted bool
loop:
	for _, c := range name {
		switch c {
		case '*':
			// A bare '*' (outside quotes) is a wildcard, not part of a name.
			if !quoted {
				b = append(b, '*')
				continue loop
			}
		case '.':
			// Close the current segment before the separator.
			if quoted {
				b = append(b, quote)
				quoted = false
			}
			b = append(b, '.')
			continue loop
		}
		if !quoted {
			b = append(b, quote)
			quoted = true
		}
		if c == quote {
			b = append(b, quote, quote)
		} else {
			b = append(b, c)
		}
	}
	if quoted {
		b = append(b, quote)
	}
	return b
}

31
vendor/github.com/uptrace/bun/dialect/dialect.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
package dialect
// Name identifies a SQL dialect supported by bun.
type Name int

// Known dialects; Invalid is the zero value.
const (
	Invalid Name = iota
	PG
	SQLite
	MySQL
	MSSQL
	Oracle
)

// String returns the short lowercase label of the dialect, or "custom"
// for any value outside the known range.
func (n Name) String() string {
	labels := [...]string{
		Invalid: "invalid",
		PG:      "pg",
		SQLite:  "sqlite",
		MySQL:   "mysql",
		MSSQL:   "mssql",
		Oracle:  "oracle",
	}
	if n >= 0 && int(n) < len(labels) {
		return labels[n]
	}
	return "custom"
}

View File

@@ -0,0 +1,98 @@
package feature
import (
"fmt"
"strconv"
"github.com/uptrace/bun/internal"
)
// Feature is a single dialect capability bit; Feature values combine into
// a bitmask via internal.Flag.
type Feature = internal.Flag
// Capability bits reported by dialects. Each constant is a distinct bit
// (1 << iota), so they can be OR-ed together and tested with Flag.Has.
const (
	CTE Feature = 1 << iota
	WithValues
	Returning
	InsertReturning
	Output // mssql
	DefaultPlaceholder
	DoubleColonCast
	ValuesRow
	UpdateMultiTable
	InsertTableAlias
	UpdateTableAlias
	DeleteTableAlias
	AutoIncrement
	Identity
	TableCascade
	TableIdentity
	TableTruncate
	InsertOnConflict // INSERT ... ON CONFLICT
	InsertOnDuplicateKey // INSERT ... ON DUPLICATE KEY
	InsertIgnore // INSERT IGNORE ...
	TableNotExists
	OffsetFetch
	SelectExists
	UpdateFromTable
	MSSavepoint
	GeneratedIdentity
	CompositeIn // ... WHERE (A,B) IN ((N, NN), (N, NN)...)
	UpdateOrderLimit // UPDATE ... ORDER BY ... LIMIT ...
	DeleteOrderLimit // DELETE ... ORDER BY ... LIMIT ...
	DeleteReturning
	MergeReturning
	AlterColumnExists // ADD/DROP COLUMN IF NOT EXISTS/IF EXISTS
	FKDefaultOnAction // FK ON UPDATE/ON DELETE has default value: NO ACTION
)
// NotSupportError reports that the current dialect lacks a feature.
type NotSupportError struct {
	Flag Feature
}
// Error renders the feature's registered name, falling back to its numeric
// bit value when the feature is not in flag2str.
func (err *NotSupportError) Error() string {
	name, ok := flag2str[err.Flag]
	if !ok {
		name = strconv.FormatInt(int64(err.Flag), 10)
	}
	return fmt.Sprintf("bun: feature %s is not supported by current dialect", name)
}
// NewNotSupportError returns a NotSupportError for the given feature bit.
func NewNotSupportError(flag Feature) *NotSupportError {
	return &NotSupportError{Flag: flag}
}
// flag2str maps each feature bit to its human-readable name, used when
// rendering NotSupportError messages.
var flag2str = map[Feature]string{
	CTE: "CTE",
	WithValues: "WithValues",
	Returning: "Returning",
	InsertReturning: "InsertReturning",
	Output: "Output",
	DefaultPlaceholder: "DefaultPlaceholder",
	DoubleColonCast: "DoubleColonCast",
	ValuesRow: "ValuesRow",
	UpdateMultiTable: "UpdateMultiTable",
	InsertTableAlias: "InsertTableAlias",
	UpdateTableAlias: "UpdateTableAlias",
	DeleteTableAlias: "DeleteTableAlias",
	AutoIncrement: "AutoIncrement",
	Identity: "Identity",
	TableCascade: "TableCascade",
	TableIdentity: "TableIdentity",
	TableTruncate: "TableTruncate",
	InsertOnConflict: "InsertOnConflict",
	InsertOnDuplicateKey: "InsertOnDuplicateKey",
	InsertIgnore: "InsertIgnore",
	TableNotExists: "TableNotExists",
	OffsetFetch: "OffsetFetch",
	SelectExists: "SelectExists",
	UpdateFromTable: "UpdateFromTable",
	MSSavepoint: "MSSavepoint",
	GeneratedIdentity: "GeneratedIdentity",
	CompositeIn: "CompositeIn",
	UpdateOrderLimit: "UpdateOrderLimit",
	DeleteOrderLimit: "DeleteOrderLimit",
	DeleteReturning: "DeleteReturning",
	MergeReturning: "MergeReturning",
	AlterColumnExists: "AlterColumnExists",
	FKDefaultOnAction: "FKDefaultOnAction",
}

View File

@@ -0,0 +1,16 @@
package sqltype
// Canonical SQL type names shared across dialects.
const (
	Boolean = "BOOLEAN"
	SmallInt = "SMALLINT"
	Integer = "INTEGER"
	BigInt = "BIGINT"
	Real = "REAL"
	DoublePrecision = "DOUBLE PRECISION"
	VarChar = "VARCHAR"
	Blob = "BLOB"
	Timestamp = "TIMESTAMP"
	JSON = "JSON"
	JSONB = "JSONB"
	HSTORE = "HSTORE"
)

26
vendor/github.com/uptrace/bun/extra/bunjson/json.go generated vendored Normal file
View File

@@ -0,0 +1,26 @@
package bunjson
import (
"encoding/json"
"io"
)
// Compile-time check that StdProvider satisfies Provider.
var _ Provider = (*StdProvider)(nil)
// StdProvider implements Provider using the standard encoding/json package.
type StdProvider struct{}
func (StdProvider) Marshal(v any) ([]byte, error) {
	return json.Marshal(v)
}
func (StdProvider) Unmarshal(data []byte, v any) error {
	return json.Unmarshal(data, v)
}
func (StdProvider) NewEncoder(w io.Writer) Encoder {
	return json.NewEncoder(w)
}
func (StdProvider) NewDecoder(r io.Reader) Decoder {
	return json.NewDecoder(r)
}

View File

@@ -0,0 +1,43 @@
package bunjson
import (
"io"
)
// provider backs the package-level helpers; defaults to encoding/json.
var provider Provider = StdProvider{}
// SetProvider swaps the global JSON implementation. Not synchronized —
// call during program initialization only.
func SetProvider(p Provider) {
	provider = p
}
// Provider abstracts a JSON implementation (marshal/unmarshal plus
// streaming encoder/decoder constructors).
type Provider interface {
	Marshal(v any) ([]byte, error)
	Unmarshal(data []byte, v any) error
	NewEncoder(w io.Writer) Encoder
	NewDecoder(r io.Reader) Decoder
}
// Decoder is the subset of json.Decoder bun relies on.
type Decoder interface {
	Decode(v any) error
	UseNumber()
}
// Encoder is the subset of json.Encoder bun relies on.
type Encoder interface {
	Encode(v any) error
}
// Marshal encodes v using the configured provider.
func Marshal(v any) ([]byte, error) {
	return provider.Marshal(v)
}
// Unmarshal decodes data into v using the configured provider.
func Unmarshal(data []byte, v any) error {
	return provider.Unmarshal(data, v)
}
// NewEncoder returns a streaming encoder from the configured provider.
func NewEncoder(w io.Writer) Encoder {
	return provider.NewEncoder(w)
}
// NewDecoder returns a streaming decoder from the configured provider.
func NewDecoder(r io.Reader) Decoder {
	return provider.NewDecoder(r)
}

112
vendor/github.com/uptrace/bun/hook.go generated vendored Normal file
View File

@@ -0,0 +1,112 @@
package bun
import (
"context"
"database/sql"
"strings"
"sync/atomic"
"time"
"unicode"
)
// QueryEvent carries per-query data through BeforeQuery/AfterQuery hooks.
type QueryEvent struct {
	DB *DB
	IQuery Query // the structured query, when one exists (nil for raw SQL)
	Query string // the fully formatted SQL sent to the driver
	QueryTemplate string // the original query with placeholders
	QueryArgs []any // the args that were interpolated into Query
	Model Model
	StartTime time.Time
	Result sql.Result // set by afterQuery; nil for row-returning queries
	Err error
	Stash map[any]any // scratch space for hooks to pass data between phases
}
// Operation returns a short label for the query: the structured query's
// operation when available, otherwise the leading SQL verb.
func (e *QueryEvent) Operation() string {
	if e.IQuery != nil {
		return e.IQuery.Operation()
	}
	return queryOperation(e.Query)
}
// queryOperation extracts the leading SQL verb from a raw query string:
// leading whitespace is dropped, the token ends at the first space, and
// the result is capped at 16 characters.
func queryOperation(query string) string {
	op := strings.TrimLeftFunc(query, unicode.IsSpace)
	if i := strings.IndexByte(op, ' '); i > 0 {
		op = op[:i]
	}
	const maxOpLen = 16
	if len(op) > maxOpLen {
		return op[:maxOpLen]
	}
	return op
}
// QueryHook observes every query. BeforeQuery may return a replacement
// context that is threaded through to the driver call and to AfterQuery.
type QueryHook interface {
	BeforeQuery(context.Context, *QueryEvent) context.Context
	AfterQuery(context.Context, *QueryEvent)
}
// beforeQuery bumps the query counter and, when hooks are registered,
// builds a QueryEvent and runs each hook's BeforeQuery in registration
// order, threading the (possibly replaced) context through. Returns a nil
// event when there are no hooks; afterQuery treats nil as a no-op.
func (db *DB) beforeQuery(
	ctx context.Context,
	iquery Query,
	queryTemplate string,
	queryArgs []any,
	query string,
	model Model,
) (context.Context, *QueryEvent) {
	atomic.AddUint32(&db.stats.Queries, 1)
	if len(db.queryHooks) == 0 {
		return ctx, nil
	}
	event := &QueryEvent{
		DB: db,
		Model: model,
		IQuery: iquery,
		Query: query,
		QueryTemplate: queryTemplate,
		QueryArgs: queryArgs,
		StartTime: time.Now(),
	}
	for _, hook := range db.queryHooks {
		ctx = hook.BeforeQuery(ctx, event)
	}
	return ctx, event
}
// afterQuery bumps the error counter (sql.ErrNoRows is deliberately not
// counted as an error), attaches result/err to the event, and runs the
// AfterQuery hooks. A nil event means no hooks were registered.
func (db *DB) afterQuery(
	ctx context.Context,
	event *QueryEvent,
	res sql.Result,
	err error,
) {
	switch err {
	case nil, sql.ErrNoRows:
		// nothing
	default:
		atomic.AddUint32(&db.stats.Errors, 1)
	}
	if event == nil {
		return
	}
	event.Result = res
	event.Err = err
	db.afterQueryFromIndex(ctx, event, len(db.queryHooks)-1)
}
// afterQueryFromIndex runs AfterQuery hooks from hookIndex down to 0 —
// i.e. in reverse registration order, mirroring beforeQuery (LIFO).
func (db *DB) afterQueryFromIndex(ctx context.Context, event *QueryEvent, hookIndex int) {
	for ; hookIndex >= 0; hookIndex-- {
		db.queryHooks[hookIndex].AfterQuery(ctx, event)
	}
}

16
vendor/github.com/uptrace/bun/internal/flag.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
package internal
// Flag is a 64-bit bitmask of capability/feature bits.
type Flag uint64

// Has reports whether flag and other share at least one set bit.
func (flag Flag) Has(other Flag) bool {
	return flag&other != 0
}

// Set returns flag with every bit of other set.
func (flag Flag) Set(other Flag) Flag {
	return flag | other
}

// Remove returns flag with every bit of other cleared.
func (flag Flag) Remove(other Flag) Flag {
	return flag &^ other
}

43
vendor/github.com/uptrace/bun/internal/hex.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
package internal
import (
fasthex "github.com/tmthrgd/go-hex"
)
// HexEncoder incrementally builds a PostgreSQL bytea hex literal
// ('\x<hex>') into a byte buffer via the io.Writer interface.
type HexEncoder struct {
	b []byte
	written bool // whether the opening '\x quote has been emitted
}
func NewHexEncoder(b []byte) *HexEncoder {
	return &HexEncoder{
		b: b,
	}
}
// Bytes returns the buffer accumulated so far.
func (enc *HexEncoder) Bytes() []byte {
	return enc.b
}
// Write hex-encodes b into the buffer, lazily emitting the '\x prefix on
// the first call. It never fails and always reports len(b) consumed.
func (enc *HexEncoder) Write(b []byte) (int, error) {
	if !enc.written {
		enc.b = append(enc.b, '\'')
		enc.b = append(enc.b, `\x`...)
		enc.written = true
	}
	i := len(enc.b)
	enc.b = append(enc.b, make([]byte, fasthex.EncodedLen(len(b)))...)
	fasthex.Encode(enc.b[i:], b)
	return len(b), nil
}
// Close terminates the literal with the closing quote, or writes NULL when
// nothing was ever written (nil input).
func (enc *HexEncoder) Close() error {
	if enc.written {
		enc.b = append(enc.b, '\'')
	} else {
		enc.b = append(enc.b, "NULL"...)
	}
	return nil
}

54
vendor/github.com/uptrace/bun/internal/logger.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
package internal
import (
"fmt"
"log"
"os"
)
// Logging is the minimal logger interface bun requires.
type Logging interface {
	Printf(format string, v ...any)
}
// defaultLogger writes to stderr with standard log flags.
var defaultLogger = log.New(os.Stderr, "", log.LstdFlags)
// Logger is the package-wide logger; replace it via SetLogger.
var Logger Logging = &logger{
	log: defaultLogger,
}
// Warn prefixes messages with "WARN: bun: ".
var Warn = &wrapper{
	prefix: "WARN: bun: ",
	logger: Logger,
}
// Deprecated prefixes messages with "DEPRECATED: bun: ".
var Deprecated = &wrapper{
	prefix: "DEPRECATED: bun: ",
	logger: Logger,
}
// logger adapts *log.Logger to Logging, skipping one extra call frame.
type logger struct {
	log *log.Logger
}
func (l *logger) Printf(format string, v ...any) {
	_ = l.log.Output(2, fmt.Sprintf(format, v...))
}
// wrapper prepends a fixed prefix before delegating to another Logging.
type wrapper struct {
	prefix string
	logger Logging
}
func (w *wrapper) Printf(format string, v ...any) {
	w.logger.Printf(w.prefix+format, v...)
}
// SetLogger replaces the global logger (nil restores the default) and
// rewires the Warn and Deprecated wrappers to the new logger.
func SetLogger(newLogger Logging) {
	if newLogger == nil {
		Logger = &logger{log: defaultLogger}
	} else {
		Logger = newLogger
	}
	Warn.logger = Logger
	Deprecated.logger = Logger
}

67
vendor/github.com/uptrace/bun/internal/map_key.go generated vendored Normal file
View File

@@ -0,0 +1,67 @@
package internal
import "reflect"
var ifaceType = reflect.TypeFor[any]()
// MapKey wraps a slice of values in a comparable form (a fixed-size array
// boxed in an interface) so it can be used as a map key.
type MapKey struct {
	iface any
}
func NewMapKey(is []any) MapKey {
	return MapKey{
		iface: newMapKey(is),
	}
}
// newMapKey converts the slice to a same-length array value. Arrays are
// comparable (element-wise), unlike slices. Lengths 1..10 are special-cased
// with concrete array types to avoid reflect allocations on the hot path;
// longer inputs fall through to a reflect-built array.
func newMapKey(is []any) any {
	switch len(is) {
	case 1:
		ptr := new([1]any)
		copy((*ptr)[:], is)
		return *ptr
	case 2:
		ptr := new([2]any)
		copy((*ptr)[:], is)
		return *ptr
	case 3:
		ptr := new([3]any)
		copy((*ptr)[:], is)
		return *ptr
	case 4:
		ptr := new([4]any)
		copy((*ptr)[:], is)
		return *ptr
	case 5:
		ptr := new([5]any)
		copy((*ptr)[:], is)
		return *ptr
	case 6:
		ptr := new([6]any)
		copy((*ptr)[:], is)
		return *ptr
	case 7:
		ptr := new([7]any)
		copy((*ptr)[:], is)
		return *ptr
	case 8:
		ptr := new([8]any)
		copy((*ptr)[:], is)
		return *ptr
	case 9:
		ptr := new([9]any)
		copy((*ptr)[:], is)
		return *ptr
	case 10:
		ptr := new([10]any)
		copy((*ptr)[:], is)
		return *ptr
	default:
	}
	// Slow path: build [len(is)]any dynamically via reflect.
	at := reflect.New(reflect.ArrayOf(len(is), ifaceType)).Elem()
	for i, v := range is {
		*(at.Index(i).Addr().Interface().(*any)) = v
	}
	return at.Interface()
}

169
vendor/github.com/uptrace/bun/internal/parser/parser.go generated vendored Normal file
View File

@@ -0,0 +1,169 @@
package parser
import (
"bytes"
"fmt"
"io"
"strconv"
"github.com/uptrace/bun/internal"
)
// Parser is a simple cursor over a byte slice: b is the input, i the
// current read position.
type Parser struct {
	b []byte
	i int
}
func New(b []byte) *Parser {
	return &Parser{
		b: b,
	}
}
// NewString parses s without copying (see internal.Bytes).
func NewString(s string) *Parser {
	return New(internal.Bytes(s))
}
// Reset repoints the parser at b and rewinds to the start.
func (p *Parser) Reset(b []byte) {
	p.b = b
	p.i = 0
}
// Valid reports whether unread input remains.
func (p *Parser) Valid() bool {
	return p.i < len(p.b)
}
// Remaining returns the unread tail of the input.
func (p *Parser) Remaining() []byte {
	return p.b[p.i:]
}
// ReadByte consumes one byte, or returns io.ErrUnexpectedEOF at the end.
func (p *Parser) ReadByte() (byte, error) {
	if p.Valid() {
		ch := p.b[p.i]
		p.Advance()
		return ch, nil
	}
	return 0, io.ErrUnexpectedEOF
}
// Read consumes one byte, returning 0 at the end of input.
func (p *Parser) Read() byte {
	if p.Valid() {
		ch := p.b[p.i]
		p.Advance()
		return ch
	}
	return 0
}
// Unread steps the cursor back one byte (no-op at the start).
func (p *Parser) Unread() {
	if p.i > 0 {
		p.i--
	}
}
// Peek returns the next byte without consuming it, 0 at end of input.
func (p *Parser) Peek() byte {
	if p.Valid() {
		return p.b[p.i]
	}
	return 0
}
// Advance moves the cursor forward one byte without bounds checking.
func (p *Parser) Advance() {
	p.i++
}
// Skip consumes the next byte if it equals skip, else returns an error
// without consuming anything.
func (p *Parser) Skip(skip byte) error {
	ch := p.Peek()
	if ch == skip {
		p.Advance()
		return nil
	}
	return fmt.Errorf("got %q, wanted %q", ch, skip)
}
// SkipPrefix consumes skip if the remaining input starts with it, else
// returns an error.
func (p *Parser) SkipPrefix(skip []byte) error {
	if !bytes.HasPrefix(p.b[p.i:], skip) {
		return fmt.Errorf("got %q, wanted prefix %q", p.b, skip)
	}
	p.i += len(skip)
	return nil
}
// CutPrefix is SkipPrefix reporting success as a bool instead of an error.
func (p *Parser) CutPrefix(skip []byte) bool {
	if !bytes.HasPrefix(p.b[p.i:], skip) {
		return false
	}
	p.i += len(skip)
	return true
}
// ReadSep consumes up to (and including) the next sep byte, returning the
// bytes before it and true. Without a sep, the remainder is returned with
// false.
func (p *Parser) ReadSep(sep byte) ([]byte, bool) {
	ind := bytes.IndexByte(p.b[p.i:], sep)
	if ind == -1 {
		b := p.b[p.i:]
		p.i = len(p.b)
		return b, false
	}
	b := p.b[p.i : p.i+ind]
	p.i += ind + 1
	return b, true
}
// ReadIdentifier reads either a parenthesized group "(...)" (returning its
// contents) or a maximal run of [0-9A-Za-z_] characters ('_' only after a
// letter has been seen). The bool result reports whether the token was
// purely numeric; it is always false for the parenthesized form and for an
// empty read.
func (p *Parser) ReadIdentifier() (string, bool) {
	if p.i < len(p.b) && p.b[p.i] == '(' {
		s := p.i + 1
		if ind := bytes.IndexByte(p.b[s:], ')'); ind != -1 {
			b := p.b[s : s+ind]
			p.i = s + ind + 1
			return internal.String(b), false
		}
		// Unclosed '(' falls through to the plain scan below (which stops
		// immediately on '(' and yields "", false).
	}
	ind := len(p.b) - p.i
	var alpha bool
	for i, c := range p.b[p.i:] {
		if isNum(c) {
			continue
		}
		if isAlpha(c) || (i > 0 && alpha && c == '_') {
			alpha = true
			continue
		}
		ind = i
		break
	}
	if ind == 0 {
		return "", false
	}
	b := p.b[p.i : p.i+ind]
	p.i += ind
	return internal.String(b), !alpha
}
// ReadNumber consumes a maximal run of ASCII digits and returns its value;
// 0 is returned when no digit is present. Panics only if strconv fails,
// which cannot happen for a digits-only string that fits the scan
// (overflow of a very long digit run would panic).
func (p *Parser) ReadNumber() int {
	ind := len(p.b) - p.i
	for i, c := range p.b[p.i:] {
		if !isNum(c) {
			ind = i
			break
		}
	}
	if ind == 0 {
		return 0
	}
	n, err := strconv.Atoi(string(p.b[p.i : p.i+ind]))
	if err != nil {
		panic(err)
	}
	p.i += ind
	return n
}
// isNum reports whether c is an ASCII decimal digit.
func isNum(c byte) bool {
	return '0' <= c && c <= '9'
}

// isAlpha reports whether c is an ASCII letter (either case).
func isAlpha(c byte) bool {
	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
}

11
vendor/github.com/uptrace/bun/internal/safe.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
// +build appengine
package internal
// String converts a byte slice to a string by copying (appengine-safe
// fallback; see unsafe.go for the zero-copy build).
func String(b []byte) string {
	return string(b)
}
// Bytes converts a string to a byte slice by copying (appengine-safe
// fallback).
func Bytes(s string) []byte {
	return []byte(s)
}

View File

@@ -0,0 +1,184 @@
package tagparser
import (
"strings"
)
// Tag is a parsed struct tag: a leading name plus named options, each of
// which may have been given multiple values.
type Tag struct {
	Name string
	Options map[string][]string
}

// IsZero reports whether the tag carries neither a name nor any options.
func (t Tag) IsZero() bool {
	return t.Name == "" && t.Options == nil
}

// HasOption reports whether the option was present, regardless of value.
func (t Tag) HasOption(name string) bool {
	_, ok := t.Options[name]
	return ok
}

// Option returns the last value recorded for the option and whether the
// option exists at all.
func (t Tag) Option(name string) (string, bool) {
	vs, ok := t.Options[name]
	if !ok {
		return "", false
	}
	return vs[len(vs)-1], true
}
// Parse parses a bun struct-tag string ("name,opt,key:value,...") into a
// Tag. An empty string yields the zero Tag.
func Parse(s string) Tag {
	if s == "" {
		return Tag{}
	}
	p := parser{
		s: s,
	}
	p.parse()
	return p.tag
}
// parser is the single-pass tag scanner: s is the input, i the cursor,
// tag the result under construction.
type parser struct {
	s string
	i int
	tag Tag
	seenName bool // for empty names
}
// setName records the first bare token as the tag name; any subsequent
// bare token is stored as a value-less option instead.
func (p *parser) setName(name string) {
	if p.seenName {
		p.addOption(name, "")
	} else {
		p.seenName = true
		p.tag.Name = name
	}
}
// addOption appends value under key, allocating the map lazily. An empty
// key is dropped, but still marks the name as seen.
func (p *parser) addOption(key, value string) {
	p.seenName = true
	if key == "" {
		return
	}
	if p.tag.Options == nil {
		p.tag.Options = make(map[string][]string)
	}
	if vs, ok := p.tag.Options[key]; ok {
		p.tag.Options[key] = append(vs, value)
	} else {
		p.tag.Options[key] = []string{value}
	}
}
// parse consumes comma-separated key[:value] items until the input ends.
func (p *parser) parse() {
	for p.valid() {
		p.parseKeyValue()
		if p.peek() == ',' {
			p.i++
		}
	}
}
// parseKeyValue reads one item: a bare token (name/flag option), a
// key:value pair, or a quoted token. The terminating ',' is consumed here
// for the bare-token case (hence the p.i-1 slices).
func (p *parser) parseKeyValue() {
	start := p.i
	for p.valid() {
		switch c := p.read(); c {
		case ',':
			key := p.s[start : p.i-1]
			p.setName(key)
			return
		case ':':
			key := p.s[start : p.i-1]
			value := p.parseValue()
			p.addOption(key, value)
			return
		case '"':
			key := p.parseQuotedValue()
			p.setName(key)
			return
		}
	}
	// End of input: whatever remains is a bare token.
	key := p.s[start:p.i]
	p.setName(key)
}
// parseValue reads an option value: terminated by ',' (not consumed into
// the value), switching to quoted mode on '"', and skipping over balanced
// '(' ... ')' groups so commas inside parens don't split the value.
func (p *parser) parseValue() string {
	start := p.i
	for p.valid() {
		switch c := p.read(); c {
		case '"':
			return p.parseQuotedValue()
		case ',':
			return p.s[start : p.i-1]
		case '(':
			p.skipPairs('(', ')')
		}
	}
	if p.i == start {
		return ""
	}
	return p.s[start:p.i]
}
// parseQuotedValue reads until the closing '"'. The fast path slices the
// input directly when the first closing quote is not backslash-escaped;
// otherwise it falls back to a byte-by-byte scan that resolves \" escapes.
// An unterminated quote yields "".
func (p *parser) parseQuotedValue() string {
	if i := strings.IndexByte(p.s[p.i:], '"'); i >= 0 && p.s[p.i+i-1] != '\\' {
		s := p.s[p.i : p.i+i]
		p.i += i + 1
		return s
	}
	b := make([]byte, 0, 16)
	for p.valid() {
		switch c := p.read(); c {
		case '\\':
			b = append(b, p.read())
		case '"':
			return string(b)
		default:
			b = append(b, c)
		}
	}
	return ""
}
// skipPairs consumes input until the matching end delimiter, tracking
// nesting depth and skipping quoted strings (so delimiters inside quotes
// are ignored). Called after the opening start byte was already read.
func (p *parser) skipPairs(start, end byte) {
	var lvl int
	for p.valid() {
		switch c := p.read(); c {
		case '"':
			_ = p.parseQuotedValue()
		case start:
			lvl++
		case end:
			if lvl == 0 {
				return
			}
			lvl--
		}
	}
}
// valid reports whether unread input remains.
func (p *parser) valid() bool {
	return p.i < len(p.s)
}
// read consumes and returns the next byte, 0 at end of input.
func (p *parser) read() byte {
	if !p.valid() {
		return 0
	}
	c := p.s[p.i]
	p.i++
	return c
}
// peek returns the next byte without consuming it, 0 at end of input.
func (p *parser) peek() byte {
	if !p.valid() {
		return 0
	}
	c := p.s[p.i]
	return c
}

61
vendor/github.com/uptrace/bun/internal/time.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
package internal
import (
"fmt"
"time"
)
// Layouts for the date/time shapes PostgreSQL-style drivers emit; the
// tz variants differ only in offset precision (-07[:00[:00]]).
const (
	dateFormat = "2006-01-02"
	timeFormat = "15:04:05.999999999"
	timetzFormat1 = "15:04:05.999999999-07:00:00"
	timetzFormat2 = "15:04:05.999999999-07:00"
	timetzFormat3 = "15:04:05.999999999-07"
	timestampFormat = "2006-01-02 15:04:05.999999999"
	timestamptzFormat1 = "2006-01-02 15:04:05.999999999-07:00:00"
	timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07:00"
	timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07"
)
// ParseTime parses a date, time, or timestamp string by sniffing its
// shape: full timestamps are dispatched on the separator at index 10
// (' ' = SQL style, 'T' = RFC 3339); the timezone-offset variant is
// detected by the sign character position from the end (checked in the
// order :00 offset, bare-hour offset, :00:00 offset — the order matters).
// Values without an explicit zone are interpreted in UTC.
func ParseTime(s string) (time.Time, error) {
	l := len(s)
	if l >= len("2006-01-02 15:04:05") {
		switch s[10] {
		case ' ':
			if c := s[l-6]; c == '+' || c == '-' {
				return time.Parse(timestamptzFormat2, s)
			}
			if c := s[l-3]; c == '+' || c == '-' {
				return time.Parse(timestamptzFormat3, s)
			}
			if c := s[l-9]; c == '+' || c == '-' {
				return time.Parse(timestamptzFormat1, s)
			}
			return time.ParseInLocation(timestampFormat, s, time.UTC)
		case 'T':
			return time.Parse(time.RFC3339Nano, s)
		}
	}
	if l >= len("15:04:05-07") {
		if c := s[l-6]; c == '+' || c == '-' {
			return time.Parse(timetzFormat2, s)
		}
		if c := s[l-3]; c == '+' || c == '-' {
			return time.Parse(timetzFormat3, s)
		}
		if c := s[l-9]; c == '+' || c == '-' {
			return time.Parse(timetzFormat1, s)
		}
	}
	if l < len("15:04:05") {
		return time.Time{}, fmt.Errorf("bun: can't parse time=%q", s)
	}
	// Disambiguate "15:04:05" (':' at index 2) from "2006-01-02".
	if s[2] == ':' {
		return time.ParseInLocation(timeFormat, s, time.UTC)
	}
	return time.ParseInLocation(dateFormat, s, time.UTC)
}

67
vendor/github.com/uptrace/bun/internal/underscore.go generated vendored Normal file
View File

@@ -0,0 +1,67 @@
package internal
// IsUpper reports whether c is an ASCII uppercase letter.
func IsUpper(c byte) bool {
	return 'A' <= c && c <= 'Z'
}

// IsLower reports whether c is an ASCII lowercase letter.
func IsLower(c byte) bool {
	return 'a' <= c && c <= 'z'
}

// ToUpper uppercases an ASCII lowercase letter (undefined for other bytes).
func ToUpper(c byte) byte {
	return c - ('a' - 'A')
}

// ToLower lowercases an ASCII uppercase letter (undefined for other bytes).
func ToLower(c byte) byte {
	return c + ('a' - 'A')
}

// Underscore converts "CamelCasedString" to "camel_cased_string".
// An underscore is inserted before an uppercase letter only at a word
// boundary: not at the start, and only when a lowercase letter is adjacent
// (so acronym runs like "HTTP" stay unseparated).
func Underscore(s string) string {
	out := make([]byte, 0, len(s)+5)
	for i := 0; i < len(s); i++ {
		ch := s[i]
		if !IsUpper(ch) {
			out = append(out, ch)
			continue
		}
		atBoundary := i > 0 && i+1 < len(s) && (IsLower(s[i-1]) || IsLower(s[i+1]))
		if atBoundary {
			out = append(out, '_')
		}
		out = append(out, ToLower(ch))
	}
	return string(out)
}

// CamelCased converts "snake_cased_string" to "SnakeCasedString": every
// underscore is dropped and the following letter is capitalized, as is the
// first letter.
func CamelCased(s string) string {
	out := make([]byte, 0, len(s))
	capitalizeNext := true
	for i := 0; i < len(s); i++ {
		ch := s[i]
		if ch == '_' {
			capitalizeNext = true
			continue
		}
		if capitalizeNext {
			if IsLower(ch) {
				ch = ToUpper(ch)
			}
			capitalizeNext = false
		}
		out = append(out, ch)
	}
	return string(out)
}

// ToExported capitalizes the first letter so the name becomes exported;
// strings that are empty or already non-lowercase-initial are returned
// unchanged.
func ToExported(s string) string {
	if s == "" {
		return s
	}
	first := s[0]
	if !IsLower(first) {
		return s
	}
	b := []byte(s)
	b[0] = ToUpper(first)
	return string(b)
}

22
vendor/github.com/uptrace/bun/internal/unsafe.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
//go:build !appengine
// +build !appengine
package internal
import "unsafe"
// String converts byte slice to string without copying; the caller must
// not mutate b afterwards.
func String(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	return unsafe.String(&b[0], len(b))
}

// Bytes converts string to byte slice without copying; the returned slice
// must never be written to (strings are immutable). Note an empty string
// yields a non-nil empty slice.
func Bytes(s string) []byte {
	if s == "" {
		return []byte{}
	}
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

87
vendor/github.com/uptrace/bun/internal/util.go generated vendored Normal file
View File

@@ -0,0 +1,87 @@
package internal
import (
"reflect"
)
// MakeSliceNextElemFunc returns a generator that yields the next writable
// element of v on each call. Three cases:
//   - array: walk elements by index;
//   - slice of pointers: reuse spare capacity, allocating the element when
//     the reused slot holds nil, otherwise append a fresh *T;
//   - slice of values: reuse spare capacity, otherwise append a zero value.
// In the slice cases v is grown in place via v.Set, so v must be
// addressable.
func MakeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
	if v.Kind() == reflect.Array {
		var pos int
		return func() reflect.Value {
			v := v.Index(pos)
			pos++
			return v
		}
	}
	elemType := v.Type().Elem()
	if elemType.Kind() == reflect.Ptr {
		elemType = elemType.Elem()
		return func() reflect.Value {
			if v.Len() < v.Cap() {
				// Reuse a slot hidden in the capacity; allocate if it is nil.
				v.Set(v.Slice(0, v.Len()+1))
				elem := v.Index(v.Len() - 1)
				if elem.IsNil() {
					elem.Set(reflect.New(elemType))
				}
				return elem
			}
			elem := reflect.New(elemType)
			v.Set(reflect.Append(v, elem))
			return elem
		}
	}
	zero := reflect.Zero(elemType)
	return func() reflect.Value {
		if v.Len() < v.Cap() {
			v.Set(v.Slice(0, v.Len()+1))
			return v.Index(v.Len() - 1)
		}
		v.Set(reflect.Append(v, zero))
		return v.Index(v.Len() - 1)
	}
}
// Unwrap returns the error wrapped by err, or nil when err does not
// implement the Unwrap() error convention.
func Unwrap(err error) error {
	if u, ok := err.(interface{ Unwrap() error }); ok {
		return u.Unwrap()
	}
	return nil
}
// FieldByIndexAlloc walks a nested field index path like
// reflect.Value.FieldByIndex, but allocates any nil intermediate pointer
// struct along the way instead of panicking. v must be addressable for the
// allocation to stick.
func FieldByIndexAlloc(v reflect.Value, index []int) reflect.Value {
	if len(index) == 1 {
		return v.Field(index[0])
	}
	for i, idx := range index {
		if i > 0 {
			v = indirectNil(v)
		}
		v = v.Field(idx)
	}
	return v
}
// indirectNil dereferences a pointer value, first allocating a zero target
// when the pointer is nil. Non-pointer values pass through unchanged.
func indirectNil(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return v
}
// MakeQueryBytes returns zero-length byte slice with capacity of 4096,
// sized to hold most generated queries without reallocation.
func MakeQueryBytes() []byte {
	// TODO: make this configurable?
	return make([]byte, 0, 4096)
}

208
vendor/github.com/uptrace/bun/model.go generated vendored Normal file
View File

@@ -0,0 +1,208 @@
package bun
import (
"context"
"database/sql"
"errors"
"fmt"
"reflect"
"time"
"github.com/uptrace/bun/schema"
)
// errNilModel is returned when a nil/invalid destination is passed to Model.
var errNilModel = errors.New("bun: Model(nil)")
// Types treated as scalar scan targets rather than table structs.
var (
	timeType = reflect.TypeFor[time.Time]()
	bytesType = reflect.TypeFor[[]byte]()
)
// Model is re-exported from schema for convenience.
type Model = schema.Model
// rowScanner is implemented by models that consume one *sql.Rows row at a
// time.
type rowScanner interface {
	ScanRow(ctx context.Context, rows *sql.Rows) error
}
// TableModel is the internal contract for models backed by a mapped table:
// scanning, relation joins, and soft-delete bookkeeping.
type TableModel interface {
	Model
	schema.BeforeAppendModelHook
	schema.BeforeScanRowHook
	schema.AfterScanRowHook
	ScanColumn(column string, src any) error
	Table() *schema.Table
	Relation() *schema.Relation
	join(string) *relationJoin
	getJoin(string) *relationJoin
	getJoins() []relationJoin
	addJoin(relationJoin) *relationJoin
	clone() TableModel
	rootValue() reflect.Value
	parentIndex() []int
	mount(reflect.Value)
	updateSoftDeleteField(time.Time) error
}
// newModel builds a Model for the given Scan destinations. A single
// destination may be any supported kind; multiple destinations must all be
// pointers and are scanned either column-wise (scanModel) when any of them
// is not a slice, or as parallel slices (sliceModel).
func newModel(db *DB, dest []any) (Model, error) {
	if len(dest) == 1 {
		return _newModel(db, dest[0], true)
	}
	values := make([]reflect.Value, len(dest))
	for i, el := range dest {
		v := reflect.ValueOf(el)
		if v.Kind() != reflect.Ptr {
			return nil, fmt.Errorf("bun: Scan(non-pointer %T)", dest)
		}
		v = v.Elem()
		// Any non-slice destination switches to plain column scanning.
		if v.Kind() != reflect.Slice {
			return newScanModel(db, dest), nil
		}
		values[i] = v
	}
	return newSliceModel(db, dest, values), nil
}
// newSingleModel builds a Model for a single destination, disallowing
// plain scan targets (scan=false).
func newSingleModel(db *DB, dest any) (Model, error) {
	return _newModel(db, dest, false)
}
// _newModel dispatches dest to a concrete model implementation:
// Model values pass through, sql.Scanner/time.Time/[]byte become scanModel,
// *map[string]any becomes mapModel, *struct becomes structTableModel,
// slices become slice/map-slice/table models. scan permits raw scan targets.
func _newModel(db *DB, dest any, scan bool) (Model, error) {
	switch dest := dest.(type) {
	case nil:
		return nil, errNilModel
	case Model:
		return dest, nil
	case sql.Scanner:
		if !scan {
			return nil, fmt.Errorf("bun: Model(unsupported %T)", dest)
		}
		return newScanModel(db, []any{dest}), nil
	}
	v := reflect.ValueOf(dest)
	if !v.IsValid() {
		return nil, errNilModel
	}
	if v.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("bun: Model(non-pointer %T)", dest)
	}
	// A typed nil pointer to a struct is still usable as a table model.
	if v.IsNil() {
		typ := v.Type().Elem()
		if typ.Kind() == reflect.Struct {
			return newStructTableModel(db, dest, db.Table(typ)), nil
		}
		return nil, fmt.Errorf("bun: Model(nil %s %T)", typ.Kind(), dest)
	}
	v = v.Elem()
	typ := v.Type()
	// time.Time and []byte are scanned directly, not mapped to tables.
	switch typ {
	case timeType, bytesType:
		return newScanModel(db, []any{dest}), nil
	}
	switch v.Kind() {
	case reflect.Map:
		if err := validMap(typ); err != nil {
			return nil, err
		}
		mapPtr := v.Addr().Interface().(*map[string]any)
		return newMapModel(db, mapPtr), nil
	case reflect.Struct:
		return newStructTableModelValue(db, dest, v), nil
	case reflect.Slice:
		switch elemType := sliceElemType(v); elemType.Kind() {
		case reflect.Struct:
			// []time.Time is a plain slice, not a table model.
			if elemType != timeType {
				return newSliceTableModel(db, dest, v, elemType), nil
			}
		case reflect.Map:
			if err := validMap(elemType); err != nil {
				return nil, err
			}
			slicePtr := v.Addr().Interface().(*[]map[string]any)
			return newMapSliceModel(db, slicePtr), nil
		}
		return newSliceModel(db, []any{dest}, []reflect.Value{v}), nil
	}
	if scan {
		return newScanModel(db, []any{dest}), nil
	}
	return nil, fmt.Errorf("bun: Model(unsupported %T)", dest)
}
// newTableModelIndex builds a TableModel for a relation field addressed by
// the field index path within table's struct type. Struct fields yield a
// structTableModel, slice-of-struct fields a sliceTableModel.
func newTableModelIndex(
	db *DB,
	table *schema.Table,
	root reflect.Value,
	index []int,
	rel *schema.Relation,
) (TableModel, error) {
	typ := typeByIndex(table.Type, index)
	if typ.Kind() == reflect.Struct {
		return &structTableModel{
			db: db,
			table: table.Dialect().Tables().Get(typ),
			rel: rel,
			root: root,
			index: index,
		}, nil
	}
	if typ.Kind() == reflect.Slice {
		structType := indirectType(typ.Elem())
		if structType.Kind() == reflect.Struct {
			m := sliceTableModel{
				structTableModel: structTableModel{
					db: db,
					table: table.Dialect().Tables().Get(structType),
					rel: rel,
					root: root,
					index: index,
				},
			}
			m.init(typ)
			return &m, nil
		}
	}
	return nil, fmt.Errorf("bun: NewModel(%s)", typ)
}
func validMap(typ reflect.Type) error {
if typ.Key().Kind() != reflect.String || typ.Elem().Kind() != reflect.Interface {
return fmt.Errorf("bun: Model(unsupported %s) (expected *map[string]any)",
typ)
}
return nil
}
//------------------------------------------------------------------------------
// isSingleRowModel reports whether m scans at most a single row
// (map, struct, or plain scan destinations).
func isSingleRowModel(m Model) bool {
	switch m.(type) {
	case *mapModel:
		return true
	case *structTableModel:
		return true
	case *scanModel:
		return true
	default:
		return false
	}
}

186
vendor/github.com/uptrace/bun/model_map.go generated vendored Normal file
View File

@@ -0,0 +1,186 @@
package bun
import (
"bytes"
"context"
"database/sql"
"reflect"
"slices"
"github.com/uptrace/bun/schema"
)
// mapModel scans a single row into a map[string]any destination.
type mapModel struct {
	db *DB
	dest *map[string]any
	m map[string]any
	rows *sql.Rows
	columns []string
	_columnTypes []*sql.ColumnType
	scanIndex int
}
var _ Model = (*mapModel)(nil)
// newMapModel wraps dest; an existing map is reused when dest is non-nil.
func newMapModel(db *DB, dest *map[string]any) *mapModel {
	m := &mapModel{
		db: db,
		dest: dest,
	}
	if dest != nil {
		m.m = *dest
	}
	return m
}
// Value returns the destination pointer.
func (m *mapModel) Value() any {
	return m.dest
}
// ScanRows scans at most one row into the destination map and returns the
// number of scanned rows (0 or 1).
func (m *mapModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	if !rows.Next() {
		return 0, rows.Err()
	}
	columns, err := rows.Columns()
	if err != nil {
		return 0, err
	}
	m.rows = rows
	m.columns = columns
	// makeDest makes rows.Scan call m.Scan once per column.
	dest := makeDest(m, len(columns))
	if m.m == nil {
		m.m = make(map[string]any, len(m.columns))
	}
	m.scanIndex = 0
	if err := rows.Scan(dest...); err != nil {
		return 0, err
	}
	*m.dest = m.m
	return 1, nil
}
// Scan stores one column value into the map. []byte values are decoded
// using the column's ScanType; all other values are stored as-is.
func (m *mapModel) Scan(src any) error {
	if _, ok := src.([]byte); !ok {
		return m.scanRaw(src)
	}
	columnTypes, err := m.columnTypes()
	if err != nil {
		return err
	}
	scanType := columnTypes[m.scanIndex].ScanType()
	switch scanType.Kind() {
	case reflect.Interface:
		return m.scanRaw(src)
	case reflect.Slice:
		if scanType.Elem().Kind() == reflect.Uint8 {
			// Reference types such as []byte are only valid until the next call to Scan.
			src := bytes.Clone(src.([]byte))
			return m.scanRaw(src)
		}
	}
	// Decode through the schema scanner into a fresh value of the scan type.
	dest := reflect.New(scanType).Elem()
	if err := schema.Scanner(scanType)(dest, src); err != nil {
		return err
	}
	return m.scanRaw(dest.Interface())
}
// columnTypes lazily fetches and memoizes the column types of the current rows.
func (m *mapModel) columnTypes() ([]*sql.ColumnType, error) {
	if m._columnTypes != nil {
		return m._columnTypes, nil
	}
	types, err := m.rows.ColumnTypes()
	if err != nil {
		return nil, err
	}
	m._columnTypes = types
	return types, nil
}
// scanRaw stores src under the current column name and advances scanIndex.
func (m *mapModel) scanRaw(src any) error {
	columnName := m.columns[m.scanIndex]
	m.scanIndex++
	m.m[columnName] = src
	return nil
}
// appendColumnsValues appends a " (col, ...) VALUES (val, ...)" clause with
// columns sorted by name for deterministic output. With a nop generator,
// placeholders ('?') are emitted instead of literal values.
func (m *mapModel) appendColumnsValues(gen schema.QueryGen, b []byte) []byte {
	keys := make([]string, 0, len(m.m))
	for k := range m.m {
		keys = append(keys, k)
	}
	slices.Sort(keys)
	b = append(b, " ("...)
	for i, k := range keys {
		if i > 0 {
			b = append(b, ", "...)
		}
		b = gen.AppendIdent(b, k)
	}
	b = append(b, ") VALUES ("...)
	isTemplate := gen.IsNop()
	for i, k := range keys {
		if i > 0 {
			b = append(b, ", "...)
		}
		if isTemplate {
			b = append(b, '?')
		} else {
			b = gen.Append(b, m.m[k])
		}
	}
	b = append(b, ")"...)
	return b
}
// appendSet appends "col = val, ..." assignments with columns sorted by
// name. With a nop generator, '?' placeholders replace the values.
func (m *mapModel) appendSet(gen schema.QueryGen, b []byte) []byte {
	keys := make([]string, 0, len(m.m))
	for k := range m.m {
		keys = append(keys, k)
	}
	slices.Sort(keys)
	isTemplate := gen.IsNop()
	for i, k := range keys {
		if i > 0 {
			b = append(b, ", "...)
		}
		b = gen.AppendIdent(b, k)
		b = append(b, " = "...)
		if isTemplate {
			b = append(b, '?')
		} else {
			b = gen.Append(b, m.m[k])
		}
	}
	return b
}
// makeDest returns n scan destinations all pointing at v, so that
// rows.Scan dispatches every column through v's Scan method.
func makeDest(v any, n int) []any {
	dest := make([]any, n)
	for i := 0; i < n; i++ {
		dest[i] = v
	}
	return dest
}

153
vendor/github.com/uptrace/bun/model_map_slice.go generated vendored Normal file
View File

@@ -0,0 +1,153 @@
package bun
import (
"context"
"database/sql"
"errors"
"slices"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/schema"
)
// mapSliceModel scans many rows into a []map[string]any destination.
type mapSliceModel struct {
	mapModel
	dest *[]map[string]any
	keys []string
}
var _ Model = (*mapSliceModel)(nil)
// newMapSliceModel wraps dest for scanning.
func newMapSliceModel(db *DB, dest *[]map[string]any) *mapSliceModel {
	return &mapSliceModel{
		mapModel: mapModel{
			db: db,
		},
		dest: dest,
	}
}
// Value returns the destination pointer.
func (m *mapSliceModel) Value() any {
	return m.dest
}
// SetCap pre-allocates the destination slice for an expected number of rows.
// The hint is clamped to 100 to avoid over-allocating for large results; the
// slice grows naturally beyond that while scanning. (The parameter was
// renamed from `cap`, which shadowed the builtin of the same name.)
func (m *mapSliceModel) SetCap(n int) {
	if n > 100 {
		n = 100
	}
	// Reallocate only when the current slice is too small to hold the hint.
	if slice := *m.dest; len(slice) < n {
		*m.dest = make([]map[string]any, 0, n)
	}
}
// ScanRows scans every remaining row into a fresh map, appending each to the
// destination slice (reusing its backing array), and returns the row count.
func (m *mapSliceModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	columns, err := rows.Columns()
	if err != nil {
		return 0, err
	}
	m.rows = rows
	m.columns = columns
	dest := makeDest(m, len(columns))
	slice := *m.dest
	if len(slice) > 0 {
		// Reset length but keep capacity from a previous scan.
		slice = slice[:0]
	}
	var n int
	for rows.Next() {
		m.m = make(map[string]any, len(m.columns))
		m.scanIndex = 0
		if err := rows.Scan(dest...); err != nil {
			return 0, err
		}
		slice = append(slice, m.m)
		n++
	}
	if err := rows.Err(); err != nil {
		return 0, err
	}
	*m.dest = slice
	return n, nil
}
// appendColumns appends the sorted key list of the first map as a
// comma-separated identifier list.
func (m *mapSliceModel) appendColumns(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if err := m.initKeys(); err != nil {
		return nil, err
	}
	for i, k := range m.keys {
		if i > 0 {
			b = append(b, ", "...)
		}
		b = gen.AppendIdent(b, k)
	}
	return b, nil
}
// appendValues appends the VALUES tuples for every map in the slice. With a
// nop generator only one row of '?' placeholders is emitted. Row separators
// use "), ROW(" when the dialect supports VALUES ROW, otherwise "), (".
func (m *mapSliceModel) appendValues(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if err := m.initKeys(); err != nil {
		return nil, err
	}
	slice := *m.dest
	if gen.IsNop() {
		for i := range m.keys {
			if i > 0 {
				b = append(b, ", "...)
			}
			b = append(b, '?')
		}
		return b, nil
	}
	for i, el := range slice {
		if i > 0 {
			b = append(b, "), "...)
			if m.db.HasFeature(feature.ValuesRow) {
				b = append(b, "ROW("...)
			} else {
				b = append(b, '(')
			}
		}
		for j, key := range m.keys {
			if j > 0 {
				b = append(b, ", "...)
			}
			b = gen.Append(b, el[key])
		}
	}
	return b, nil
}
// initKeys computes and caches the sorted column names from the first map
// in the destination slice; it fails when the slice is empty.
func (m *mapSliceModel) initKeys() error {
	if m.keys != nil {
		return nil
	}
	rows := *m.dest
	if len(rows) == 0 {
		return errors.New("bun: map slice is empty")
	}
	first := rows[0]
	keys := make([]string, 0, len(first))
	for key := range first {
		keys = append(keys, key)
	}
	slices.Sort(keys)
	m.keys = keys
	return nil
}

56
vendor/github.com/uptrace/bun/model_scan.go generated vendored Normal file
View File

@@ -0,0 +1,56 @@
package bun
import (
"context"
"database/sql"
"reflect"
"github.com/uptrace/bun/schema"
)
// scanModel scans a single row into a flat list of plain Scan destinations.
type scanModel struct {
	db *DB
	dest []any
	scanIndex int
}
var _ Model = (*scanModel)(nil)
// newScanModel wraps plain Scan destinations.
func newScanModel(db *DB, dest []any) *scanModel {
	return &scanModel{
		db: db,
		dest: dest,
	}
}
// Value returns the destinations.
func (m *scanModel) Value() any {
	return m.dest
}
// ScanRows scans at most one row into the destinations via m.Scan and
// returns the scanned row count (0 or 1).
func (m *scanModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	if !rows.Next() {
		return 0, rows.Err()
	}
	dest := makeDest(m, len(m.dest))
	m.scanIndex = 0
	if err := rows.Scan(dest...); err != nil {
		return 0, err
	}
	return 1, nil
}
// ScanRow scans the current row directly into the destinations.
func (m *scanModel) ScanRow(ctx context.Context, rows *sql.Rows) error {
	return rows.Scan(m.dest...)
}
// Scan decodes one column value into the next destination in order.
func (m *scanModel) Scan(src any) error {
	dest := reflect.ValueOf(m.dest[m.scanIndex])
	m.scanIndex++
	scanner := schema.Scanner(dest.Type())
	return scanner(dest, src)
}

82
vendor/github.com/uptrace/bun/model_slice.go generated vendored Normal file
View File

@@ -0,0 +1,82 @@
package bun
import (
"context"
"database/sql"
"reflect"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// sliceInfo holds the per-destination element allocator and scanner.
type sliceInfo struct {
	nextElem func() reflect.Value
	scan schema.ScannerFunc
}
// sliceModel scans column i of every row into the i-th destination slice.
type sliceModel struct {
	dest []any
	values []reflect.Value
	scanIndex int
	info []sliceInfo
}
var _ Model = (*sliceModel)(nil)
// newSliceModel wraps the slice destinations; db is currently unused here.
func newSliceModel(db *DB, dest []any, values []reflect.Value) *sliceModel {
	return &sliceModel{
		dest: dest,
		values: values,
	}
}
// Value returns the destinations.
func (m *sliceModel) Value() any {
	return m.dest
}
// ScanRows truncates each destination slice (keeping capacity), then appends
// one element per slice for every row, and returns the scanned row count.
func (m *sliceModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	columns, err := rows.Columns()
	if err != nil {
		return 0, err
	}
	m.info = make([]sliceInfo, len(m.values))
	for i, v := range m.values {
		if v.IsValid() && v.Len() > 0 {
			// Reset length but reuse the backing array.
			v.Set(v.Slice(0, 0))
		}
		m.info[i] = sliceInfo{
			nextElem: internal.MakeSliceNextElemFunc(v),
			scan: schema.Scanner(v.Type().Elem()),
		}
	}
	if len(columns) == 0 {
		return 0, nil
	}
	dest := makeDest(m, len(columns))
	var n int
	for rows.Next() {
		m.scanIndex = 0
		if err := rows.Scan(dest...); err != nil {
			return 0, err
		}
		n++
	}
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return n, nil
}
// Scan appends a new element to the slice for the current column and
// decodes src into it.
func (m *sliceModel) Scan(src any) error {
	info := m.info[m.scanIndex]
	m.scanIndex++
	dest := info.nextElem()
	return info.scan(dest, src)
}

185
vendor/github.com/uptrace/bun/model_table_has_many.go generated vendored Normal file
View File

@@ -0,0 +1,185 @@
package bun
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"reflect"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// hasManyModel scans rows of a has-many join and distributes them to the
// matching base-model values keyed by the relation's primary keys.
type hasManyModel struct {
	*sliceTableModel
	baseTable *schema.Table
	rel *schema.Relation
	baseValues map[internal.MapKey][]reflect.Value
	structKey []any
}
var _ TableModel = (*hasManyModel)(nil)
// newHasManyModel builds a hasManyModel for the join, indexing the base
// model values by their primary keys. Returns nil when there are no bases.
func newHasManyModel(j *relationJoin) *hasManyModel {
	baseTable := j.BaseModel.Table()
	joinModel := j.JoinModel.(*sliceTableModel)
	baseValues := baseValues(joinModel, j.Relation.BasePKs)
	if len(baseValues) == 0 {
		return nil
	}
	m := hasManyModel{
		sliceTableModel: joinModel,
		baseTable: baseTable,
		rel: j.Relation,
		baseValues: baseValues,
	}
	if !m.sliceOfPtr {
		// Reusable scratch struct when elements are stored by value.
		m.strct = reflect.New(m.table.Type).Elem()
	}
	return &m
}
// ScanRows scans every row into a struct and parks it on the matching base
// value; returns the scanned row count.
func (m *hasManyModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	columns, err := rows.Columns()
	if err != nil {
		return 0, err
	}
	m.columns = columns
	dest := makeDest(m, len(columns))
	var n int
	m.structKey = make([]any, len(m.rel.JoinPKs))
	for rows.Next() {
		if m.sliceOfPtr {
			// Fresh struct per row when storing pointers.
			m.strct = reflect.New(m.table.Type).Elem()
		} else {
			m.strct.Set(m.table.ZeroValue)
		}
		m.structInited = false
		m.scanIndex = 0
		if err := rows.Scan(dest...); err != nil {
			return 0, err
		}
		if err := m.parkStruct(); err != nil {
			return 0, err
		}
		n++
	}
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return n, nil
}
// Scan decodes one column into the current struct and records the value in
// structKey when the column is one of the relation's join primary keys.
func (m *hasManyModel) Scan(src any) error {
	column := m.columns[m.scanIndex]
	m.scanIndex++
	field := m.table.LookupField(column)
	if field == nil {
		return fmt.Errorf("bun: %s does not have column %q", m.table.TypeName, column)
	}
	if err := field.ScanValue(m.strct, src); err != nil {
		return err
	}
	for i, f := range m.rel.JoinPKs {
		if f.Name == column {
			m.structKey[i] = indirectAsKey(field.Value(m.strct))
			break
		}
	}
	return nil
}
// parkStruct appends the scanned struct to every base value whose key
// matches structKey; later pointer appends get a clone so bases do not
// share one mutable struct.
func (m *hasManyModel) parkStruct() error {
	baseValues, ok := m.baseValues[internal.NewMapKey(m.structKey)]
	if !ok {
		return fmt.Errorf(
			"bun: has-many relation=%s does not have base %s with id=%q (check join conditions)",
			m.rel.Field.GoName, m.baseTable, m.structKey)
	}
	for i, v := range baseValues {
		if !m.sliceOfPtr {
			v.Set(reflect.Append(v, m.strct))
			continue
		}
		if i == 0 {
			v.Set(reflect.Append(v, m.strct.Addr()))
			continue
		}
		clone := reflect.New(m.strct.Type()).Elem()
		clone.Set(m.strct)
		v.Set(reflect.Append(v, clone.Addr()))
	}
	return nil
}
// clone returns a shallow copy with a cloned underlying sliceTableModel.
func (m *hasManyModel) clone() TableModel {
	return &hasManyModel{
		sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel),
		baseTable: m.baseTable,
		rel: m.rel,
		baseValues: m.baseValues,
		structKey: m.structKey,
	}
}
// baseValues indexes, by primary-key values, the relation field of every
// base struct reachable from the model's root value.
func baseValues(model TableModel, fields []*schema.Field) map[internal.MapKey][]reflect.Value {
	fieldIndex := model.Relation().Field.Index
	m := make(map[internal.MapKey][]reflect.Value)
	key := make([]any, 0, len(fields))
	walk(model.rootValue(), model.parentIndex(), func(v reflect.Value) {
		key = modelKey(key[:0], v, fields)
		mapKey := internal.NewMapKey(key)
		m[mapKey] = append(m[mapKey], v.FieldByIndex(fieldIndex))
	})
	return m
}
// modelKey appends the (dereferenced) values of fields in strct to key.
func modelKey(key []any, strct reflect.Value, fields []*schema.Field) []any {
	for _, f := range fields {
		key = append(key, indirectAsKey(f.Value(strct)))
	}
	return key
}
// indirectAsKey return the field value dereferencing the pointer if necessary.
// The value is then used as a map key. Nil pointers map to nil; driver.Valuer
// fields are resolved to their driver value when that value is hashable.
func indirectAsKey(field reflect.Value) any {
	if field.Kind() == reflect.Pointer && field.IsNil() {
		return nil
	}
	i := field.Interface()
	if valuer, ok := i.(driver.Valuer); ok {
		if v, err := valuer.Value(); err == nil {
			switch reflect.TypeOf(v).Kind() {
			case reflect.Array, reflect.Chan, reflect.Func,
				reflect.Map, reflect.Pointer, reflect.Slice, reflect.UnsafePointer:
				// NOTE #1107, these types cannot be used as map key,
				// let us use original logic.
				return i
			default:
				return v
			}
		}
	}
	return reflect.Indirect(field).Interface()
}

142
vendor/github.com/uptrace/bun/model_table_m2m.go generated vendored Normal file
View File

@@ -0,0 +1,142 @@
package bun
import (
"context"
"database/sql"
"fmt"
"reflect"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// m2mModel scans rows of a many-to-many join and distributes them to the
// matching base-model values keyed by the M2M base primary keys.
type m2mModel struct {
	*sliceTableModel
	baseTable *schema.Table
	rel *schema.Relation
	baseValues map[internal.MapKey][]reflect.Value
	structKey []any
}
var _ TableModel = (*m2mModel)(nil)
// newM2MModel builds an m2mModel for the join; returns nil when there are
// no base values to attach results to.
func newM2MModel(j *relationJoin) *m2mModel {
	baseTable := j.BaseModel.Table()
	joinModel := j.JoinModel.(*sliceTableModel)
	baseValues := baseValues(joinModel, j.Relation.BasePKs)
	if len(baseValues) == 0 {
		return nil
	}
	m := &m2mModel{
		sliceTableModel: joinModel,
		baseTable: baseTable,
		rel: j.Relation,
		baseValues: baseValues,
	}
	if !m.sliceOfPtr {
		// Reusable scratch struct when elements are stored by value.
		m.strct = reflect.New(m.table.Type).Elem()
	}
	return m
}
// ScanRows scans every row into a struct and parks it on the matching base
// value; returns the scanned row count.
func (m *m2mModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	columns, err := rows.Columns()
	if err != nil {
		return 0, err
	}
	m.columns = columns
	dest := makeDest(m, len(columns))
	var n int
	for rows.Next() {
		if m.sliceOfPtr {
			m.strct = reflect.New(m.table.Type).Elem()
		} else {
			m.strct.Set(m.table.ZeroValue)
		}
		m.structInited = false
		m.scanIndex = 0
		// structKey is rebuilt per row from the leading base-PK columns.
		m.structKey = m.structKey[:0]
		if err := rows.Scan(dest...); err != nil {
			return 0, err
		}
		if err := m.parkStruct(); err != nil {
			return 0, err
		}
		n++
	}
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return n, nil
}
// Scan decodes one column; the leading columns (base pks) additionally feed
// the structKey used to locate the base model.
func (m *m2mModel) Scan(src any) error {
	column := m.columns[m.scanIndex]
	m.scanIndex++
	// Base pks must come first.
	if m.scanIndex <= len(m.rel.M2MBasePKs) {
		return m.scanM2MColumn(column, src)
	}
	if field, ok := m.table.FieldMap[column]; ok {
		return field.ScanValue(m.strct, src)
	}
	_, err := m.scanColumn(column, src)
	return err
}
// scanM2MColumn records the base-PK value for column in structKey, then
// scans it into the struct as a regular column.
func (m *m2mModel) scanM2MColumn(column string, src any) error {
	for _, field := range m.rel.M2MBasePKs {
		if field.Name == column {
			dest := reflect.New(field.IndirectType).Elem()
			if err := field.Scan(dest, src); err != nil {
				return err
			}
			m.structKey = append(m.structKey, indirectAsKey(dest))
			break
		}
	}
	_, err := m.scanColumn(column, src)
	return err
}
// parkStruct appends the scanned struct to every base value whose key
// matches structKey.
func (m *m2mModel) parkStruct() error {
	baseValues, ok := m.baseValues[internal.NewMapKey(m.structKey)]
	if !ok {
		return fmt.Errorf(
			"bun: m2m relation=%s does not have base %s with key=%q (check join conditions)",
			m.rel.Field.GoName, m.baseTable, m.structKey)
	}
	for _, v := range baseValues {
		if m.sliceOfPtr {
			v.Set(reflect.Append(v, m.strct.Addr()))
		} else {
			v.Set(reflect.Append(v, m.strct))
		}
	}
	return nil
}
// clone returns a shallow copy with a cloned underlying sliceTableModel.
func (m *m2mModel) clone() TableModel {
	return &m2mModel{
		sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel),
		baseTable: m.baseTable,
		rel: m.rel,
		baseValues: m.baseValues,
		structKey: m.structKey,
	}
}

136
vendor/github.com/uptrace/bun/model_table_slice.go generated vendored Normal file
View File

@@ -0,0 +1,136 @@
package bun
import (
"context"
"database/sql"
"reflect"
"time"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// sliceTableModel maps a slice-of-struct destination to a table, scanning
// one struct per row.
type sliceTableModel struct {
	structTableModel
	slice reflect.Value
	sliceLen int
	sliceOfPtr bool
	nextElem func() reflect.Value
}
var _ TableModel = (*sliceTableModel)(nil)
// newSliceTableModel builds the model for a slice destination whose element
// type is elemType.
func newSliceTableModel(
	db *DB, dest any, slice reflect.Value, elemType reflect.Type,
) *sliceTableModel {
	m := &sliceTableModel{
		structTableModel: structTableModel{
			db: db,
			table: db.Table(elemType),
			dest: dest,
			root: slice,
		},
		slice: slice,
		sliceLen: slice.Len(),
		nextElem: internal.MakeSliceNextElemFunc(slice),
	}
	m.init(slice.Type())
	return m
}
// init records whether the slice stores pointers (or interfaces).
func (m *sliceTableModel) init(sliceType reflect.Type) {
	switch sliceType.Elem().Kind() {
	case reflect.Ptr, reflect.Interface:
		m.sliceOfPtr = true
	}
}
// join resolves (creating if needed) the relation join for name, bound to
// the slice value.
func (m *sliceTableModel) join(name string) *relationJoin {
	return m._join(m.slice, name)
}
// ScanRows truncates the slice (keeping capacity), appends one element per
// row, and returns the scanned row count.
func (m *sliceTableModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	columns, err := rows.Columns()
	if err != nil {
		return 0, err
	}
	m.columns = columns
	dest := makeDest(m, len(columns))
	if m.slice.IsValid() && m.slice.Len() > 0 {
		m.slice.Set(m.slice.Slice(0, 0))
	}
	var n int
	for rows.Next() {
		m.strct = m.nextElem()
		if m.sliceOfPtr {
			m.strct = m.strct.Elem()
		}
		m.structInited = false
		if err := m.scanRow(ctx, rows, dest); err != nil {
			return 0, err
		}
		n++
	}
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return n, nil
}
var _ schema.BeforeAppendModelHook = (*sliceTableModel)(nil)
// BeforeAppendModel invokes the BeforeAppendModel hook on every element of
// the slice, addressing value elements so pointer-receiver hooks work.
func (m *sliceTableModel) BeforeAppendModel(ctx context.Context, query Query) error {
	if !m.table.HasBeforeAppendModelHook() || !m.slice.IsValid() {
		return nil
	}
	sliceLen := m.slice.Len()
	for i := 0; i < sliceLen; i++ {
		strct := m.slice.Index(i)
		if !m.sliceOfPtr {
			strct = strct.Addr()
		}
		err := strct.Interface().(schema.BeforeAppendModelHook).BeforeAppendModel(ctx, query)
		if err != nil {
			return err
		}
	}
	return nil
}
// Inherit these hooks from structTableModel.
var (
	_ schema.BeforeScanRowHook = (*sliceTableModel)(nil)
	_ schema.AfterScanRowHook = (*sliceTableModel)(nil)
)
// updateSoftDeleteField sets the soft-delete timestamp on every element.
func (m *sliceTableModel) updateSoftDeleteField(tm time.Time) error {
	sliceLen := m.slice.Len()
	for i := 0; i < sliceLen; i++ {
		strct := indirect(m.slice.Index(i))
		fv := m.table.SoftDeleteField.Value(strct)
		if err := m.table.UpdateSoftDeleteField(fv, tm); err != nil {
			return err
		}
	}
	return nil
}
// clone returns a copy with a cloned embedded structTableModel; the slice
// value and element helpers are shared.
func (m *sliceTableModel) clone() TableModel {
	return &sliceTableModel{
		structTableModel: *m.structTableModel.clone().(*structTableModel),
		slice: m.slice,
		sliceLen: m.sliceLen,
		sliceOfPtr: m.sliceOfPtr,
		nextElem: m.nextElem,
	}
}

373
vendor/github.com/uptrace/bun/model_table_struct.go generated vendored Normal file
View File

@@ -0,0 +1,373 @@
package bun
import (
"context"
"database/sql"
"fmt"
"reflect"
"strings"
"time"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// structTableModel maps a single struct destination to a table and scans
// one row into it, resolving relation joins into nested structs.
type structTableModel struct {
	db *DB
	table *schema.Table
	rel *schema.Relation
	joins []relationJoin
	dest any
	root reflect.Value
	index []int
	strct reflect.Value
	structInited bool
	structInitErr error
	columns []string
	scanIndex int
}
var _ TableModel = (*structTableModel)(nil)
// newStructTableModel builds a model with no bound struct value yet
// (used for typed-nil destinations).
func newStructTableModel(db *DB, dest any, table *schema.Table) *structTableModel {
	return &structTableModel{
		db: db,
		table: table,
		dest: dest,
	}
}
// newStructTableModelValue builds a model bound to the struct value v.
func newStructTableModelValue(db *DB, dest any, v reflect.Value) *structTableModel {
	return &structTableModel{
		db: db,
		table: db.Table(v.Type()),
		dest: dest,
		root: v,
		strct: v,
	}
}
// Value returns the destination.
func (m *structTableModel) Value() any {
	return m.dest
}
// Table returns the mapped table.
func (m *structTableModel) Table() *schema.Table {
	return m.table
}
// Relation returns the relation this model represents (nil for roots).
func (m *structTableModel) Relation() *schema.Relation {
	return m.rel
}
// initStruct lazily prepares m.strct for scanning: it unwraps interfaces,
// allocates nil pointers before dereferencing them, and mounts
// has-one/belongs-to joins. The outcome is memoized via structInited /
// structInitErr. (The original if/else duplicated the Elem() dereference in
// both branches; the branches are merged here.)
func (m *structTableModel) initStruct() error {
	if m.structInited {
		return m.structInitErr
	}
	m.structInited = true
	switch m.strct.Kind() {
	case reflect.Invalid:
		m.structInitErr = errNilModel
		return m.structInitErr
	case reflect.Interface:
		m.strct = m.strct.Elem()
	}
	if m.strct.Kind() == reflect.Ptr {
		if m.strct.IsNil() {
			m.strct.Set(reflect.New(m.strct.Type().Elem()))
		}
		m.strct = m.strct.Elem()
	}
	m.mountJoins()
	return nil
}
// mountJoins rebinds has-one/belongs-to join models to the current struct.
func (m *structTableModel) mountJoins() {
	for i := range m.joins {
		j := &m.joins[i]
		switch j.Relation.Type {
		case schema.HasOneRelation, schema.BelongsToRelation:
			j.JoinModel.mount(m.strct)
		}
	}
}
var _ schema.BeforeAppendModelHook = (*structTableModel)(nil)
// BeforeAppendModel invokes the table's BeforeAppendModel hook, if any.
func (m *structTableModel) BeforeAppendModel(ctx context.Context, query Query) error {
	if !m.table.HasBeforeAppendModelHook() || !m.strct.IsValid() {
		return nil
	}
	return m.strct.Addr().Interface().(schema.BeforeAppendModelHook).BeforeAppendModel(ctx, query)
}
var _ schema.BeforeScanRowHook = (*structTableModel)(nil)
// BeforeScanRow invokes the table's BeforeScanRow hook, if any.
func (m *structTableModel) BeforeScanRow(ctx context.Context) error {
	if m.table.HasBeforeScanRowHook() {
		return m.strct.Addr().Interface().(schema.BeforeScanRowHook).BeforeScanRow(ctx)
	}
	return nil
}
var _ schema.AfterScanRowHook = (*structTableModel)(nil)
// AfterScanRow invokes the AfterScanRow hook on the struct and its mounted
// has-one/belongs-to joins, returning the first error encountered.
func (m *structTableModel) AfterScanRow(ctx context.Context) error {
	// Nothing was scanned into the struct, so no hook should fire.
	if !m.structInited {
		return nil
	}
	if m.table.HasAfterScanRowHook() {
		firstErr := m.strct.Addr().Interface().(schema.AfterScanRowHook).AfterScanRow(ctx)
		for _, j := range m.joins {
			switch j.Relation.Type {
			case schema.HasOneRelation, schema.BelongsToRelation:
				if err := j.JoinModel.AfterScanRow(ctx); err != nil && firstErr == nil {
					firstErr = err
				}
			}
		}
		return firstErr
	}
	return nil
}
// getJoin returns the join matching the relation field's DB or Go name,
// or nil when not present.
func (m *structTableModel) getJoin(name string) *relationJoin {
	for i := range m.joins {
		j := &m.joins[i]
		if j.Relation.Field.Name == name || j.Relation.Field.GoName == name {
			return j
		}
	}
	return nil
}
// getJoins returns all registered joins.
func (m *structTableModel) getJoins() []relationJoin {
	return m.joins
}
// addJoin appends j and returns a pointer to the stored copy.
func (m *structTableModel) addJoin(j relationJoin) *relationJoin {
	m.joins = append(m.joins, j)
	return &m.joins[len(m.joins)-1]
}
// join resolves (creating if needed) the relation join for name.
func (m *structTableModel) join(name string) *relationJoin {
	return m._join(m.strct, name)
}
// _join walks a dotted relation path (e.g. "Author.Publisher"), reusing
// already-registered joins and creating missing ones bound to bind.
// Returns nil when any path segment is not a known relation.
func (m *structTableModel) _join(bind reflect.Value, name string) *relationJoin {
	path := strings.Split(name, ".")
	index := make([]int, 0, len(path))
	currJoin := relationJoin{
		BaseModel: m,
		JoinModel: m,
	}
	var lastJoin *relationJoin
	for _, name := range path {
		relation, ok := currJoin.JoinModel.Table().Relations[name]
		if !ok {
			return nil
		}
		currJoin.Relation = relation
		// Accumulate the field index path from the root struct.
		index = append(index, relation.Field.Index...)
		if j := currJoin.JoinModel.getJoin(name); j != nil {
			currJoin.BaseModel = j.BaseModel
			currJoin.JoinModel = j.JoinModel
			lastJoin = j
		} else {
			model, err := newTableModelIndex(m.db, m.table, bind, index, relation)
			if err != nil {
				return nil
			}
			currJoin.Parent = lastJoin
			currJoin.BaseModel = currJoin.JoinModel
			currJoin.JoinModel = model
			lastJoin = currJoin.BaseModel.addJoin(currJoin)
		}
	}
	return lastJoin
}
// rootValue returns the root destination value this model hangs off.
func (m *structTableModel) rootValue() reflect.Value {
	return m.root
}
// parentIndex returns the field index path of the parent struct, i.e. the
// model's index with the relation field's own index trimmed off the end.
func (m *structTableModel) parentIndex() []int {
	return m.index[:len(m.index)-len(m.rel.Field.Index)]
}
// mount rebinds the model to the relation field inside host, allocating
// intermediate nil pointers, and resets lazy struct initialization.
func (m *structTableModel) mount(host reflect.Value) {
	m.strct = internal.FieldByIndexAlloc(host, m.rel.Field.Index)
	m.structInited = false
}
// updateSoftDeleteField sets the soft-delete timestamp on the struct.
func (m *structTableModel) updateSoftDeleteField(tm time.Time) error {
	if !m.strct.IsValid() {
		return nil
	}
	fv := m.table.SoftDeleteField.Value(m.strct)
	return m.table.UpdateSoftDeleteField(fv, tm)
}
// ScanRows scans the first row into the struct, then drains (and counts)
// the remaining rows without scanning them.
func (m *structTableModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	if !rows.Next() {
		return 0, rows.Err()
	}
	var n int
	if err := m.ScanRow(ctx, rows); err != nil {
		return 0, err
	}
	n++
	// And discard the rest. This is especially important for SQLite3, which can return
	// a row like it was inserted successfully and then return an actual error for the next row.
	// See issues/100.
	for rows.Next() {
		n++
	}
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return n, nil
}
// ScanRow scans the current row into the struct via per-column Scan calls.
func (m *structTableModel) ScanRow(ctx context.Context, rows *sql.Rows) error {
	columns, err := rows.Columns()
	if err != nil {
		return err
	}
	m.columns = columns
	dest := makeDest(m, len(columns))
	return m.scanRow(ctx, rows, dest)
}
// scanRow runs the Before/After scan hooks around rows.Scan.
func (m *structTableModel) scanRow(ctx context.Context, rows *sql.Rows, dest []any) error {
	if err := m.BeforeScanRow(ctx); err != nil {
		return err
	}
	m.scanIndex = 0
	if err := rows.Scan(dest...); err != nil {
		return err
	}
	if err := m.AfterScanRow(ctx); err != nil {
		return err
	}
	return nil
}
// Scan decodes the next column (unquoting sqlite-style quoted names).
func (m *structTableModel) Scan(src any) error {
	column := m.columns[m.scanIndex]
	m.scanIndex++
	return m.ScanColumn(unquote(column), src)
}
// ScanColumn decodes src into the field for column, tolerating unknown
// columns that are empty, underscore-prefixed, or when the DB is configured
// to discard them.
func (m *structTableModel) ScanColumn(column string, src any) error {
	if ok, err := m.scanColumn(column, src); ok {
		return err
	}
	if column == "" || column[0] == '_' || m.db.flags.Has(discardUnknownColumns) {
		return nil
	}
	return fmt.Errorf("bun: %s does not have column %q", m.table.TypeName, column)
}
// scanColumn reports whether column was handled: directly by a table field,
// or routed by its "join__column" prefix to a join model (or back to this
// model when the prefix is its own model name).
func (m *structTableModel) scanColumn(column string, src any) (bool, error) {
	// Allocate the struct lazily, but only for non-NULL values.
	if src != nil {
		if err := m.initStruct(); err != nil {
			return true, err
		}
	}
	if field := m.table.LookupField(column); field != nil {
		// NULL into a nil struct: nothing to write.
		if src == nil && m.isNil() {
			return true, nil
		}
		return true, field.ScanValue(m.strct, src)
	}
	if joinName, column := splitColumn(column); joinName != "" {
		if join := m.getJoin(joinName); join != nil {
			return true, join.JoinModel.ScanColumn(column, src)
		}
		if m.table.ModelName == joinName {
			return true, m.ScanColumn(column, src)
		}
	}
	return false, nil
}
// isNil reports whether the bound struct is a nil pointer.
func (m *structTableModel) isNil() bool {
	return m.strct.Kind() == reflect.Ptr && m.strct.IsNil()
}
// AppendNamedArg appends the named argument resolved against the struct.
func (m *structTableModel) AppendNamedArg(
	gen schema.QueryGen, b []byte, name string,
) ([]byte, bool) {
	return m.table.AppendNamedArg(gen, b, name, m.strct)
}
// clone returns a copy with its own joins/index/columns slices; the struct
// value and destination are shared.
func (m *structTableModel) clone() TableModel {
	return &structTableModel{
		db: m.db,
		table: m.table,
		rel: m.rel,
		joins: append([]relationJoin{}, m.joins...),
		dest: m.dest,
		root: m.root,
		index: append([]int{}, m.index...),
		strct: m.strct,
		structInited: m.structInited,
		structInitErr: m.structInitErr,
		columns: append([]string{}, m.columns...),
		scanIndex: m.scanIndex,
	}
}
// unquote strips one level of surrounding double quotes from a column name;
// sqlite3 sometimes does not unquote columns. Strings shorter than two
// characters are returned unchanged, which also guards against the
// slice-bounds panic the previous version hit on the single character `"`
// (both index checks matched index 0 and s[1:0] panicked).
func unquote(s string) string {
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		return s[1 : len(s)-1]
	}
	return s
}
// splitColumn splits a prefixed column of the form "join__column" into its
// join name and column parts; columns without the "__" separator come back
// with an empty join name.
func splitColumn(s string) (string, string) {
	joinName, column, found := strings.Cut(s, "__")
	if !found {
		return "", s
	}
	return joinName, column
}

8
vendor/github.com/uptrace/bun/package.json generated vendored Normal file
View File

@@ -0,0 +1,8 @@
{
"name": "gobun",
"version": "1.2.16",
"main": "index.js",
"repository": "git@github.com:uptrace/bun.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
"license": "BSD-2-clause"
}

1582
vendor/github.com/uptrace/bun/query_base.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

150
vendor/github.com/uptrace/bun/query_column_add.go generated vendored Normal file
View File

@@ -0,0 +1,150 @@
package bun
import (
"context"
"database/sql"
"fmt"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// AddColumnQuery builds an "ALTER TABLE ... ADD <column>" statement.
type AddColumnQuery struct {
	baseQuery
	ifNotExists bool
	comment string
}
var _ Query = (*AddColumnQuery)(nil)
// NewAddColumnQuery returns a new query bound to db.
func NewAddColumnQuery(db *DB) *AddColumnQuery {
	q := &AddColumnQuery{
		baseQuery: baseQuery{
			db: db,
		},
	}
	return q
}
// Conn sets the connection/transaction the query runs on.
func (q *AddColumnQuery) Conn(db IConn) *AddColumnQuery {
	q.setConn(db)
	return q
}
// Model binds a model that provides the table.
func (q *AddColumnQuery) Model(model any) *AddColumnQuery {
	q.setModel(model)
	return q
}
// Err stores err to be returned when the query executes.
func (q *AddColumnQuery) Err(err error) *AddColumnQuery {
	q.setErr(err)
	return q
}
// Apply calls each function in fns, passing the AddColumnQuery as an argument.
func (q *AddColumnQuery) Apply(fns ...func(*AddColumnQuery) *AddColumnQuery) *AddColumnQuery {
	for _, fn := range fns {
		if fn != nil {
			q = fn(q)
		}
	}
	return q
}
//------------------------------------------------------------------------------
// Table adds one or more table names (quoted as identifiers).
func (q *AddColumnQuery) Table(tables ...string) *AddColumnQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}
// TableExpr adds a table expression with optional args.
func (q *AddColumnQuery) TableExpr(query string, args ...any) *AddColumnQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}
// ModelTableExpr overrides the table name derived from the model.
func (q *AddColumnQuery) ModelTableExpr(query string, args ...any) *AddColumnQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}
//------------------------------------------------------------------------------
// ColumnExpr sets the column definition expression; exactly one is required.
func (q *AddColumnQuery) ColumnExpr(query string, args ...any) *AddColumnQuery {
	q.addColumn(schema.SafeQuery(query, args))
	return q
}
// IfNotExists adds IF NOT EXISTS (dialect support is checked in Exec).
func (q *AddColumnQuery) IfNotExists() *AddColumnQuery {
	q.ifNotExists = true
	return q
}
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *AddColumnQuery) Comment(comment string) *AddColumnQuery {
	q.comment = comment
	return q
}
//------------------------------------------------------------------------------
// Operation returns the operation name used in error reporting and hooks.
func (q *AddColumnQuery) Operation() string {
	return "ADD COLUMN"
}
// AppendQuery appends the generated SQL to b. It requires exactly one
// column expression to have been set.
func (q *AddColumnQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	b = appendComment(b, q.comment)
	if len(q.columns) != 1 {
		return nil, fmt.Errorf("bun: AddColumnQuery requires exactly one column")
	}
	b = append(b, "ALTER TABLE "...)
	b, err = q.appendFirstTable(gen, b)
	if err != nil {
		return nil, err
	}
	b = append(b, " ADD "...)
	if q.ifNotExists {
		b = append(b, "IF NOT EXISTS "...)
	}
	b, err = q.columns[0].AppendQuery(gen, b)
	if err != nil {
		return nil, err
	}
	return b, nil
}
//------------------------------------------------------------------------------
// Exec builds and executes the statement. It fails early when IF NOT EXISTS
// was requested but the dialect does not support it.
func (q *AddColumnQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	if q.ifNotExists && !q.hasFeature(feature.AlterColumnExists) {
		return nil, feature.NewNotSupportError(feature.AlterColumnExists)
	}
	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)
	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}
	query := internal.String(queryBytes)
	return q.exec(ctx, q, query)
}

148
vendor/github.com/uptrace/bun/query_column_drop.go generated vendored Normal file
View File

@@ -0,0 +1,148 @@
package bun
import (
"context"
"database/sql"
"fmt"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// DropColumnQuery builds an "ALTER TABLE ... DROP COLUMN" statement.
type DropColumnQuery struct {
	baseQuery
	comment string
}
var _ Query = (*DropColumnQuery)(nil)
// NewDropColumnQuery returns a new query bound to db.
func NewDropColumnQuery(db *DB) *DropColumnQuery {
	q := &DropColumnQuery{
		baseQuery: baseQuery{
			db: db,
		},
	}
	return q
}
func (q *DropColumnQuery) Conn(db IConn) *DropColumnQuery {
q.setConn(db)
return q
}
func (q *DropColumnQuery) Model(model any) *DropColumnQuery {
q.setModel(model)
return q
}
func (q *DropColumnQuery) Err(err error) *DropColumnQuery {
q.setErr(err)
return q
}
// Apply calls each function in fns, passing the DropColumnQuery as an argument.
func (q *DropColumnQuery) Apply(fns ...func(*DropColumnQuery) *DropColumnQuery) *DropColumnQuery {
for _, fn := range fns {
if fn != nil {
q = fn(q)
}
}
return q
}
//------------------------------------------------------------------------------
func (q *DropColumnQuery) Table(tables ...string) *DropColumnQuery {
for _, table := range tables {
q.addTable(schema.UnsafeIdent(table))
}
return q
}
func (q *DropColumnQuery) TableExpr(query string, args ...any) *DropColumnQuery {
q.addTable(schema.SafeQuery(query, args))
return q
}
func (q *DropColumnQuery) ModelTableExpr(query string, args ...any) *DropColumnQuery {
q.modelTableName = schema.SafeQuery(query, args)
return q
}
//------------------------------------------------------------------------------
func (q *DropColumnQuery) Column(columns ...string) *DropColumnQuery {
for _, column := range columns {
q.addColumn(schema.UnsafeIdent(column))
}
return q
}
func (q *DropColumnQuery) ColumnExpr(query string, args ...any) *DropColumnQuery {
q.addColumn(schema.SafeQuery(query, args))
return q
}
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DropColumnQuery) Comment(comment string) *DropColumnQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *DropColumnQuery) Operation() string {
return "DROP COLUMN"
}
func (q *DropColumnQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
if q.err != nil {
return nil, q.err
}
b = appendComment(b, q.comment)
if len(q.columns) != 1 {
return nil, fmt.Errorf("bun: DropColumnQuery requires exactly one column")
}
b = append(b, "ALTER TABLE "...)
b, err = q.appendFirstTable(gen, b)
if err != nil {
return nil, err
}
b = append(b, " DROP COLUMN "...)
b, err = q.columns[0].AppendQuery(gen, b)
if err != nil {
return nil, err
}
return b, nil
}
//------------------------------------------------------------------------------
func (q *DropColumnQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
if err != nil {
return nil, err
}
query := internal.String(queryBytes)
res, err := q.exec(ctx, q, query)
if err != nil {
return nil, err
}
return res, nil
}

452
vendor/github.com/uptrace/bun/query_delete.go generated vendored Normal file
View File

@@ -0,0 +1,452 @@
package bun
import (
"context"
"database/sql"
"errors"
"time"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// DeleteQuery builds a DELETE statement (or, for soft-delete models, the
// equivalent UPDATE — see AppendQuery).
type DeleteQuery struct {
	whereBaseQuery
	orderLimitOffsetQuery
	returningQuery

	comment string
}

var _ Query = (*DeleteQuery)(nil)

// NewDeleteQuery returns a new query bound to db.
func NewDeleteQuery(db *DB) *DeleteQuery {
	q := &DeleteQuery{
		whereBaseQuery: whereBaseQuery{
			baseQuery: baseQuery{
				db: db,
			},
		},
	}
	return q
}

// Conn runs the query on the given connection/transaction instead of the pool.
func (q *DeleteQuery) Conn(db IConn) *DeleteQuery {
	q.setConn(db)
	return q
}

// Model binds a model whose table metadata drives query generation.
func (q *DeleteQuery) Model(model any) *DeleteQuery {
	q.setModel(model)
	return q
}

// Err stores err so the query fails when it is eventually built/executed.
func (q *DeleteQuery) Err(err error) *DeleteQuery {
	q.setErr(err)
	return q
}

// Apply calls each function in fns, passing the DeleteQuery as an argument.
func (q *DeleteQuery) Apply(fns ...func(*DeleteQuery) *DeleteQuery) *DeleteQuery {
	for _, fn := range fns {
		if fn != nil {
			q = fn(q)
		}
	}
	return q
}

// With adds a CTE with the given name.
func (q *DeleteQuery) With(name string, query Query) *DeleteQuery {
	q.addWith(NewWithQuery(name, query))
	return q
}

// WithRecursive adds a recursive CTE with the given name.
func (q *DeleteQuery) WithRecursive(name string, query Query) *DeleteQuery {
	q.addWith(NewWithQuery(name, query).Recursive())
	return q
}

// WithQuery adds a pre-built CTE.
func (q *DeleteQuery) WithQuery(query *WithQuery) *DeleteQuery {
	q.addWith(query)
	return q
}

// Table adds each name to the table list as a quoted identifier.
func (q *DeleteQuery) Table(tables ...string) *DeleteQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}

// TableExpr adds a table as a raw SQL expression with args.
func (q *DeleteQuery) TableExpr(query string, args ...any) *DeleteQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the table name derived from the model.
func (q *DeleteQuery) ModelTableExpr(query string, args ...any) *DeleteQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}

//------------------------------------------------------------------------------

// WherePK restricts the query to the model's primary keys (or cols, if given).
func (q *DeleteQuery) WherePK(cols ...string) *DeleteQuery {
	q.addWhereCols(cols)
	return q
}

// Where adds a condition joined to the previous ones with AND.
func (q *DeleteQuery) Where(query string, args ...any) *DeleteQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " AND "))
	return q
}

// WhereOr adds a condition joined to the previous ones with OR.
func (q *DeleteQuery) WhereOr(query string, args ...any) *DeleteQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " OR "))
	return q
}

// WhereGroup collects the conditions added inside fn into one parenthesized
// group joined with sep. It temporarily swaps out q.where so fn's additions
// can be captured, then restores the saved conditions.
func (q *DeleteQuery) WhereGroup(sep string, fn func(*DeleteQuery) *DeleteQuery) *DeleteQuery {
	saved := q.where
	q.where = nil

	q = fn(q)

	where := q.where
	q.where = saved

	q.addWhereGroup(sep, where)

	return q
}

// WhereDeleted restricts the query to soft-deleted rows.
func (q *DeleteQuery) WhereDeleted() *DeleteQuery {
	q.whereDeleted()
	return q
}

// WhereAllWithDeleted includes soft-deleted rows in the query.
func (q *DeleteQuery) WhereAllWithDeleted() *DeleteQuery {
	q.whereAllWithDeleted()
	return q
}

// Order adds ORDER BY columns; unsupported dialects get a stored error.
func (q *DeleteQuery) Order(orders ...string) *DeleteQuery {
	if !q.hasFeature(feature.DeleteOrderLimit) {
		q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
		return q
	}
	q.addOrder(orders...)
	return q
}

// OrderExpr adds an ORDER BY expression; unsupported dialects get a stored error.
func (q *DeleteQuery) OrderExpr(query string, args ...any) *DeleteQuery {
	if !q.hasFeature(feature.DeleteOrderLimit) {
		q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
		return q
	}
	q.addOrderExpr(query, args...)
	return q
}

// ForceDelete makes the query hard-delete rows even for soft-delete models.
func (q *DeleteQuery) ForceDelete() *DeleteQuery {
	q.flags = q.flags.Set(forceDeleteFlag)
	return q
}

// ------------------------------------------------------------------------------

// Limit adds a LIMIT clause; unsupported dialects get a stored error.
func (q *DeleteQuery) Limit(n int) *DeleteQuery {
	if !q.hasFeature(feature.DeleteOrderLimit) {
		q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
		return q
	}
	q.setLimit(n)
	return q
}
//------------------------------------------------------------------------------
// Returning adds a RETURNING clause to the query.
//
// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`.
func (q *DeleteQuery) Returning(query string, args ...any) *DeleteQuery {
	if !q.hasFeature(feature.DeleteReturning) {
		// Bug fix: the error previously reported feature.DeleteOrderLimit (a
		// copy-paste slip from Order/Limit above) even though the guard checks
		// feature.DeleteReturning. Report the feature that is actually missing.
		q.setErr(feature.NewNotSupportError(feature.DeleteReturning))
		return q
	}
	q.addReturning(schema.SafeQuery(query, args))
	return q
}
//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DeleteQuery) Comment(comment string) *DeleteQuery {
	q.comment = comment
	return q
}

//------------------------------------------------------------------------------

// Operation returns the SQL operation name for this query type.
func (q *DeleteQuery) Operation() string {
	return "DELETE"
}

// AppendQuery appends the generated statement to b. For soft-delete models
// (unless ForceDelete was used) the DELETE is rewritten into an UpdateQuery
// that sets the soft-delete timestamp instead.
func (q *DeleteQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}

	b = appendComment(b, q.comment)

	gen = formatterWithModel(gen, q)

	if q.isSoftDelete() {
		// Stamp the model's soft-delete field, then delegate generation to an
		// UPDATE that shares this query's WHERE and RETURNING state.
		now := time.Now()

		if err := q.tableModel.updateSoftDeleteField(now); err != nil {
			return nil, err
		}

		upd := &UpdateQuery{
			whereBaseQuery: q.whereBaseQuery,
			returningQuery: q.returningQuery,
		}
		upd.Set(q.softDeleteSet(gen, now))

		return upd.AppendQuery(gen, b)
	}

	withAlias := q.db.HasFeature(feature.DeleteTableAlias)

	b, err = q.appendWith(gen, b)
	if err != nil {
		return nil, err
	}

	b = append(b, "DELETE FROM "...)

	if withAlias {
		b, err = q.appendFirstTableWithAlias(gen, b)
	} else {
		b, err = q.appendFirstTable(gen, b)
	}
	if err != nil {
		return nil, err
	}

	if q.hasMultiTables() {
		b = append(b, " USING "...)
		b, err = q.appendOtherTables(gen, b)
		if err != nil {
			return nil, err
		}
	}

	if q.hasFeature(feature.Output) && q.hasReturning() {
		// SQL Server-style OUTPUT goes before WHERE.
		b = append(b, " OUTPUT "...)
		b, err = q.appendOutput(gen, b)
		if err != nil {
			return nil, err
		}
	}

	b, err = q.mustAppendWhere(gen, b, withAlias)
	if err != nil {
		return nil, err
	}

	if q.hasMultiTables() && (len(q.order) > 0 || q.limit > 0) {
		return nil, errors.New("bun: can't use ORDER or LIMIT with multiple tables")
	}

	b, err = q.appendOrder(gen, b)
	if err != nil {
		return nil, err
	}

	b, err = q.appendLimitOffset(gen, b)
	if err != nil {
		return nil, err
	}

	if q.hasFeature(feature.DeleteReturning) && q.hasReturning() {
		b = append(b, " RETURNING "...)
		b, err = q.appendReturning(gen, b)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}

// isSoftDelete reports whether this DELETE should be rewritten into a
// soft-delete UPDATE: the model has a soft-delete field and ForceDelete
// was not requested.
func (q *DeleteQuery) isSoftDelete() bool {
	return q.tableModel != nil && q.table.SoftDeleteField != nil && !q.flags.Has(forceDeleteFlag)
}

// softDeleteSet builds the "<col> = <tm>" SET fragment for the soft-delete
// rewrite, qualifying the column with the table alias when the dialect
// supports multi-table UPDATE.
func (q *DeleteQuery) softDeleteSet(gen schema.QueryGen, tm time.Time) string {
	b := make([]byte, 0, 32)
	if gen.HasFeature(feature.UpdateMultiTable) {
		b = append(b, q.table.SQLAlias...)
		b = append(b, '.')
	}
	b = append(b, q.table.SoftDeleteField.SQLName...)
	b = append(b, " = "...)
	b = gen.Append(b, tm)
	return internal.String(b)
}
//------------------------------------------------------------------------------
// Scan executes the query and scans returned rows into dest.
func (q *DeleteQuery) Scan(ctx context.Context, dest ...any) error {
	_, err := q.scanOrExec(ctx, dest, true)
	return err
}

// Exec executes the query; rows are scanned only when dest is provided.
func (q *DeleteQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	return q.scanOrExec(ctx, dest, len(dest) > 0)
}

// scanOrExec runs the before/after delete hooks around query generation and
// execution, choosing between scan (rows expected) and plain exec.
func (q *DeleteQuery) scanOrExec(
	ctx context.Context, dest []any, hasDest bool,
) (sql.Result, error) {
	if q.err != nil {
		return nil, q.err
	}

	if q.table != nil {
		if err := q.beforeDeleteHook(ctx); err != nil {
			return nil, err
		}
	}

	// Run append model hooks before generating the query.
	if err := q.beforeAppendModel(ctx, q); err != nil {
		return nil, err
	}

	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	// Generate the query before checking hasReturning.
	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.DeleteReturning|feature.Output))
	var model Model

	if useScan {
		var err error
		model, err = q.getModel(dest)
		if err != nil {
			return nil, err
		}
	}

	query := internal.String(queryBytes)

	var res sql.Result

	if useScan {
		res, err = q.scan(ctx, q, query, model, hasDest)
		if err != nil {
			return nil, err
		}
	} else {
		res, err = q.exec(ctx, q, query)
		if err != nil {
			return nil, err
		}
	}

	if q.table != nil {
		if err := q.afterDeleteHook(ctx); err != nil {
			return nil, err
		}
	}

	return res, nil
}

// beforeDeleteHook invokes BeforeDelete when the model implements it.
func (q *DeleteQuery) beforeDeleteHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(BeforeDeleteHook); ok {
		if err := hook.BeforeDelete(ctx, q); err != nil {
			return err
		}
	}
	return nil
}

// afterDeleteHook invokes AfterDelete when the model implements it.
func (q *DeleteQuery) afterDeleteHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(AfterDeleteHook); ok {
		if err := hook.AfterDelete(ctx, q); err != nil {
			return err
		}
	}
	return nil
}

// String returns the generated SQL query string. The DeleteQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *DeleteQuery) String() string {
	buf, err := q.AppendQuery(q.db.QueryGen(), nil)
	if err != nil {
		panic(err)
	}
	return string(buf)
}

//------------------------------------------------------------------------------

// QueryBuilder wraps the query in the dialect-agnostic QueryBuilder interface.
func (q *DeleteQuery) QueryBuilder() QueryBuilder {
	return &deleteQueryBuilder{q}
}

// ApplyQueryBuilder applies fn to the query via the QueryBuilder interface.
func (q *DeleteQuery) ApplyQueryBuilder(fn func(QueryBuilder) QueryBuilder) *DeleteQuery {
	return fn(q.QueryBuilder()).Unwrap().(*DeleteQuery)
}

// deleteQueryBuilder adapts DeleteQuery to the QueryBuilder interface.
type deleteQueryBuilder struct {
	*DeleteQuery
}

// WhereGroup groups the conditions added by fn, delegating to DeleteQuery.WhereGroup.
func (q *deleteQueryBuilder) WhereGroup(
	sep string, fn func(QueryBuilder) QueryBuilder,
) QueryBuilder {
	q.DeleteQuery = q.DeleteQuery.WhereGroup(sep, func(qs *DeleteQuery) *DeleteQuery {
		return fn(q).(*deleteQueryBuilder).DeleteQuery
	})
	return q
}

func (q *deleteQueryBuilder) Where(query string, args ...any) QueryBuilder {
	q.DeleteQuery.Where(query, args...)
	return q
}

func (q *deleteQueryBuilder) WhereOr(query string, args ...any) QueryBuilder {
	q.DeleteQuery.WhereOr(query, args...)
	return q
}

func (q *deleteQueryBuilder) WhereDeleted() QueryBuilder {
	q.DeleteQuery.WhereDeleted()
	return q
}

func (q *deleteQueryBuilder) WhereAllWithDeleted() QueryBuilder {
	q.DeleteQuery.WhereAllWithDeleted()
	return q
}

func (q *deleteQueryBuilder) WherePK(cols ...string) QueryBuilder {
	q.DeleteQuery.WherePK(cols...)
	return q
}

// Unwrap returns the underlying *DeleteQuery.
func (q *deleteQueryBuilder) Unwrap() any {
	return q.DeleteQuery
}

267
vendor/github.com/uptrace/bun/query_index_create.go generated vendored Normal file
View File

@@ -0,0 +1,267 @@
package bun
import (
"context"
"database/sql"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// CreateIndexQuery builds a "CREATE [UNIQUE|FULLTEXT|SPATIAL] INDEX ..." statement.
type CreateIndexQuery struct {
	whereBaseQuery

	unique       bool
	fulltext     bool
	spatial      bool
	concurrently bool
	ifNotExists  bool

	index   schema.QueryWithArgs
	using   schema.QueryWithArgs
	include []schema.QueryWithArgs
	comment string
}

var _ Query = (*CreateIndexQuery)(nil)

// NewCreateIndexQuery returns a new query bound to db.
func NewCreateIndexQuery(db *DB) *CreateIndexQuery {
	q := &CreateIndexQuery{
		whereBaseQuery: whereBaseQuery{
			baseQuery: baseQuery{
				db: db,
			},
		},
	}
	return q
}

// Conn runs the query on the given connection/transaction instead of the pool.
func (q *CreateIndexQuery) Conn(db IConn) *CreateIndexQuery {
	q.setConn(db)
	return q
}

// Model binds a model whose table metadata drives query generation.
func (q *CreateIndexQuery) Model(model any) *CreateIndexQuery {
	q.setModel(model)
	return q
}

// Err stores err so the query fails when it is eventually built/executed.
func (q *CreateIndexQuery) Err(err error) *CreateIndexQuery {
	q.setErr(err)
	return q
}

// Unique adds the UNIQUE modifier.
func (q *CreateIndexQuery) Unique() *CreateIndexQuery {
	q.unique = true
	return q
}

// Concurrently adds the CONCURRENTLY modifier.
func (q *CreateIndexQuery) Concurrently() *CreateIndexQuery {
	q.concurrently = true
	return q
}

// IfNotExists adds the IF NOT EXISTS modifier.
func (q *CreateIndexQuery) IfNotExists() *CreateIndexQuery {
	q.ifNotExists = true
	return q
}

//------------------------------------------------------------------------------

// Index sets the index name as a quoted identifier.
func (q *CreateIndexQuery) Index(query string) *CreateIndexQuery {
	q.index = schema.UnsafeIdent(query)
	return q
}

// IndexExpr sets the index name as a raw SQL expression with args.
func (q *CreateIndexQuery) IndexExpr(query string, args ...any) *CreateIndexQuery {
	q.index = schema.SafeQuery(query, args)
	return q
}

//------------------------------------------------------------------------------

// Table adds each name to the table list as a quoted identifier.
func (q *CreateIndexQuery) Table(tables ...string) *CreateIndexQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}

// TableExpr adds a table as a raw SQL expression with args.
func (q *CreateIndexQuery) TableExpr(query string, args ...any) *CreateIndexQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the table name derived from the model.
func (q *CreateIndexQuery) ModelTableExpr(query string, args ...any) *CreateIndexQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}

// Using sets the index method in the USING clause.
func (q *CreateIndexQuery) Using(query string, args ...any) *CreateIndexQuery {
	q.using = schema.SafeQuery(query, args)
	return q
}

//------------------------------------------------------------------------------

// Column adds each name to the indexed-column list as a quoted identifier.
func (q *CreateIndexQuery) Column(columns ...string) *CreateIndexQuery {
	for _, column := range columns {
		q.addColumn(schema.UnsafeIdent(column))
	}
	return q
}

// ColumnExpr adds an indexed column as a raw SQL expression with args.
func (q *CreateIndexQuery) ColumnExpr(query string, args ...any) *CreateIndexQuery {
	q.addColumn(schema.SafeQuery(query, args))
	return q
}

// ExcludeColumn removes columns from the column list.
func (q *CreateIndexQuery) ExcludeColumn(columns ...string) *CreateIndexQuery {
	q.excludeColumn(columns)
	return q
}

//------------------------------------------------------------------------------

// Include adds non-key columns for the INCLUDE clause as quoted identifiers.
func (q *CreateIndexQuery) Include(columns ...string) *CreateIndexQuery {
	for _, column := range columns {
		q.include = append(q.include, schema.UnsafeIdent(column))
	}
	return q
}

// IncludeExpr adds an INCLUDE column as a raw SQL expression with args.
func (q *CreateIndexQuery) IncludeExpr(query string, args ...any) *CreateIndexQuery {
	q.include = append(q.include, schema.SafeQuery(query, args))
	return q
}

//------------------------------------------------------------------------------

// Where adds a partial-index condition joined with AND.
func (q *CreateIndexQuery) Where(query string, args ...any) *CreateIndexQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " AND "))
	return q
}

// WhereOr adds a partial-index condition joined with OR.
func (q *CreateIndexQuery) WhereOr(query string, args ...any) *CreateIndexQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " OR "))
	return q
}

//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *CreateIndexQuery) Comment(comment string) *CreateIndexQuery {
	q.comment = comment
	return q
}
//------------------------------------------------------------------------------
// Operation returns the SQL operation name for this query type.
func (q *CreateIndexQuery) Operation() string {
	return "CREATE INDEX"
}

// AppendQuery appends the generated CREATE INDEX statement to b in clause
// order: modifiers, index name, ON table, USING, column list, INCLUDE, WHERE.
func (q *CreateIndexQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}

	b = appendComment(b, q.comment)

	b = append(b, "CREATE "...)

	if q.unique {
		b = append(b, "UNIQUE "...)
	}
	if q.fulltext {
		b = append(b, "FULLTEXT "...)
	}
	if q.spatial {
		b = append(b, "SPATIAL "...)
	}

	b = append(b, "INDEX "...)

	if q.concurrently {
		b = append(b, "CONCURRENTLY "...)
	}
	if q.ifNotExists {
		b = append(b, "IF NOT EXISTS "...)
	}

	b, err = q.index.AppendQuery(gen, b)
	if err != nil {
		return nil, err
	}

	b = append(b, " ON "...)
	b, err = q.appendFirstTable(gen, b)
	if err != nil {
		return nil, err
	}

	if !q.using.IsZero() {
		b = append(b, " USING "...)
		b, err = q.using.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}

	b = append(b, " ("...)
	for i, col := range q.columns {
		if i > 0 {
			b = append(b, ", "...)
		}
		b, err = col.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}
	b = append(b, ')')

	if len(q.include) > 0 {
		b = append(b, " INCLUDE ("...)
		for i, col := range q.include {
			if i > 0 {
				b = append(b, ", "...)
			}
			b, err = col.AppendQuery(gen, b)
			if err != nil {
				return nil, err
			}
		}
		b = append(b, ')')
	}

	if len(q.where) > 0 {
		b = append(b, " WHERE "...)
		b, err = appendWhere(gen, b, q.where)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}

//------------------------------------------------------------------------------

// Exec builds the statement and runs it, returning the driver's sql.Result.
func (q *CreateIndexQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	query := internal.String(queryBytes)

	res, err := q.exec(ctx, q, query)
	if err != nil {
		return nil, err
	}

	return res, nil
}

134
vendor/github.com/uptrace/bun/query_index_drop.go generated vendored Normal file
View File

@@ -0,0 +1,134 @@
package bun
import (
"context"
"database/sql"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// DropIndexQuery builds a "DROP INDEX ..." statement.
type DropIndexQuery struct {
	baseQuery
	cascadeQuery

	concurrently bool
	ifExists     bool

	index   schema.QueryWithArgs
	comment string
}

var _ Query = (*DropIndexQuery)(nil)

// NewDropIndexQuery returns a new query bound to db.
func NewDropIndexQuery(db *DB) *DropIndexQuery {
	q := &DropIndexQuery{
		baseQuery: baseQuery{
			db: db,
		},
	}
	return q
}

// Conn runs the query on the given connection/transaction instead of the pool.
func (q *DropIndexQuery) Conn(db IConn) *DropIndexQuery {
	q.setConn(db)
	return q
}

// Model binds a model whose table metadata drives query generation.
func (q *DropIndexQuery) Model(model any) *DropIndexQuery {
	q.setModel(model)
	return q
}

// Err stores err so the query fails when it is eventually built/executed.
func (q *DropIndexQuery) Err(err error) *DropIndexQuery {
	q.setErr(err)
	return q
}

//------------------------------------------------------------------------------

// Concurrently adds the CONCURRENTLY modifier.
func (q *DropIndexQuery) Concurrently() *DropIndexQuery {
	q.concurrently = true
	return q
}

// IfExists adds the IF EXISTS modifier.
func (q *DropIndexQuery) IfExists() *DropIndexQuery {
	q.ifExists = true
	return q
}

// Cascade adds the CASCADE modifier.
func (q *DropIndexQuery) Cascade() *DropIndexQuery {
	q.cascade = true
	return q
}

// Restrict adds the RESTRICT modifier.
func (q *DropIndexQuery) Restrict() *DropIndexQuery {
	q.restrict = true
	return q
}

// Index sets the index name as a raw SQL expression with args.
func (q *DropIndexQuery) Index(query string, args ...any) *DropIndexQuery {
	q.index = schema.SafeQuery(query, args)
	return q
}

//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DropIndexQuery) Comment(comment string) *DropIndexQuery {
	q.comment = comment
	return q
}

//------------------------------------------------------------------------------

// Operation returns the SQL operation name for this query type.
func (q *DropIndexQuery) Operation() string {
	return "DROP INDEX"
}

// AppendQuery appends the generated DROP INDEX statement to b, followed by the
// optional CASCADE/RESTRICT suffix.
func (q *DropIndexQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}

	b = appendComment(b, q.comment)

	b = append(b, "DROP INDEX "...)

	if q.concurrently {
		b = append(b, "CONCURRENTLY "...)
	}
	if q.ifExists {
		b = append(b, "IF EXISTS "...)
	}

	b, err = q.index.AppendQuery(gen, b)
	if err != nil {
		return nil, err
	}

	b = q.appendCascade(gen, b)

	return b, nil
}

//------------------------------------------------------------------------------

// Exec builds the statement and runs it, returning the driver's sql.Result.
func (q *DropIndexQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	query := internal.String(queryBytes)

	res, err := q.exec(ctx, q, query)
	if err != nil {
		return nil, err
	}

	return res, nil
}

706
vendor/github.com/uptrace/bun/query_insert.go generated vendored Normal file
View File

@@ -0,0 +1,706 @@
package bun
import (
"context"
"database/sql"
"fmt"
"reflect"
"strings"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// InsertQuery builds an INSERT (or REPLACE) statement, optionally with an
// ON CONFLICT / ON DUPLICATE KEY clause.
type InsertQuery struct {
	whereBaseQuery
	returningQuery
	customValueQuery

	on schema.QueryWithArgs
	setQuery

	ignore  bool
	replace bool
	comment string
}

var _ Query = (*InsertQuery)(nil)

// NewInsertQuery returns a new query bound to db.
func NewInsertQuery(db *DB) *InsertQuery {
	q := &InsertQuery{
		whereBaseQuery: whereBaseQuery{
			baseQuery: baseQuery{
				db: db,
			},
		},
	}
	return q
}

// Conn runs the query on the given connection/transaction instead of the pool.
func (q *InsertQuery) Conn(db IConn) *InsertQuery {
	q.setConn(db)
	return q
}

// Model binds a model providing both table metadata and the rows to insert.
func (q *InsertQuery) Model(model any) *InsertQuery {
	q.setModel(model)
	return q
}

// Err stores err so the query fails when it is eventually built/executed.
func (q *InsertQuery) Err(err error) *InsertQuery {
	q.setErr(err)
	return q
}

// Apply calls each function in fns, passing the InsertQuery as an argument.
func (q *InsertQuery) Apply(fns ...func(*InsertQuery) *InsertQuery) *InsertQuery {
	for _, fn := range fns {
		if fn != nil {
			q = fn(q)
		}
	}
	return q
}

// With adds a CTE with the given name.
func (q *InsertQuery) With(name string, query Query) *InsertQuery {
	q.addWith(NewWithQuery(name, query))
	return q
}

// WithRecursive adds a recursive CTE with the given name.
func (q *InsertQuery) WithRecursive(name string, query Query) *InsertQuery {
	q.addWith(NewWithQuery(name, query).Recursive())
	return q
}

// WithQuery adds a pre-built CTE.
func (q *InsertQuery) WithQuery(query *WithQuery) *InsertQuery {
	q.addWith(query)
	return q
}

//------------------------------------------------------------------------------

// Table adds each name to the table list as a quoted identifier.
func (q *InsertQuery) Table(tables ...string) *InsertQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}

// TableExpr adds a table as a raw SQL expression with args.
func (q *InsertQuery) TableExpr(query string, args ...any) *InsertQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the table name derived from the model.
func (q *InsertQuery) ModelTableExpr(query string, args ...any) *InsertQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}

//------------------------------------------------------------------------------

// Column adds each name to the column list as a quoted identifier.
func (q *InsertQuery) Column(columns ...string) *InsertQuery {
	for _, column := range columns {
		q.addColumn(schema.UnsafeIdent(column))
	}
	return q
}

// ColumnExpr adds a column as a raw SQL expression with args.
func (q *InsertQuery) ColumnExpr(query string, args ...any) *InsertQuery {
	q.addColumn(schema.SafeQuery(query, args))
	return q
}

// ExcludeColumn removes columns from the column list.
func (q *InsertQuery) ExcludeColumn(columns ...string) *InsertQuery {
	q.excludeColumn(columns)
	return q
}

// Value overwrites model value for the column.
func (q *InsertQuery) Value(column string, expr string, args ...any) *InsertQuery {
	if q.table == nil {
		q.setErr(errNilModel)
		return q
	}
	q.addValue(q.table, column, expr, args)
	return q
}

// Where adds an ON CONFLICT condition joined with AND.
func (q *InsertQuery) Where(query string, args ...any) *InsertQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " AND "))
	return q
}

// WhereOr adds an ON CONFLICT condition joined with OR.
func (q *InsertQuery) WhereOr(query string, args ...any) *InsertQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " OR "))
	return q
}

//------------------------------------------------------------------------------

// Returning adds a RETURNING clause to the query.
//
// To suppress the auto-generated RETURNING clause, use `Returning("")`.
func (q *InsertQuery) Returning(query string, args ...any) *InsertQuery {
	q.addReturning(schema.SafeQuery(query, args))
	return q
}

//------------------------------------------------------------------------------

// Ignore generates different queries depending on the DBMS:
//   - On MySQL, it generates `INSERT IGNORE INTO`.
//   - On PostgreSQL, it generates `ON CONFLICT DO NOTHING`.
func (q *InsertQuery) Ignore() *InsertQuery {
	if q.db.gen.HasFeature(feature.InsertOnConflict) {
		return q.On("CONFLICT DO NOTHING")
	}
	if q.db.gen.HasFeature(feature.InsertIgnore) {
		q.ignore = true
	}
	return q
}

// Replace generates a `REPLACE INTO` query (MySQL and MariaDB).
func (q *InsertQuery) Replace() *InsertQuery {
	q.replace = true
	return q
}

//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *InsertQuery) Comment(comment string) *InsertQuery {
	q.comment = comment
	return q
}
//------------------------------------------------------------------------------
// Operation returns the SQL operation name for this query type.
func (q *InsertQuery) Operation() string {
	return "INSERT"
}

// AppendQuery appends the generated INSERT/REPLACE statement to b:
// CTEs, verb, target table, columns/values, ON clause, RETURNING.
func (q *InsertQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}

	b = appendComment(b, q.comment)

	gen = formatterWithModel(gen, q)

	b, err = q.appendWith(gen, b)
	if err != nil {
		return nil, err
	}

	if q.replace {
		b = append(b, "REPLACE "...)
	} else {
		b = append(b, "INSERT "...)
		if q.ignore {
			b = append(b, "IGNORE "...)
		}
	}
	b = append(b, "INTO "...)

	// An alias is only emitted when the dialect supports it and an ON clause
	// (which may reference the alias) is present.
	if q.db.HasFeature(feature.InsertTableAlias) && !q.on.IsZero() {
		b, err = q.appendFirstTableWithAlias(gen, b)
	} else {
		b, err = q.appendFirstTable(gen, b)
	}
	if err != nil {
		return nil, err
	}

	b, err = q.appendColumnsValues(gen, b, false)
	if err != nil {
		return nil, err
	}

	b, err = q.appendOn(gen, b)
	if err != nil {
		return nil, err
	}

	if q.hasFeature(feature.InsertReturning) && q.hasReturning() {
		b = append(b, " RETURNING "...)
		b, err = q.appendReturning(gen, b)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}

// appendColumnsValues appends the column list and row values. With multiple
// tables it emits an INSERT ... SELECT instead; map models append their own
// columns/values; otherwise values come from the bound struct/slice model.
// skipOutput suppresses the OUTPUT clause (used by callers that emit it
// elsewhere).
func (q *InsertQuery) appendColumnsValues(
	gen schema.QueryGen, b []byte, skipOutput bool,
) (_ []byte, err error) {
	// INSERT ... SELECT form for multi-table inserts.
	if q.hasMultiTables() {
		if q.columns != nil {
			b = append(b, " ("...)
			b, err = q.appendColumns(gen, b)
			if err != nil {
				return nil, err
			}
			b = append(b, ")"...)
		}

		if q.hasFeature(feature.Output) && q.hasReturning() {
			b = append(b, " OUTPUT "...)
			b, err = q.appendOutput(gen, b)
			if err != nil {
				return nil, err
			}
		}

		b = append(b, " SELECT "...)

		if q.columns != nil {
			b, err = q.appendColumns(gen, b)
			if err != nil {
				return nil, err
			}
		} else {
			b = append(b, "*"...)
		}

		b = append(b, " FROM "...)
		b, err = q.appendOtherTables(gen, b)
		if err != nil {
			return nil, err
		}

		return b, nil
	}

	if m, ok := q.model.(*mapModel); ok {
		return m.appendColumnsValues(gen, b), nil
	}
	if _, ok := q.model.(*mapSliceModel); ok {
		return nil, fmt.Errorf("Insert(*[]map[string]any) is not supported")
	}

	if q.model == nil {
		return nil, errNilModel
	}

	// Build fields to populate RETURNING clause.
	fields, err := q.getFields()
	if err != nil {
		return nil, err
	}

	b = append(b, " ("...)
	b = q.appendFields(gen, b, fields)
	b = append(b, ")"...)

	if q.hasFeature(feature.Output) && q.hasReturning() && !skipOutput {
		b = append(b, " OUTPUT "...)
		b, err = q.appendOutput(gen, b)
		if err != nil {
			return nil, err
		}
	}

	b = append(b, " VALUES ("...)

	switch model := q.tableModel.(type) {
	case *structTableModel:
		b, err = q.appendStructValues(gen, b, fields, model.strct)
		if err != nil {
			return nil, err
		}
	case *sliceTableModel:
		b, err = q.appendSliceValues(gen, b, fields, model.slice)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("bun: Insert does not support %T", q.tableModel)
	}

	b = append(b, ')')

	return b, nil
}
// appendStructValues appends one row of VALUES for a single struct, honoring
// per-column overrides from Value(), DEFAULT/NULL substitution for zero
// values, and any extra values registered on the query.
func (q *InsertQuery) appendStructValues(
	gen schema.QueryGen, b []byte, fields []*schema.Field, strct reflect.Value,
) (_ []byte, err error) {
	isTemplate := gen.IsNop()

	for i, f := range fields {
		if i > 0 {
			b = append(b, ", "...)
		}

		// A Value()-registered expression replaces the model's field value.
		app, ok := q.modelValues[f.Name]
		if ok {
			b, err = app.AppendQuery(gen, b)
			if err != nil {
				return nil, err
			}
			q.addReturningField(f)
			continue
		}

		switch {
		case isTemplate:
			// Template generation: emit placeholders only.
			b = append(b, '?')
		case q.marshalsToDefault(f, strct):
			// Zero/nil values fall back to DEFAULT, the column's SQL default,
			// or NULL, depending on dialect support.
			if q.db.HasFeature(feature.DefaultPlaceholder) {
				b = append(b, "DEFAULT"...)
			} else if f.SQLDefault != "" {
				b = append(b, f.SQLDefault...)
			} else {
				b = append(b, "NULL"...)
			}
			q.addReturningField(f)
		default:
			b = f.AppendValue(gen, b, strct)
		}
	}

	for i, v := range q.extraValues {
		if i > 0 || len(fields) > 0 {
			b = append(b, ", "...)
		}
		b, err = v.value.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}

// appendSliceValues appends "(row), (row), ..." for a slice of structs; in
// template (nop) mode it delegates to a single placeholder row.
func (q *InsertQuery) appendSliceValues(
	gen schema.QueryGen, b []byte, fields []*schema.Field, slice reflect.Value,
) (_ []byte, err error) {
	if gen.IsNop() {
		return q.appendStructValues(gen, b, fields, reflect.Value{})
	}

	sliceLen := slice.Len()
	for i := 0; i < sliceLen; i++ {
		if i > 0 {
			b = append(b, "), ("...)
		}
		el := indirect(slice.Index(i))
		b, err = q.appendStructValues(gen, b, fields, el)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}

// getFields returns the fields to insert. When no explicit columns are set and
// the dialect uses identity columns (or lacks DEFAULT placeholders), fields
// whose values would marshal to DEFAULT are skipped and collected for the
// RETURNING clause instead.
func (q *InsertQuery) getFields() ([]*schema.Field, error) {
	hasIdentity := q.db.HasFeature(feature.Identity)

	if len(q.columns) > 0 || q.db.HasFeature(feature.DefaultPlaceholder) && !hasIdentity {
		return q.baseQuery.getFields()
	}

	var strct reflect.Value

	switch model := q.tableModel.(type) {
	case *structTableModel:
		strct = model.strct
	case *sliceTableModel:
		if model.sliceLen == 0 {
			return nil, fmt.Errorf("bun: Insert(empty %T)", model.slice.Type())
		}
		// Inspect the first element to decide which fields carry values.
		strct = indirect(model.slice.Index(0))
	default:
		return nil, errNilModel
	}

	fields := make([]*schema.Field, 0, len(q.table.Fields))

	for _, f := range q.table.Fields {
		if hasIdentity && f.AutoIncrement {
			q.addReturningField(f)
			continue
		}
		if f.NotNull && q.marshalsToDefault(f, strct) {
			q.addReturningField(f)
			continue
		}
		fields = append(fields, f)
	}

	return fields, nil
}

// marshalsToDefault checks if the value will be marshaled as DEFAULT or NULL (if DEFAULT placeholder is not supported)
// when appending it to the VALUES clause in place of the given field.
func (q InsertQuery) marshalsToDefault(f *schema.Field, v reflect.Value) bool {
	return (f.IsPtr && f.HasNilValue(v)) ||
		(f.HasZeroValue(v) && (f.NullZero || f.SQLDefault != ""))
}

// appendFields appends the column-name list: model fields first, then any
// extra Value() columns.
func (q *InsertQuery) appendFields(
	gen schema.QueryGen, b []byte, fields []*schema.Field,
) []byte {
	b = appendColumns(b, "", fields)
	for i, v := range q.extraValues {
		if i > 0 || len(fields) > 0 {
			b = append(b, ", "...)
		}
		b = gen.AppendIdent(b, v.column)
	}
	return b
}
//------------------------------------------------------------------------------
func (q *InsertQuery) On(s string, args ...any) *InsertQuery {
q.on = schema.SafeQuery(s, args)
return q
}
func (q *InsertQuery) Set(query string, args ...any) *InsertQuery {
q.addSet(schema.SafeQuery(query, args))
return q
}
func (q *InsertQuery) SetValues(values *ValuesQuery) *InsertQuery {
q.setValues = values
return q
}
func (q *InsertQuery) appendOn(gen schema.QueryGen, b []byte) (_ []byte, err error) {
if q.on.IsZero() {
return b, nil
}
b = append(b, " ON "...)
b, err = q.on.AppendQuery(gen, b)
if err != nil {
return nil, err
}
if len(q.set) > 0 || q.setValues != nil {
if gen.HasFeature(feature.InsertOnDuplicateKey) {
b = append(b, ' ')
} else {
b = append(b, " SET "...)
}
b, err = q.appendSet(gen, b)
if err != nil {
return nil, err
}
} else if q.onConflictDoUpdate() {
fields, err := q.getDataFields()
if err != nil {
return nil, err
}
b = q.appendSetExcluded(b, fields)
} else if q.onDuplicateKeyUpdate() {
fields, err := q.getDataFields()
if err != nil {
return nil, err
}
b = q.appendSetValues(b, fields)
}
if len(q.where) > 0 {
b = append(b, " WHERE "...)
b, err = appendWhere(gen, b, q.where)
if err != nil {
return nil, err
}
}
return b, nil
}
func (q *InsertQuery) onConflictDoUpdate() bool {
return strings.HasSuffix(strings.ToUpper(q.on.Query), " DO UPDATE")
}
func (q *InsertQuery) onDuplicateKeyUpdate() bool {
return strings.ToUpper(q.on.Query) == "DUPLICATE KEY UPDATE"
}
// appendSetExcluded emits "SET col = EXCLUDED.col" assignments for every data
// field; used for ON CONFLICT ... DO UPDATE without explicit Set calls.
func (q *InsertQuery) appendSetExcluded(b []byte, fields []*schema.Field) []byte {
	b = append(b, " SET "...)
	for i, field := range fields {
		if i != 0 {
			b = append(b, ", "...)
		}
		name := field.SQLName
		b = append(b, name...)
		b = append(b, " = EXCLUDED."...)
		b = append(b, name...)
	}
	return b
}
// appendSetValues emits "col = VALUES(col)" assignments for every data field;
// used for MySQL's ON DUPLICATE KEY UPDATE without explicit Set calls.
func (q *InsertQuery) appendSetValues(b []byte, fields []*schema.Field) []byte {
	b = append(b, " "...)
	for i, field := range fields {
		if i != 0 {
			b = append(b, ", "...)
		}
		name := field.SQLName
		b = append(b, name...)
		b = append(b, " = VALUES("...)
		b = append(b, name...)
		b = append(b, ")"...)
	}
	return b
}
//------------------------------------------------------------------------------

// Scan executes the query and scans the returned rows into dest.
func (q *InsertQuery) Scan(ctx context.Context, dest ...any) error {
	_, err := q.scanOrExec(ctx, dest, true)
	return err
}

// Exec executes the query; when dest is provided the result is also scanned into it.
func (q *InsertQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	return q.scanOrExec(ctx, dest, len(dest) > 0)
}
// scanOrExec generates the INSERT statement and either scans the result into
// dest (when a destination is given or the dialect returns rows via
// RETURNING/OUTPUT) or executes it, falling back to LastInsertId to populate
// an auto-increment primary key. Before/AfterInsert hooks run around the call
// when a model table is set.
func (q *InsertQuery) scanOrExec(
	ctx context.Context, dest []any, hasDest bool,
) (sql.Result, error) {
	if q.err != nil {
		return nil, q.err
	}

	if q.table != nil {
		if err := q.beforeInsertHook(ctx); err != nil {
			return nil, err
		}
	}

	// Run append model hooks before generating the query.
	if err := q.beforeAppendModel(ctx, q); err != nil {
		return nil, err
	}

	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	// Generate the query before checking hasReturning.
	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.InsertReturning|feature.Output))
	var model Model

	if useScan {
		var err error
		model, err = q.getModel(dest)
		if err != nil {
			return nil, err
		}
	}

	query := internal.String(queryBytes)

	var res sql.Result

	if useScan {
		res, err = q.scan(ctx, q, query, model, hasDest)
		if err != nil {
			return nil, err
		}
	} else {
		res, err = q.exec(ctx, q, query)
		if err != nil {
			return nil, err
		}

		// No RETURNING/OUTPUT path: try to back-fill the auto-increment PK
		// from the driver-reported last insert id.
		if err := q.tryLastInsertID(res, dest); err != nil {
			return nil, err
		}
	}

	if q.table != nil {
		if err := q.afterInsertHook(ctx); err != nil {
			return nil, err
		}
	}

	return res, nil
}
// beforeInsertHook invokes the model's BeforeInsert hook, if implemented.
func (q *InsertQuery) beforeInsertHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(BeforeInsertHook); ok {
		if err := hook.BeforeInsert(ctx, q); err != nil {
			return err
		}
	}
	return nil
}

// afterInsertHook invokes the model's AfterInsert hook, if implemented.
func (q *InsertQuery) afterInsertHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(AfterInsertHook); ok {
		if err := hook.AfterInsert(ctx, q); err != nil {
			return err
		}
	}
	return nil
}
// tryLastInsertID populates the model's auto-increment primary key from
// sql.Result.LastInsertId. It only applies when the dialect has no
// RETURNING/OUTPUT support and the table has exactly one auto-increment PK.
func (q *InsertQuery) tryLastInsertID(res sql.Result, dest []any) error {
	if q.db.HasFeature(feature.Returning) ||
		q.db.HasFeature(feature.Output) ||
		q.table == nil ||
		len(q.table.PKs) != 1 ||
		!q.table.PKs[0].AutoIncrement {
		return nil
	}

	id, err := res.LastInsertId()
	if err != nil {
		return err
	}
	// An id of 0 means no id was generated; nothing to assign.
	if id == 0 {
		return nil
	}

	model, err := q.getModel(dest)
	if err != nil {
		return err
	}

	pk := q.table.PKs[0]
	switch model := model.(type) {
	case *structTableModel:
		if err := pk.ScanValue(model.strct, id); err != nil {
			return err
		}
	case *sliceTableModel:
		// Assigns consecutive ids starting from the reported one — assumes the
		// driver returns the first id of the inserted batch; TODO confirm this
		// holds for the target dialect.
		sliceLen := model.slice.Len()
		for i := 0; i < sliceLen; i++ {
			strct := indirect(model.slice.Index(i))
			if err := pk.ScanValue(strct, id); err != nil {
				return err
			}
			id++
		}
	}

	return nil
}
// String returns the generated SQL query string. The InsertQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
// It panics if query generation fails.
func (q *InsertQuery) String() string {
	buf, err := q.AppendQuery(q.db.QueryGen(), nil)
	if err != nil {
		panic(err)
	}

	return string(buf)
}

351
vendor/github.com/uptrace/bun/query_merge.go generated vendored Normal file
View File

@@ -0,0 +1,351 @@
package bun
import (
"context"
"database/sql"
"errors"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// MergeQuery builds a MERGE statement. Only the MSSQL and PG dialects are
// accepted; any other dialect causes an error to be stored on the query.
type MergeQuery struct {
	baseQuery
	returningQuery

	using schema.QueryWithArgs
	on    schema.QueryWithArgs
	when  []schema.QueryAppender

	comment string
}

var _ Query = (*MergeQuery)(nil)

// NewMergeQuery returns a new MERGE query bound to db. Dialects other than
// MSSQL and PG are rejected with an error that surfaces on execution.
func NewMergeQuery(db *DB) *MergeQuery {
	q := &MergeQuery{
		baseQuery: baseQuery{
			db: db,
		},
	}
	if q.db.dialect.Name() != dialect.MSSQL && q.db.dialect.Name() != dialect.PG {
		q.setErr(errors.New("bun: merge not supported for current dialect"))
	}
	return q
}
// Conn sets the connection or transaction the query will run on.
func (q *MergeQuery) Conn(db IConn) *MergeQuery {
	q.setConn(db)
	return q
}

// Model sets the model used as the merge target.
func (q *MergeQuery) Model(model any) *MergeQuery {
	q.setModel(model)
	return q
}

// Err stores err on the query; it is returned when the query runs.
func (q *MergeQuery) Err(err error) *MergeQuery {
	q.setErr(err)
	return q
}

// Apply calls each function in fns, passing the MergeQuery as an argument.
func (q *MergeQuery) Apply(fns ...func(*MergeQuery) *MergeQuery) *MergeQuery {
	for _, fn := range fns {
		if fn != nil {
			q = fn(q)
		}
	}
	return q
}

// With adds a CTE (WITH clause) to the query.
func (q *MergeQuery) With(name string, query Query) *MergeQuery {
	q.addWith(NewWithQuery(name, query))
	return q
}

// WithRecursive adds a recursive CTE to the query.
func (q *MergeQuery) WithRecursive(name string, query Query) *MergeQuery {
	q.addWith(NewWithQuery(name, query).Recursive())
	return q
}

// WithQuery adds a prepared WithQuery CTE to the query.
func (q *MergeQuery) WithQuery(query *WithQuery) *MergeQuery {
	q.addWith(query)
	return q
}
// ------------------------------------------------------------------------------

// Table adds one or more target tables by name.
func (q *MergeQuery) Table(tables ...string) *MergeQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}

// TableExpr adds a target table as a raw SQL expression.
func (q *MergeQuery) TableExpr(query string, args ...any) *MergeQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the model's table name with a raw SQL expression.
func (q *MergeQuery) ModelTableExpr(query string, args ...any) *MergeQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}
//------------------------------------------------------------------------------

// Returning adds a RETURNING clause to the query.
//
// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`.
// Supported for PostgreSQL 17+ and MSSQL (via OUTPUT clause)
func (q *MergeQuery) Returning(query string, args ...any) *MergeQuery {
	q.addReturning(schema.SafeQuery(query, args))
	return q
}

//------------------------------------------------------------------------------

// Using sets the USING source for the merge.
func (q *MergeQuery) Using(s string, args ...any) *MergeQuery {
	q.using = schema.SafeQuery(s, args)
	return q
}

// On sets the ON join condition between target and source.
func (q *MergeQuery) On(s string, args ...any) *MergeQuery {
	q.on = schema.SafeQuery(s, args)
	return q
}

// WhenInsert for when insert clause.
func (q *MergeQuery) WhenInsert(expr string, fn func(q *InsertQuery) *InsertQuery) *MergeQuery {
	sq := NewInsertQuery(q.db)
	// apply the model as default into sub query, since appendColumnsValues required
	if q.model != nil {
		sq = sq.Model(q.model)
	}
	sq = sq.Apply(fn)
	q.when = append(q.when, &whenInsert{expr: expr, query: sq})
	return q
}

// WhenUpdate for when update clause.
func (q *MergeQuery) WhenUpdate(expr string, fn func(q *UpdateQuery) *UpdateQuery) *MergeQuery {
	sq := NewUpdateQuery(q.db)
	// apply the model as default into sub query
	if q.model != nil {
		sq = sq.Model(q.model)
	}
	sq = sq.Apply(fn)
	q.when = append(q.when, &whenUpdate{expr: expr, query: sq})
	return q
}

// WhenDelete for when delete clause.
func (q *MergeQuery) WhenDelete(expr string) *MergeQuery {
	q.when = append(q.when, &whenDelete{expr: expr})
	return q
}

// When for raw expression clause.
func (q *MergeQuery) When(expr string, args ...any) *MergeQuery {
	q.when = append(q.when, schema.SafeQuery(expr, args))
	return q
}
//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *MergeQuery) Comment(comment string) *MergeQuery {
	q.comment = comment
	return q
}

//------------------------------------------------------------------------------

// Operation returns the SQL verb this query represents.
func (q *MergeQuery) Operation() string {
	return "MERGE"
}
// AppendQuery generates the full MERGE statement: target table, USING source,
// ON condition, WHEN clauses, and an optional OUTPUT/RETURNING clause.
func (q *MergeQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	b = appendComment(b, q.comment)

	gen = formatterWithModel(gen, q)

	b, err = q.appendWith(gen, b)
	if err != nil {
		return nil, err
	}

	b = append(b, "MERGE "...)
	// The PG dialect emits "MERGE INTO"; MSSQL gets a bare "MERGE".
	if q.db.dialect.Name() == dialect.PG {
		b = append(b, "INTO "...)
	}

	b, err = q.appendFirstTableWithAlias(gen, b)
	if err != nil {
		return nil, err
	}

	b = append(b, " USING "...)
	b, err = q.using.AppendQuery(gen, b)
	if err != nil {
		return nil, err
	}

	b = append(b, " ON "...)
	b, err = q.on.AppendQuery(gen, b)
	if err != nil {
		return nil, err
	}

	for _, w := range q.when {
		b = append(b, " WHEN "...)
		b, err = w.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}

	// MSSQL-style OUTPUT clause.
	if q.hasFeature(feature.Output) && q.hasReturning() {
		b = append(b, " OUTPUT "...)
		b, err = q.appendOutput(gen, b)
		if err != nil {
			return nil, err
		}
	}

	// PostgreSQL-style RETURNING clause.
	if q.hasFeature(feature.MergeReturning) && q.hasReturning() {
		b = append(b, " RETURNING "...)
		b, err = q.appendReturning(gen, b)
		if err != nil {
			return nil, err
		}
	}

	// A MERGE statement must be terminated by a semi-colon (;).
	b = append(b, ";"...)

	return b, nil
}
//------------------------------------------------------------------------------

// Scan executes the query and scans the returned rows into dest.
func (q *MergeQuery) Scan(ctx context.Context, dest ...any) error {
	_, err := q.scanOrExec(ctx, dest, true)
	return err
}

// Exec executes the query; when dest is provided the result is also scanned into it.
func (q *MergeQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	return q.scanOrExec(ctx, dest, len(dest) > 0)
}
// scanOrExec generates the MERGE statement and either scans the result rows
// into dest (when a destination is given or the dialect returns rows) or
// simply executes it.
func (q *MergeQuery) scanOrExec(
	ctx context.Context, dest []any, hasDest bool,
) (sql.Result, error) {
	if q.err != nil {
		return nil, q.err
	}

	// Run append model hooks before generating the query.
	if err := q.beforeAppendModel(ctx, q); err != nil {
		return nil, err
	}

	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	// Generate the query before checking hasReturning.
	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.InsertReturning|feature.MergeReturning|feature.Output))
	var model Model

	if useScan {
		var err error
		model, err = q.getModel(dest)
		if err != nil {
			return nil, err
		}
	}

	query := internal.String(queryBytes)

	var res sql.Result

	if useScan {
		res, err = q.scan(ctx, q, query, model, true)
		if err != nil {
			return nil, err
		}
	} else {
		res, err = q.exec(ctx, q, query)
		if err != nil {
			return nil, err
		}
	}

	return res, nil
}
// String returns the generated SQL query string. The MergeQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
// It panics if query generation fails.
func (q *MergeQuery) String() string {
	buf, err := q.AppendQuery(q.db.QueryGen(), nil)
	if err != nil {
		panic(err)
	}

	return string(buf)
}
//------------------------------------------------------------------------------

// whenInsert renders "WHEN <expr> THEN INSERT ...", delegating the
// column/value list to a sub InsertQuery.
type whenInsert struct {
	expr  string
	query *InsertQuery
}

func (w *whenInsert) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	b = append(b, w.expr...)
	if w.query != nil {
		b = append(b, " THEN INSERT"...)
		b, err = w.query.appendColumnsValues(gen, b, true)
		if err != nil {
			return nil, err
		}
	}
	return b, nil
}

// whenUpdate renders "WHEN <expr> THEN UPDATE SET ...", delegating the
// assignments to a sub UpdateQuery.
type whenUpdate struct {
	expr  string
	query *UpdateQuery
}

func (w *whenUpdate) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	b = append(b, w.expr...)
	if w.query != nil {
		b = append(b, " THEN UPDATE SET "...)
		b, err = w.query.appendSet(gen, b)
		if err != nil {
			return nil, err
		}
	}
	return b, nil
}

// whenDelete renders "WHEN <expr> THEN DELETE".
type whenDelete struct {
	expr string
}

func (w *whenDelete) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	b = append(b, w.expr...)
	b = append(b, " THEN DELETE"...)
	return b, nil
}

107
vendor/github.com/uptrace/bun/query_raw.go generated vendored Normal file
View File

@@ -0,0 +1,107 @@
package bun
import (
"context"
"database/sql"
"github.com/uptrace/bun/schema"
)
// RawQuery executes an arbitrary SQL string with placeholder arguments,
// optionally scanning the rows into destination values.
type RawQuery struct {
	baseQuery

	query string
	args  []any

	comment string
}

// NewRawQuery returns a new raw query for the given SQL string and args.
func NewRawQuery(db *DB, query string, args ...any) *RawQuery {
	return &RawQuery{
		baseQuery: baseQuery{
			db: db,
		},
		query: query,
		args:  args,
	}
}
// Conn sets the connection or transaction the query will run on.
func (q *RawQuery) Conn(db IConn) *RawQuery {
	q.setConn(db)
	return q
}

// Err stores err on the query; it is returned when the query runs.
func (q *RawQuery) Err(err error) *RawQuery {
	q.setErr(err)
	return q
}

// Exec executes the query; when dest is provided the rows are scanned into it.
func (q *RawQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	return q.scanOrExec(ctx, dest, len(dest) > 0)
}

// Scan executes the query and scans the returned rows into dest.
func (q *RawQuery) Scan(ctx context.Context, dest ...any) error {
	_, err := q.scanOrExec(ctx, dest, true)
	return err
}

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *RawQuery) Comment(comment string) *RawQuery {
	q.comment = comment
	return q
}
// scanOrExec formats the raw query and either scans the rows into dest or
// simply executes it when no destination was given.
func (q *RawQuery) scanOrExec(
	ctx context.Context, dest []any, hasDest bool,
) (sql.Result, error) {
	if q.err != nil {
		return nil, q.err
	}

	var model Model
	var err error

	if hasDest {
		model, err = q.getModel(dest)
		if err != nil {
			return nil, err
		}
	}

	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	query := q.db.format(q.query, q.args)
	var res sql.Result

	if hasDest {
		res, err = q.scan(ctx, q, query, model, hasDest)
	} else {
		res, err = q.exec(ctx, q, query)
	}

	if err != nil {
		return nil, err
	}

	return res, nil
}
// AppendQuery formats the raw query string with its arguments, prefixed by the
// optional comment.
func (q *RawQuery) AppendQuery(gen schema.QueryGen, b []byte) ([]byte, error) {
	b = appendComment(b, q.comment)
	return gen.AppendQuery(b, q.query, q.args...), nil
}

// Operation reports "SELECT" for raw queries — the actual SQL verb is not
// parsed from the query string.
func (q *RawQuery) Operation() string {
	return "SELECT"
}

// String returns the generated SQL query string. The RawQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
// It panics if query generation fails.
func (q *RawQuery) String() string {
	buf, err := q.AppendQuery(q.db.QueryGen(), nil)
	if err != nil {
		panic(err)
	}

	return string(buf)
}

1376
vendor/github.com/uptrace/bun/query_select.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

427
vendor/github.com/uptrace/bun/query_table_create.go generated vendored Normal file
View File

@@ -0,0 +1,427 @@
package bun
import (
"bytes"
"context"
"database/sql"
"fmt"
"slices"
"strconv"
"strings"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/dialect/sqltype"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// CreateTableQuery builds a CREATE TABLE statement from a model's schema.
type CreateTableQuery struct {
	baseQuery

	temp        bool
	ifNotExists bool
	fksFromRel  bool // Create foreign keys captured in table's relations.

	// varchar changes the default length for VARCHAR columns.
	// Because some dialects require that length is always specified for VARCHAR type,
	// we will use the exact user-defined type if length is set explicitly, as in `bun:",type:varchar(5)"`,
	// but assume the new default length when it's omitted, e.g. `bun:",type:varchar"`.
	varchar int

	fks         []schema.QueryWithArgs
	partitionBy schema.QueryWithArgs
	tablespace  schema.QueryWithArgs

	comment string
}

var _ Query = (*CreateTableQuery)(nil)

// NewCreateTableQuery returns a new CREATE TABLE query bound to db, using the
// dialect's default VARCHAR length.
func NewCreateTableQuery(db *DB) *CreateTableQuery {
	q := &CreateTableQuery{
		baseQuery: baseQuery{
			db: db,
		},
		varchar: db.Dialect().DefaultVarcharLen(),
	}
	return q
}
// Conn sets the connection or transaction the query will run on.
func (q *CreateTableQuery) Conn(db IConn) *CreateTableQuery {
	q.setConn(db)
	return q
}

// Model sets the model whose schema defines the table.
func (q *CreateTableQuery) Model(model any) *CreateTableQuery {
	q.setModel(model)
	return q
}

// Err stores err on the query; it is returned when the query runs.
func (q *CreateTableQuery) Err(err error) *CreateTableQuery {
	q.setErr(err)
	return q
}

// ------------------------------------------------------------------------------

// Table adds one or more table names.
func (q *CreateTableQuery) Table(tables ...string) *CreateTableQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}

// TableExpr adds a table as a raw SQL expression.
func (q *CreateTableQuery) TableExpr(query string, args ...any) *CreateTableQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the model's table name with a raw SQL expression.
func (q *CreateTableQuery) ModelTableExpr(query string, args ...any) *CreateTableQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}

// ColumnExpr adds a custom column definition as a raw SQL expression.
func (q *CreateTableQuery) ColumnExpr(query string, args ...any) *CreateTableQuery {
	q.addColumn(schema.SafeQuery(query, args))
	return q
}
// ------------------------------------------------------------------------------

// Temp makes the query create a TEMP table.
func (q *CreateTableQuery) Temp() *CreateTableQuery {
	q.temp = true
	return q
}

// IfNotExists adds IF NOT EXISTS (emitted only when the dialect supports it).
func (q *CreateTableQuery) IfNotExists() *CreateTableQuery {
	q.ifNotExists = true
	return q
}

// Varchar sets default length for VARCHAR columns. Non-positive lengths are
// rejected with an error stored on the query.
func (q *CreateTableQuery) Varchar(n int) *CreateTableQuery {
	if n <= 0 {
		q.setErr(fmt.Errorf("bun: illegal VARCHAR length: %d", n))
		return q
	}
	q.varchar = n
	return q
}

// ForeignKey adds a raw FOREIGN KEY clause.
func (q *CreateTableQuery) ForeignKey(query string, args ...any) *CreateTableQuery {
	q.fks = append(q.fks, schema.SafeQuery(query, args))
	return q
}

// PartitionBy adds a PARTITION BY clause.
func (q *CreateTableQuery) PartitionBy(query string, args ...any) *CreateTableQuery {
	q.partitionBy = schema.SafeQuery(query, args)
	return q
}

// TableSpace adds a TABLESPACE clause.
func (q *CreateTableQuery) TableSpace(tablespace string) *CreateTableQuery {
	q.tablespace = schema.UnsafeIdent(tablespace)
	return q
}

// WithForeignKeys adds a FOREIGN KEY clause for each of the model's existing relations.
func (q *CreateTableQuery) WithForeignKeys() *CreateTableQuery {
	q.fksFromRel = true
	return q
}

//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *CreateTableQuery) Comment(comment string) *CreateTableQuery {
	q.comment = comment
	return q
}

// ------------------------------------------------------------------------------

// Operation returns the SQL verb this query represents.
func (q *CreateTableQuery) Operation() string {
	return "CREATE TABLE"
}
// AppendQuery generates the CREATE TABLE statement: column definitions,
// primary-key/unique/foreign-key constraints, and the optional PARTITION BY
// and TABLESPACE clauses. A model is required (errNilModel otherwise), since
// the column definitions come from the table schema.
func (q *CreateTableQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	b = appendComment(b, q.comment)

	if q.table == nil {
		return nil, errNilModel
	}

	b = append(b, "CREATE "...)
	if q.temp {
		b = append(b, "TEMP "...)
	}
	b = append(b, "TABLE "...)
	if q.ifNotExists && gen.HasFeature(feature.TableNotExists) {
		b = append(b, "IF NOT EXISTS "...)
	}
	b, err = q.appendFirstTable(gen, b)
	if err != nil {
		return nil, err
	}

	b = append(b, " ("...)

	// Emit one definition per schema field: name, type, NOT NULL,
	// identity/auto-increment sequence, and DEFAULT.
	for i, field := range q.table.Fields {
		if i > 0 {
			b = append(b, ", "...)
		}

		b = append(b, field.SQLName...)
		b = append(b, " "...)
		b = q.appendSQLType(b, field)
		if field.NotNull && q.db.dialect.Name() != dialect.Oracle {
			b = append(b, " NOT NULL"...)
		}

		if (field.Identity && gen.HasFeature(feature.GeneratedIdentity)) ||
			(field.AutoIncrement && (gen.HasFeature(feature.AutoIncrement) || gen.HasFeature(feature.Identity))) {
			b = q.db.dialect.AppendSequence(b, q.table, field)
		}

		if field.SQLDefault != "" {
			b = append(b, " DEFAULT "...)
			b = append(b, field.SQLDefault...)
		}
	}

	for i, col := range q.columns {
		// Only pre-pend the comma if we are on subsequent iterations, or if there were fields/columns appended before
		// this. This way if we are only appending custom column expressions we will not produce a syntax error with a
		// leading comma.
		if i > 0 || len(q.table.Fields) > 0 {
			b = append(b, ", "...)
		}
		b, err = col.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}

	// In SQLite AUTOINCREMENT is only valid for INTEGER PRIMARY KEY columns, so it might be that
	// a primary key constraint has already been created in dialect.AppendSequence() call above.
	// See sqldialect.Dialect.AppendSequence() for more details.
	if len(q.table.PKs) > 0 && !bytes.Contains(b, []byte("PRIMARY KEY")) {
		b = q.appendPKConstraint(b, q.table.PKs)
	}
	b = q.appendUniqueConstraints(gen, b)

	if q.fksFromRel {
		b, err = q.appendFKConstraintsRel(gen, b)
		if err != nil {
			return nil, err
		}
	}
	b, err = q.appendFKConstraints(gen, b)
	if err != nil {
		return nil, err
	}

	b = append(b, ")"...)

	if !q.partitionBy.IsZero() {
		b = append(b, " PARTITION BY "...)
		b, err = q.partitionBy.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}

	if !q.tablespace.IsZero() {
		b = append(b, " TABLESPACE "...)
		b, err = q.tablespace.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}
// appendSQLType appends the column's SQL type, substituting the configured
// default VARCHAR length when the user did not specify one explicitly.
func (q *CreateTableQuery) appendSQLType(b []byte, field *schema.Field) []byte {
	// Most of the time these two will match, but for the cases where DiscoveredSQLType is dialect-specific,
	// e.g. pgdialect would change sqltype.SmallInt to pgTypeSmallSerial for columns that have `bun:",autoincrement"`
	if !strings.EqualFold(field.CreateTableSQLType, field.DiscoveredSQLType) {
		return append(b, field.CreateTableSQLType...)
	}

	// For all common SQL types except VARCHAR, both UserDefinedSQLType and DiscoveredSQLType specify the correct type,
	// and we needn't modify it. For VARCHAR columns, we will stop to check if a valid length has been set in .Varchar(int).
	if !strings.EqualFold(field.CreateTableSQLType, sqltype.VarChar) || q.varchar <= 0 {
		return append(b, field.CreateTableSQLType...)
	}

	// Oracle spells the type VARCHAR2; everyone else gets VARCHAR(n).
	if q.db.dialect.Name() == dialect.Oracle {
		b = append(b, "VARCHAR2"...)
	} else {
		b = append(b, sqltype.VarChar...)
	}

	b = append(b, "("...)
	b = strconv.AppendInt(b, int64(q.varchar), 10)
	b = append(b, ")"...)
	return b
}
// appendUniqueConstraints appends UNIQUE constraints, iterating group names in
// sorted order for deterministic output.
func (q *CreateTableQuery) appendUniqueConstraints(gen schema.QueryGen, b []byte) []byte {
	unique := q.table.Unique

	keys := make([]string, 0, len(unique))
	for key := range unique {
		keys = append(keys, key)
	}
	slices.Sort(keys)

	for _, key := range keys {
		if key == "" {
			// Unnamed group: each field gets its own single-column constraint.
			for _, field := range unique[key] {
				b = q.appendUniqueConstraint(gen, b, key, field)
			}
			continue
		}
		// Named group: one composite constraint over all its fields.
		b = q.appendUniqueConstraint(gen, b, key, unique[key]...)
	}

	return b
}

// appendUniqueConstraint appends a single (optionally named) UNIQUE constraint.
func (q *CreateTableQuery) appendUniqueConstraint(
	gen schema.QueryGen, b []byte, name string, fields ...*schema.Field,
) []byte {
	if name != "" {
		b = append(b, ", CONSTRAINT "...)
		b = gen.AppendIdent(b, name)
	} else {
		b = append(b, ","...)
	}
	b = append(b, " UNIQUE ("...)
	b = appendColumns(b, "", fields)
	b = append(b, ")"...)

	return b
}
// appendFKConstraintsRel appends a FOREIGN KEY clause for each of the model's existing relations.
func (q *CreateTableQuery) appendFKConstraintsRel(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	relations := q.tableModel.Table().Relations

	// Iterate relation names in sorted order for deterministic output.
	keys := make([]string, 0, len(relations))
	for key := range relations {
		keys = append(keys, key)
	}
	slices.Sort(keys)

	for _, key := range keys {
		if rel := relations[key]; rel.References() {
			query := "(?) REFERENCES ? (?)"
			args := []any{
				Safe(appendColumns(nil, "", rel.BasePKs)),
				rel.JoinTable.SQLName,
				Safe(appendColumns(nil, "", rel.JoinPKs)),
			}

			// Optional referential actions (e.g. ON UPDATE / ON DELETE rules).
			if len(rel.OnUpdate) > 0 {
				query += " ?"
				args = append(args, Safe(rel.OnUpdate))
			}
			if len(rel.OnDelete) > 0 {
				query += " ?"
				args = append(args, Safe(rel.OnDelete))
			}

			b, err = q.appendFK(gen, b, schema.QueryWithArgs{
				Query: query,
				Args:  args,
			})
			if err != nil {
				return nil, err
			}
		}
	}
	return b, nil
}

// appendFK appends a single ", FOREIGN KEY ..." clause.
func (q *CreateTableQuery) appendFK(gen schema.QueryGen, b []byte, fk schema.QueryWithArgs) (_ []byte, err error) {
	b = append(b, ", FOREIGN KEY "...)
	return fk.AppendQuery(gen, b)
}
// appendFKConstraints appends the raw FOREIGN KEY clauses added via ForeignKey.
func (q *CreateTableQuery) appendFKConstraints(
	gen schema.QueryGen, b []byte,
) (_ []byte, err error) {
	for _, fk := range q.fks {
		if b, err = q.appendFK(gen, b, fk); err != nil {
			return nil, err
		}
	}
	return b, nil
}

// appendPKConstraint appends a composite ", PRIMARY KEY (...)" constraint.
func (q *CreateTableQuery) appendPKConstraint(b []byte, pks []*schema.Field) []byte {
	b = append(b, ", PRIMARY KEY ("...)
	b = appendColumns(b, "", pks)
	b = append(b, ")"...)
	return b
}
// ------------------------------------------------------------------------------

// Exec generates the CREATE TABLE statement and executes it, running the
// model's Before/AfterCreateTable hooks around the call when a model is set.
func (q *CreateTableQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	// Guard against a nil table (no model set): previously the before-hook was
	// invoked unconditionally and dereferenced q.table.ZeroIface, panicking
	// before AppendQuery could return the proper errNilModel error. This
	// matches the nil check already done for the after-hook below.
	if q.table != nil {
		if err := q.beforeCreateTableHook(ctx); err != nil {
			return nil, err
		}
	}

	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	query := internal.String(queryBytes)

	res, err := q.exec(ctx, q, query)
	if err != nil {
		return nil, err
	}

	if q.table != nil {
		if err := q.afterCreateTableHook(ctx); err != nil {
			return nil, err
		}
	}

	return res, nil
}
// beforeCreateTableHook invokes the model's BeforeCreateTable hook, if implemented.
func (q *CreateTableQuery) beforeCreateTableHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(BeforeCreateTableHook); ok {
		if err := hook.BeforeCreateTable(ctx, q); err != nil {
			return err
		}
	}
	return nil
}

// afterCreateTableHook invokes the model's AfterCreateTable hook, if implemented.
func (q *CreateTableQuery) afterCreateTableHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(AfterCreateTableHook); ok {
		if err := hook.AfterCreateTable(ctx, q); err != nil {
			return err
		}
	}
	return nil
}
// String returns the generated SQL query string. The CreateTableQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
// It panics if query generation fails.
func (q *CreateTableQuery) String() string {
	buf, err := q.AppendQuery(q.db.QueryGen(), nil)
	if err != nil {
		panic(err)
	}

	return string(buf)
}

176
vendor/github.com/uptrace/bun/query_table_drop.go generated vendored Normal file
View File

@@ -0,0 +1,176 @@
package bun
import (
"context"
"database/sql"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// DropTableQuery builds a DROP TABLE statement.
type DropTableQuery struct {
	baseQuery
	cascadeQuery

	ifExists bool

	comment string
}

var _ Query = (*DropTableQuery)(nil)

// NewDropTableQuery returns a new DROP TABLE query bound to db.
func NewDropTableQuery(db *DB) *DropTableQuery {
	q := &DropTableQuery{
		baseQuery: baseQuery{
			db: db,
		},
	}
	return q
}
// Conn sets the connection or transaction the query will run on.
func (q *DropTableQuery) Conn(db IConn) *DropTableQuery {
	q.setConn(db)
	return q
}

// Model sets the model whose table will be dropped.
func (q *DropTableQuery) Model(model any) *DropTableQuery {
	q.setModel(model)
	return q
}

// Err stores err on the query; it is returned when the query runs.
func (q *DropTableQuery) Err(err error) *DropTableQuery {
	q.setErr(err)
	return q
}

//------------------------------------------------------------------------------

// Table adds one or more table names to drop.
func (q *DropTableQuery) Table(tables ...string) *DropTableQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}

// TableExpr adds a table as a raw SQL expression.
func (q *DropTableQuery) TableExpr(query string, args ...any) *DropTableQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the model's table name with a raw SQL expression.
func (q *DropTableQuery) ModelTableExpr(query string, args ...any) *DropTableQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}

//------------------------------------------------------------------------------

// IfExists adds IF EXISTS.
func (q *DropTableQuery) IfExists() *DropTableQuery {
	q.ifExists = true
	return q
}

// Cascade adds CASCADE.
func (q *DropTableQuery) Cascade() *DropTableQuery {
	q.cascade = true
	return q
}

// Restrict adds RESTRICT.
func (q *DropTableQuery) Restrict() *DropTableQuery {
	q.restrict = true
	return q
}

//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DropTableQuery) Comment(comment string) *DropTableQuery {
	q.comment = comment
	return q
}
//------------------------------------------------------------------------------

// Operation returns the SQL verb this query represents.
func (q *DropTableQuery) Operation() string {
	return "DROP TABLE"
}

// AppendQuery generates the DROP TABLE statement, including the optional
// IF EXISTS and CASCADE/RESTRICT modifiers.
func (q *DropTableQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	b = appendComment(b, q.comment)

	b = append(b, "DROP TABLE "...)
	if q.ifExists {
		b = append(b, "IF EXISTS "...)
	}

	b, err = q.appendTables(gen, b)
	if err != nil {
		return nil, err
	}

	b = q.appendCascade(gen, b)

	return b, nil
}
//------------------------------------------------------------------------------

// Exec generates the DROP TABLE statement and executes it, running the
// model's Before/AfterDropTable hooks around the call when a model is set.
func (q *DropTableQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	if q.table != nil {
		if err := q.beforeDropTableHook(ctx); err != nil {
			return nil, err
		}
	}

	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	query := internal.String(queryBytes)

	res, err := q.exec(ctx, q, query)
	if err != nil {
		return nil, err
	}

	if q.table != nil {
		if err := q.afterDropTableHook(ctx); err != nil {
			return nil, err
		}
	}

	return res, nil
}

// beforeDropTableHook invokes the model's BeforeDropTable hook, if implemented.
func (q *DropTableQuery) beforeDropTableHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(BeforeDropTableHook); ok {
		if err := hook.BeforeDropTable(ctx, q); err != nil {
			return err
		}
	}
	return nil
}

// afterDropTableHook invokes the model's AfterDropTable hook, if implemented.
func (q *DropTableQuery) afterDropTableHook(ctx context.Context) error {
	if hook, ok := q.table.ZeroIface.(AfterDropTableHook); ok {
		if err := hook.AfterDropTable(ctx, q); err != nil {
			return err
		}
	}
	return nil
}

// String returns the generated SQL query string. The DropTableQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
// It panics if query generation fails.
func (q *DropTableQuery) String() string {
	buf, err := q.AppendQuery(q.db.QueryGen(), nil)
	if err != nil {
		panic(err)
	}

	return string(buf)
}

155
vendor/github.com/uptrace/bun/query_table_truncate.go generated vendored Normal file
View File

@@ -0,0 +1,155 @@
package bun
import (
"context"
"database/sql"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// TruncateTableQuery builds a TRUNCATE TABLE statement, falling back to
// DELETE FROM on dialects without truncate support.
type TruncateTableQuery struct {
	baseQuery
	cascadeQuery

	continueIdentity bool
	comment          string
}

var _ Query = (*TruncateTableQuery)(nil)

// NewTruncateTableQuery returns a new TRUNCATE TABLE query bound to db.
func NewTruncateTableQuery(db *DB) *TruncateTableQuery {
	q := &TruncateTableQuery{
		baseQuery: baseQuery{
			db: db,
		},
	}
	return q
}

// Conn sets the connection or transaction the query will run on.
func (q *TruncateTableQuery) Conn(db IConn) *TruncateTableQuery {
	q.setConn(db)
	return q
}

// Model sets the model whose table will be truncated.
func (q *TruncateTableQuery) Model(model any) *TruncateTableQuery {
	q.setModel(model)
	return q
}

// Err stores err on the query; it is returned when the query runs.
func (q *TruncateTableQuery) Err(err error) *TruncateTableQuery {
	q.setErr(err)
	return q
}
//------------------------------------------------------------------------------

// Table adds one or more table names to truncate.
func (q *TruncateTableQuery) Table(tables ...string) *TruncateTableQuery {
	for _, table := range tables {
		q.addTable(schema.UnsafeIdent(table))
	}
	return q
}

// TableExpr adds a table as a raw SQL expression.
func (q *TruncateTableQuery) TableExpr(query string, args ...any) *TruncateTableQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the model's table name with a raw SQL expression.
func (q *TruncateTableQuery) ModelTableExpr(query string, args ...any) *TruncateTableQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}

//------------------------------------------------------------------------------

// ContinueIdentity emits CONTINUE IDENTITY instead of the default RESTART IDENTITY.
func (q *TruncateTableQuery) ContinueIdentity() *TruncateTableQuery {
	q.continueIdentity = true
	return q
}

// Cascade adds CASCADE.
func (q *TruncateTableQuery) Cascade() *TruncateTableQuery {
	q.cascade = true
	return q
}

// Restrict adds RESTRICT.
func (q *TruncateTableQuery) Restrict() *TruncateTableQuery {
	q.restrict = true
	return q
}

//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *TruncateTableQuery) Comment(comment string) *TruncateTableQuery {
	q.comment = comment
	return q
}

//------------------------------------------------------------------------------

// Operation returns the SQL verb this query represents.
func (q *TruncateTableQuery) Operation() string {
	return "TRUNCATE TABLE"
}
// AppendQuery generates the TRUNCATE TABLE statement. Dialects without
// truncate support get DELETE FROM instead; dialects with identity support
// get a RESTART/CONTINUE IDENTITY modifier.
func (q *TruncateTableQuery) AppendQuery(
	gen schema.QueryGen, b []byte,
) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	b = appendComment(b, q.comment)

	if !gen.HasFeature(feature.TableTruncate) {
		// Fallback for dialects without TRUNCATE TABLE.
		b = append(b, "DELETE FROM "...)

		b, err = q.appendTables(gen, b)
		if err != nil {
			return nil, err
		}

		return b, nil
	}

	b = append(b, "TRUNCATE TABLE "...)

	b, err = q.appendTables(gen, b)
	if err != nil {
		return nil, err
	}

	if q.db.HasFeature(feature.TableIdentity) {
		if q.continueIdentity {
			b = append(b, " CONTINUE IDENTITY"...)
		} else {
			// Default behavior: restart identity sequences.
			b = append(b, " RESTART IDENTITY"...)
		}
	}

	b = q.appendCascade(gen, b)

	return b, nil
}
//------------------------------------------------------------------------------

// Exec generates the statement and executes it. Unlike the other DDL queries
// in this package, truncate runs no model hooks.
func (q *TruncateTableQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)

	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}

	query := internal.String(queryBytes)

	res, err := q.exec(ctx, q, query)
	if err != nil {
		return nil, err
	}

	return res, nil
}

677
vendor/github.com/uptrace/bun/query_update.go generated vendored Normal file
View File

@@ -0,0 +1,677 @@
package bun
import (
"context"
"database/sql"
"errors"
"fmt"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// UpdateQuery builds an UPDATE statement.
type UpdateQuery struct {
	whereBaseQuery
	orderLimitOffsetQuery
	returningQuery
	setQuery
	idxHintsQuery

	joins []joinQuery

	comment string
}

var _ Query = (*UpdateQuery)(nil)

// NewUpdateQuery returns a new UPDATE query bound to db.
func NewUpdateQuery(db *DB) *UpdateQuery {
	q := &UpdateQuery{
		whereBaseQuery: whereBaseQuery{
			baseQuery: baseQuery{
				db: db,
			},
		},
	}
	return q
}
// Conn sets the connection or transaction the query will run on.
func (q *UpdateQuery) Conn(db IConn) *UpdateQuery {
	q.setConn(db)
	return q
}

// Model sets the model to update.
func (q *UpdateQuery) Model(model any) *UpdateQuery {
	q.setModel(model)
	return q
}

// Err stores err on the query; it is returned when the query runs.
func (q *UpdateQuery) Err(err error) *UpdateQuery {
	q.setErr(err)
	return q
}

// Apply calls each function in fns, passing the UpdateQuery as an argument.
func (q *UpdateQuery) Apply(fns ...func(*UpdateQuery) *UpdateQuery) *UpdateQuery {
	for _, fn := range fns {
		if fn != nil {
			q = fn(q)
		}
	}
	return q
}

// With adds a CTE (WITH clause) to the query.
func (q *UpdateQuery) With(name string, query Query) *UpdateQuery {
	q.addWith(NewWithQuery(name, query))
	return q
}

// WithRecursive adds a recursive CTE to the query.
func (q *UpdateQuery) WithRecursive(name string, query Query) *UpdateQuery {
	q.addWith(NewWithQuery(name, query).Recursive())
	return q
}

// WithQuery adds a prepared WithQuery CTE to the query.
func (q *UpdateQuery) WithQuery(query *WithQuery) *UpdateQuery {
	q.addWith(query)
	return q
}
// ------------------------------------------------------------------------------

// Table adds one or more table names (quoted as identifiers) to the query.
func (q *UpdateQuery) Table(tables ...string) *UpdateQuery {
	for i := range tables {
		q.addTable(schema.UnsafeIdent(tables[i]))
	}
	return q
}

// TableExpr adds a table expression with optional args.
func (q *UpdateQuery) TableExpr(query string, args ...any) *UpdateQuery {
	q.addTable(schema.SafeQuery(query, args))
	return q
}

// ModelTableExpr overrides the model table name with a custom expression.
func (q *UpdateQuery) ModelTableExpr(query string, args ...any) *UpdateQuery {
	q.modelTableName = schema.SafeQuery(query, args)
	return q
}
//------------------------------------------------------------------------------

// Column restricts the update to the given model columns.
func (q *UpdateQuery) Column(columns ...string) *UpdateQuery {
	for i := range columns {
		q.addColumn(schema.UnsafeIdent(columns[i]))
	}
	return q
}

// ExcludeColumn removes the given columns from the update.
func (q *UpdateQuery) ExcludeColumn(columns ...string) *UpdateQuery {
	q.excludeColumn(columns)
	return q
}

// Set adds a raw SET expression, e.g. Set("name = ?", name).
func (q *UpdateQuery) Set(query string, args ...any) *UpdateQuery {
	q.addSet(schema.SafeQuery(query, args))
	return q
}

// SetColumn adds a SET expression for a single column; on dialects with
// multi-table UPDATE support the column is qualified with the table alias.
func (q *UpdateQuery) SetColumn(column string, query string, args ...any) *UpdateQuery {
	if q.db.HasFeature(feature.UpdateMultiTable) {
		column = q.table.Alias + "." + column
	}
	q.addSet(schema.SafeQuery(column+" = "+query, args))
	return q
}

// Value overwrites model value for the column.
func (q *UpdateQuery) Value(column string, query string, args ...any) *UpdateQuery {
	if q.table == nil {
		q.setErr(errNilModel)
		return q
	}
	q.addValue(q.table, column, query, args)
	return q
}

// OmitZero sets the omit-zero flag for model struct fields.
func (q *UpdateQuery) OmitZero() *UpdateQuery {
	q.omitZero = true
	return q
}
//------------------------------------------------------------------------------

// Join appends a raw JOIN clause, e.g. Join("JOIN t ON ...").
func (q *UpdateQuery) Join(join string, args ...any) *UpdateQuery {
	jq := joinQuery{join: schema.SafeQuery(join, args)}
	q.joins = append(q.joins, jq)
	return q
}

// JoinOn adds an ON condition (ANDed) to the most recent join.
func (q *UpdateQuery) JoinOn(cond string, args ...any) *UpdateQuery {
	return q.joinOn(cond, args, " AND ")
}

// JoinOnOr adds an ON condition (ORed) to the most recent join.
func (q *UpdateQuery) JoinOnOr(cond string, args ...any) *UpdateQuery {
	return q.joinOn(cond, args, " OR ")
}

// joinOn attaches cond to the last registered join, using sep as the
// separator between ON conditions.
func (q *UpdateQuery) joinOn(cond string, args []any, sep string) *UpdateQuery {
	n := len(q.joins)
	if n == 0 {
		q.setErr(errors.New("bun: query has no joins"))
		return q
	}
	last := &q.joins[n-1]
	last.on = append(last.on, schema.SafeQueryWithSep(cond, args, sep))
	return q
}
//------------------------------------------------------------------------------

// WherePK adds a WHERE condition matching the model primary keys
// (or the given columns).
func (q *UpdateQuery) WherePK(cols ...string) *UpdateQuery {
	q.addWhereCols(cols)
	return q
}

// Where adds an ANDed WHERE condition.
func (q *UpdateQuery) Where(query string, args ...any) *UpdateQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " AND "))
	return q
}

// WhereOr adds an ORed WHERE condition.
func (q *UpdateQuery) WhereOr(query string, args ...any) *UpdateQuery {
	q.addWhere(schema.SafeQueryWithSep(query, args, " OR "))
	return q
}

// WhereGroup wraps the conditions added by fn in parentheses, joined to the
// pre-existing conditions with sep.
func (q *UpdateQuery) WhereGroup(sep string, fn func(*UpdateQuery) *UpdateQuery) *UpdateQuery {
	// Collect fn's conditions into a fresh list, then re-attach them as a group.
	outer := q.where
	q.where = nil
	q = fn(q)
	grouped := q.where
	q.where = outer
	q.addWhereGroup(sep, grouped)
	return q
}

// WhereDeleted restricts the query to soft-deleted rows.
func (q *UpdateQuery) WhereDeleted() *UpdateQuery {
	q.whereDeleted()
	return q
}

// WhereAllWithDeleted includes soft-deleted rows in the query.
func (q *UpdateQuery) WhereAllWithDeleted() *UpdateQuery {
	q.whereAllWithDeleted()
	return q
}
// ------------------------------------------------------------------------------

// Order adds ORDER BY columns; it fails on dialects without
// UPDATE ... ORDER BY support.
func (q *UpdateQuery) Order(orders ...string) *UpdateQuery {
	if !q.hasFeature(feature.UpdateOrderLimit) {
		q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit))
		return q
	}
	q.addOrder(orders...)
	return q
}

// OrderExpr adds a raw ORDER BY expression; it fails on dialects without
// UPDATE ... ORDER BY support.
func (q *UpdateQuery) OrderExpr(query string, args ...any) *UpdateQuery {
	if !q.hasFeature(feature.UpdateOrderLimit) {
		q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit))
		return q
	}
	q.addOrderExpr(query, args...)
	return q
}

// Limit sets LIMIT n; it fails on dialects without UPDATE ... LIMIT support.
func (q *UpdateQuery) Limit(n int) *UpdateQuery {
	if !q.hasFeature(feature.UpdateOrderLimit) {
		q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit))
		return q
	}
	q.setLimit(n)
	return q
}
//------------------------------------------------------------------------------

// Returning adds a RETURNING clause to the query.
//
// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`.
func (q *UpdateQuery) Returning(query string, args ...any) *UpdateQuery {
	q.addReturning(schema.SafeQuery(query, args))
	return q
}

//------------------------------------------------------------------------------

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *UpdateQuery) Comment(comment string) *UpdateQuery {
	q.comment = comment
	return q
}

//------------------------------------------------------------------------------

// Operation returns the SQL keyword describing this query type.
func (q *UpdateQuery) Operation() string {
	return "UPDATE"
}
// AppendQuery renders the full UPDATE statement into b: comment, CTEs,
// target table(s), index hints, SET, secondary tables, joins, OUTPUT,
// WHERE, ORDER BY/LIMIT, and RETURNING — each part guarded by dialect
// feature flags.
func (q *UpdateQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	b = appendComment(b, q.comment)
	// Let the generator resolve model-bound placeholders.
	gen = formatterWithModel(gen, q)
	b, err = q.appendWith(gen, b)
	if err != nil {
		return nil, err
	}
	b = append(b, "UPDATE "...)
	// The table-list form depends on dialect capabilities.
	if gen.HasFeature(feature.UpdateMultiTable) {
		b, err = q.appendTablesWithAlias(gen, b)
	} else if gen.HasFeature(feature.UpdateTableAlias) {
		b, err = q.appendFirstTableWithAlias(gen, b)
	} else {
		b, err = q.appendFirstTable(gen, b)
	}
	if err != nil {
		return nil, err
	}
	b, err = q.appendIndexHints(gen, b)
	if err != nil {
		return nil, err
	}
	b, err = q.mustAppendSet(gen, b)
	if err != nil {
		return nil, err
	}
	// Without multi-table UPDATE, any extra tables become a FROM clause.
	if !gen.HasFeature(feature.UpdateMultiTable) {
		b, err = q.appendOtherTables(gen, b)
		if err != nil {
			return nil, err
		}
	}
	for _, j := range q.joins {
		b, err = j.AppendQuery(gen, b)
		if err != nil {
			return nil, err
		}
	}
	// OUTPUT-style returning (e.g. SQL Server) is placed before WHERE.
	if q.hasFeature(feature.Output) && q.hasReturning() {
		b = append(b, " OUTPUT "...)
		b, err = q.appendOutput(gen, b)
		if err != nil {
			return nil, err
		}
	}
	b, err = q.mustAppendWhere(gen, b, q.hasTableAlias(gen))
	if err != nil {
		return nil, err
	}
	b, err = q.appendOrder(gen, b)
	if err != nil {
		return nil, err
	}
	b, err = q.appendLimitOffset(gen, b)
	if err != nil {
		return nil, err
	}
	// RETURNING-style returning (e.g. PostgreSQL) comes last.
	if q.hasFeature(feature.Returning) && q.hasReturning() {
		b = append(b, " RETURNING "...)
		b, err = q.appendReturning(gen, b)
		if err != nil {
			return nil, err
		}
	}
	return b, nil
}
// mustAppendSet renders the SET clause from the model and/or the explicit
// Set() expressions, returning an error when the clause would be empty.
func (q *UpdateQuery) mustAppendSet(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	b = append(b, " SET "...)
	pos := len(b) // start of the clause body; used to detect emptiness below
	switch model := q.model.(type) {
	case *structTableModel:
		if !model.strct.IsValid() { // Model((*Foo)(nil))
			break
		}
		// Explicit Set() expressions without a Column() selection replace the
		// model-derived assignments entirely.
		if len(q.set) > 0 && q.columns == nil {
			break
		}
		fields, err := q.getDataFields()
		if err != nil {
			return nil, err
		}
		b, err = q.appendSetStruct(gen, b, model, fields)
		if err != nil {
			return nil, err
		}
	case *sliceTableModel:
		if len(q.set) > 0 { // bulk-update
			return q.appendSet(gen, b)
		}
		return nil, errors.New("bun: to bulk Update, use CTE and VALUES")
	case *mapModel:
		b = model.appendSet(gen, b)
	case nil:
		// continue below
	default:
		return nil, fmt.Errorf("bun: Update does not support %T", q.model)
	}
	// Explicit Set() expressions follow the model-derived assignments.
	if len(q.set) > 0 {
		if len(b) > pos {
			b = append(b, ", "...)
		}
		return q.appendSet(gen, b)
	}
	if len(b) == pos {
		return nil, errors.New("bun: empty SET clause is not allowed in the UPDATE query")
	}
	return b, nil
}
// appendOtherTables renders the extra tables (beyond the first) as a FROM
// clause, used on dialects without multi-table UPDATE support.
func (q *UpdateQuery) appendOtherTables(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if !q.hasMultiTables() {
		return b, nil
	}
	b = append(b, " FROM "...)
	return q.whereBaseQuery.appendOtherTables(gen, b)
}
//------------------------------------------------------------------------------

// Bulk rewrites the query to update many rows at once: the slice model is
// materialized as a `_data` CTE (a VALUES query), every updatable column is
// set from _data, and rows are matched by primary key.
func (q *UpdateQuery) Bulk() *UpdateQuery {
	model, ok := q.model.(*sliceTableModel)
	if !ok {
		q.setErr(fmt.Errorf("bun: Bulk requires a slice, got %T", q.model))
		return q
	}
	// Build "col = _data.col, ..." for every updatable field.
	set, err := q.updateSliceSet(q.db.gen, model)
	if err != nil {
		q.setErr(err)
		return q
	}
	values := q.db.NewValues(model)
	// Carry per-column Value() overrides into the VALUES query.
	values.customValueQuery = q.customValueQuery
	return q.With("_data", values).
		Model(model).
		TableExpr("_data").
		Set(set).
		Where(q.updateSliceWhere(q.db.gen, model))
}
// updateSliceSet builds the "col = _data.col, ..." assignment list used by
// Bulk; columns are alias-qualified on multi-table dialects.
func (q *UpdateQuery) updateSliceSet(
	gen schema.QueryGen, model *sliceTableModel,
) (string, error) {
	fields, err := q.getDataFields()
	if err != nil {
		return "", err
	}
	var b []byte
	pos := len(b) // position right after the last separator; detects a pending item
	for _, field := range fields {
		if field.SkipUpdate() {
			continue
		}
		if len(b) != pos {
			b = append(b, ", "...)
			pos = len(b)
		}
		if gen.HasFeature(feature.UpdateMultiTable) {
			b = append(b, model.table.SQLAlias...)
			b = append(b, '.')
		}
		b = append(b, field.SQLName...)
		b = append(b, " = _data."...)
		b = append(b, field.SQLName...)
	}
	return internal.String(b), nil
}
// updateSliceWhere builds the WHERE condition joining the target table to
// the _data CTE on every primary key column.
func (q *UpdateQuery) updateSliceWhere(gen schema.QueryGen, model *sliceTableModel) string {
	table := model.table
	useAlias := q.hasTableAlias(gen)
	var b []byte
	for i, pk := range table.PKs {
		if i > 0 {
			b = append(b, " AND "...)
		}
		if useAlias {
			b = append(b, table.SQLAlias...)
		} else {
			b = append(b, table.SQLName...)
		}
		b = append(b, '.')
		b = append(b, pk.SQLName...)
		b = append(b, " = _data."...)
		b = append(b, pk.SQLName...)
	}
	return internal.String(b)
}
//------------------------------------------------------------------------------

// Scan runs the query and scans the RETURNING/OUTPUT columns into dest.
func (q *UpdateQuery) Scan(ctx context.Context, dest ...any) error {
	_, err := q.scanOrExec(ctx, dest, true)
	return err
}

// Exec runs the query; results are scanned into dest only when dest is given.
func (q *UpdateQuery) Exec(ctx context.Context, dest ...any) (sql.Result, error) {
	return q.scanOrExec(ctx, dest, len(dest) > 0)
}
// scanOrExec is the shared execution path for Scan and Exec: it runs the
// before/after update hooks, generates the SQL, and either scans results
// (when dest is given or a RETURNING/OUTPUT clause will produce rows) or
// just executes the statement.
func (q *UpdateQuery) scanOrExec(
	ctx context.Context, dest []any, hasDest bool,
) (sql.Result, error) {
	if q.err != nil {
		return nil, q.err
	}
	if q.table != nil {
		if err := q.beforeUpdateHook(ctx); err != nil {
			return nil, err
		}
	}
	// Run append model hooks before generating the query.
	if err := q.beforeAppendModel(ctx, q); err != nil {
		return nil, err
	}
	// if a comment is propagated via the context, use it
	setCommentFromContext(ctx, q)
	// Generate the query before checking hasReturning.
	queryBytes, err := q.AppendQuery(q.db.gen, q.db.makeQueryBytes())
	if err != nil {
		return nil, err
	}
	// Scan when the caller provided destinations or the dialect will return rows.
	useScan := hasDest || (q.hasReturning() && q.hasFeature(feature.Returning|feature.Output))
	var model Model
	if useScan {
		var err error
		model, err = q.getModel(dest)
		if err != nil {
			return nil, err
		}
	}
	query := internal.String(queryBytes)
	var res sql.Result
	if useScan {
		res, err = q.scan(ctx, q, query, model, hasDest)
		if err != nil {
			return nil, err
		}
	} else {
		res, err = q.exec(ctx, q, query)
		if err != nil {
			return nil, err
		}
	}
	if q.table != nil {
		if err := q.afterUpdateHook(ctx); err != nil {
			return nil, err
		}
	}
	return res, nil
}
// beforeUpdateHook invokes the model's BeforeUpdate hook, if implemented.
func (q *UpdateQuery) beforeUpdateHook(ctx context.Context) error {
	hook, ok := q.table.ZeroIface.(BeforeUpdateHook)
	if !ok {
		return nil
	}
	return hook.BeforeUpdate(ctx, q)
}

// afterUpdateHook invokes the model's AfterUpdate hook, if implemented.
func (q *UpdateQuery) afterUpdateHook(ctx context.Context) error {
	hook, ok := q.table.ZeroIface.(AfterUpdateHook)
	if !ok {
		return nil
	}
	return hook.AfterUpdate(ctx, q)
}
// FQN returns a fully qualified column name, for example, table_name.column_name or
// table_alias.column_alias.
func (q *UpdateQuery) FQN(column string) Ident {
	if q.table == nil {
		panic("UpdateQuery.FQN requires a model")
	}
	prefix := q.table.Name
	if q.hasTableAlias(q.db.gen) {
		prefix = q.table.Alias
	}
	return Ident(prefix + "." + column)
}

// hasTableAlias reports whether the dialect qualifies columns with a table alias.
func (q *UpdateQuery) hasTableAlias(gen schema.QueryGen) bool {
	return gen.HasFeature(feature.UpdateMultiTable | feature.UpdateTableAlias)
}
// String returns the generated SQL query string. The UpdateQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *UpdateQuery) String() string {
	out, err := q.AppendQuery(q.db.QueryGen(), nil)
	if err != nil {
		panic(err)
	}
	return string(out)
}
//------------------------------------------------------------------------------

// QueryBuilder returns a query-kind-agnostic builder wrapping this query.
func (q *UpdateQuery) QueryBuilder() QueryBuilder {
	return &updateQueryBuilder{q}
}

// ApplyQueryBuilder runs fn on the query through the QueryBuilder interface.
func (q *UpdateQuery) ApplyQueryBuilder(fn func(QueryBuilder) QueryBuilder) *UpdateQuery {
	return fn(q.QueryBuilder()).Unwrap().(*UpdateQuery)
}

// updateQueryBuilder adapts UpdateQuery to the QueryBuilder interface.
type updateQueryBuilder struct {
	*UpdateQuery
}
// WhereGroup groups the conditions added by fn, mirroring
// UpdateQuery.WhereGroup through the QueryBuilder interface.
func (q *updateQueryBuilder) WhereGroup(
	sep string, fn func(QueryBuilder) QueryBuilder,
) QueryBuilder {
	q.UpdateQuery = q.UpdateQuery.WhereGroup(sep, func(qs *UpdateQuery) *UpdateQuery {
		// NOTE(review): fn is applied to the builder q (which embeds the same
		// UpdateQuery state) rather than to qs directly; the result is
		// unwrapped back to the embedded query. Presumably equivalent — confirm.
		return fn(q).(*updateQueryBuilder).UpdateQuery
	})
	return q
}
// Where adds an ANDed WHERE condition via the builder interface.
func (q *updateQueryBuilder) Where(query string, args ...any) QueryBuilder {
	q.UpdateQuery.Where(query, args...)
	return q
}

// WhereOr adds an ORed WHERE condition via the builder interface.
func (q *updateQueryBuilder) WhereOr(query string, args ...any) QueryBuilder {
	q.UpdateQuery.WhereOr(query, args...)
	return q
}

// WhereDeleted restricts the query to soft-deleted rows.
func (q *updateQueryBuilder) WhereDeleted() QueryBuilder {
	q.UpdateQuery.WhereDeleted()
	return q
}

// WhereAllWithDeleted includes soft-deleted rows in the query.
func (q *updateQueryBuilder) WhereAllWithDeleted() QueryBuilder {
	q.UpdateQuery.WhereAllWithDeleted()
	return q
}

// WherePK filters by the model primary keys.
func (q *updateQueryBuilder) WherePK(cols ...string) QueryBuilder {
	q.UpdateQuery.WherePK(cols...)
	return q
}

// Unwrap returns the underlying *UpdateQuery.
func (q *updateQueryBuilder) Unwrap() any {
	return q.UpdateQuery
}
//------------------------------------------------------------------------------

// UseIndex adds a USE INDEX hint (MySQL only; a no-op elsewhere).
func (q *UpdateQuery) UseIndex(indexes ...string) *UpdateQuery {
	if q.db.dialect.Name() == dialect.MySQL {
		q.addUseIndex(indexes...)
	}
	return q
}

// IgnoreIndex adds an IGNORE INDEX hint (MySQL only; a no-op elsewhere).
func (q *UpdateQuery) IgnoreIndex(indexes ...string) *UpdateQuery {
	if q.db.dialect.Name() == dialect.MySQL {
		q.addIgnoreIndex(indexes...)
	}
	return q
}

// ForceIndex adds a FORCE INDEX hint (MySQL only; a no-op elsewhere).
func (q *UpdateQuery) ForceIndex(indexes ...string) *UpdateQuery {
	if q.db.dialect.Name() == dialect.MySQL {
		q.addForceIndex(indexes...)
	}
	return q
}

250
vendor/github.com/uptrace/bun/query_values.go generated vendored Normal file
View File

@@ -0,0 +1,250 @@
package bun
import (
"fmt"
"reflect"
"strconv"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/schema"
)
// ValuesQuery builds a VALUES (...) expression, typically used as a CTE data
// source (e.g. by UpdateQuery.Bulk).
type ValuesQuery struct {
	baseQuery
	setQuery
	withOrder bool // when set, an extra _order column with the row index is rendered
	comment string // optional comment rendered as /* ... */ before the query
}

// Compile-time interface checks.
var (
	_ Query = (*ValuesQuery)(nil)
	_ schema.NamedArgAppender = (*ValuesQuery)(nil)
)
// NewValuesQuery returns a VALUES query for model, bound to db.
func NewValuesQuery(db *DB, model any) *ValuesQuery {
	q := &ValuesQuery{
		baseQuery: baseQuery{db: db},
	}
	q.setModel(model)
	return q
}

// Conn sets the connection or transaction the query will run on.
func (q *ValuesQuery) Conn(db IConn) *ValuesQuery {
	q.setConn(db)
	return q
}

// Err marks the query as failed; the error is returned on execution.
func (q *ValuesQuery) Err(err error) *ValuesQuery {
	q.setErr(err)
	return q
}

// Column restricts the rendered model columns.
func (q *ValuesQuery) Column(columns ...string) *ValuesQuery {
	for i := range columns {
		q.addColumn(schema.UnsafeIdent(columns[i]))
	}
	return q
}

// Value overwrites model value for the column.
func (q *ValuesQuery) Value(column string, expr string, args ...any) *ValuesQuery {
	if q.table == nil {
		q.setErr(errNilModel)
		return q
	}
	q.addValue(q.table, column, expr, args)
	return q
}

// OmitZero sets the omit-zero flag for model struct fields.
func (q *ValuesQuery) OmitZero() *ValuesQuery {
	q.omitZero = true
	return q
}

// WithOrder makes the query render an extra _order column holding the row index.
func (q *ValuesQuery) WithOrder() *ValuesQuery {
	q.withOrder = true
	return q
}

// Comment adds a comment to the query, wrapped by /* ... */.
func (q *ValuesQuery) Comment(comment string) *ValuesQuery {
	q.comment = comment
	return q
}
// AppendNamedArg resolves template placeholders; it supports ?Columns, which
// expands to the column list. Errors are stored on the query and reported later.
func (q *ValuesQuery) AppendNamedArg(gen schema.QueryGen, b []byte, name string) ([]byte, bool) {
	if name != "Columns" {
		return b, false
	}
	out, err := q.AppendColumns(gen, b)
	if err != nil {
		q.setErr(err)
		return b, true
	}
	return out, true
}
// AppendColumns appends the table columns. It is used by CTE.
func (q *ValuesQuery) AppendColumns(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	if q.model == nil {
		return nil, errNilModel
	}
	if q.tableModel != nil {
		fields, err := q.getFields()
		if err != nil {
			return nil, err
		}
		b = appendColumns(b, "", fields)
		if q.withOrder {
			b = append(b, ", _order"...)
		}
		return b, nil
	}
	if model, ok := q.model.(*mapSliceModel); ok {
		return model.appendColumns(gen, b)
	}
	return nil, fmt.Errorf("bun: Values does not support %T", q.model)
}

// Operation returns the SQL keyword describing this query type.
func (q *ValuesQuery) Operation() string {
	return "VALUES"
}
// AppendQuery renders the VALUES expression into b. Rows are wrapped in
// ROW(...) on dialects that require it; with WithOrder a trailing row index
// is appended to every row.
func (q *ValuesQuery) AppendQuery(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	if q.err != nil {
		return nil, q.err
	}
	if q.model == nil {
		return nil, errNilModel
	}
	b = appendComment(b, q.comment)
	gen = formatterWithModel(gen, q)
	b = append(b, "VALUES "...)
	if q.db.HasFeature(feature.ValuesRow) {
		b = append(b, "ROW("...)
	} else {
		b = append(b, '(')
	}
	switch model := q.model.(type) {
	case *structTableModel:
		// Single row from a struct model.
		fields, err := q.getFields()
		if err != nil {
			return nil, err
		}
		b, err = q.appendValues(gen, b, fields, model.strct)
		if err != nil {
			return nil, err
		}
		if q.withOrder {
			b = append(b, ", "...)
			// Single row, so the order index is always 0.
			b = strconv.AppendInt(b, 0, 10)
		}
	case *sliceTableModel:
		// One row per slice element.
		fields, err := q.getFields()
		if err != nil {
			return nil, err
		}
		sliceLen := model.slice.Len()
		for i := range sliceLen {
			if i > 0 {
				// Close the previous row and open the next one.
				b = append(b, "), "...)
				if q.db.HasFeature(feature.ValuesRow) {
					b = append(b, "ROW("...)
				} else {
					b = append(b, '(')
				}
			}
			b, err = q.appendValues(gen, b, fields, model.slice.Index(i))
			if err != nil {
				return nil, err
			}
			if q.withOrder {
				b = append(b, ", "...)
				b = strconv.AppendInt(b, int64(i), 10)
			}
		}
	case *mapSliceModel:
		b, err = model.appendValues(gen, b)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("bun: Values does not support %T", model)
	}
	// Close the final row.
	b = append(b, ')')
	return b, nil
}
// appendValues appends one row of field values taken from strct. Per-column
// Value() overrides win over the struct value; in template (Nop) mode a `?`
// placeholder is emitted instead. Dialects with DoubleColonCast get an
// explicit ::type cast after each value.
func (q *ValuesQuery) appendValues(
	gen schema.QueryGen, b []byte, fields []*schema.Field, strct reflect.Value,
) (_ []byte, err error) {
	isTemplate := gen.IsNop()
	for i, f := range fields {
		if i > 0 {
			b = append(b, ", "...)
		}
		// A user-supplied Value() expression replaces the struct field value.
		app, ok := q.modelValues[f.Name]
		if ok {
			b, err = app.AppendQuery(gen, b)
			if err != nil {
				return nil, err
			}
			continue
		}
		if isTemplate {
			b = append(b, '?')
		} else {
			b = f.AppendValue(gen, b, indirect(strct))
		}
		if gen.HasFeature(feature.DoubleColonCast) {
			b = append(b, "::"...)
			b = append(b, f.UserSQLType...)
		}
	}
	return b, nil
}
// appendSet renders a SET clause from the model; only map and struct table
// models are supported.
func (q *ValuesQuery) appendSet(gen schema.QueryGen, b []byte) (_ []byte, err error) {
	switch model := q.model.(type) {
	case *mapModel:
		return model.appendSet(gen, b), nil
	case *structTableModel:
		fields, err := q.getDataFields()
		if err != nil {
			return nil, err
		}
		return q.appendSetStruct(gen, b, model, fields)
	}
	return nil, fmt.Errorf("bun: SetValues(unsupported %T)", q.model)
}

453
vendor/github.com/uptrace/bun/relation_join.go generated vendored Normal file
View File

@@ -0,0 +1,453 @@
package bun
import (
"context"
"reflect"
"time"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
)
// relationJoin describes how one relation of the base model is loaded:
// either joined into the base query or fetched with a separate sub-query.
type relationJoin struct {
	Parent *relationJoin // enclosing join for nested relations (may be nil)
	BaseModel TableModel // model on the base side of the relation
	JoinModel TableModel // model being joined/loaded
	Relation *schema.Relation // relation metadata (type, PKs, m2m table, ...)
	additionalJoinOnConditions []schema.QueryWithArgs // extra user-supplied ON conditions
	apply func(*SelectQuery) *SelectQuery // user hook applied to the relation query
	columns []schema.QueryWithArgs // columns captured from apply (see applyTo)
}
// applyTo runs the user-supplied apply function against q while temporarily
// swapping in the join model's table and a clean column list; the columns
// the function selects are captured into j.columns for later rendering.
func (j *relationJoin) applyTo(q *SelectQuery) {
	if j.apply == nil {
		return
	}
	var table *schema.Table
	var columns []schema.QueryWithArgs
	// Save state.
	table, q.table = q.table, j.JoinModel.Table()
	columns, q.columns = q.columns, nil
	q = j.apply(q)
	// Restore state.
	q.table = table
	j.columns, q.columns = q.columns, columns
}
// Select loads the relation rows for has-many and many-to-many relations via
// their dedicated sub-queries. Other relation kinds are rendered as joins on
// the base query and must never reach this method.
//
// Bug fix: the switch had no cases, so Select unconditionally panicked while
// selectMany/selectM2M below were defined but never invoked; the dispatch on
// the relation type is restored.
func (j *relationJoin) Select(ctx context.Context, q *SelectQuery) error {
	switch j.Relation.Type {
	case schema.HasManyRelation:
		return j.selectMany(ctx, q)
	case schema.ManyToManyRelation:
		return j.selectM2M(ctx, q)
	}
	panic("not reached")
}
// selectMany executes the has-many sub-query; a nil query means there is
// nothing to load.
func (j *relationJoin) selectMany(ctx context.Context, q *SelectQuery) error {
	sub := j.manyQuery(q)
	if sub == nil {
		return nil
	}
	return sub.Scan(ctx)
}
// manyQuery prepares the sub-query that loads has-many rows, choosing the
// composite-IN form when the dialect supports it.
func (j *relationJoin) manyQuery(q *SelectQuery) *SelectQuery {
	model := newHasManyModel(j)
	if model == nil {
		return nil
	}
	q = q.Model(model)
	var where []byte
	if q.db.HasFeature(feature.CompositeIn) {
		return j.manyQueryCompositeIn(where, q)
	}
	return j.manyQueryMulti(where, q)
}
// manyQueryCompositeIn filters the has-many sub-query with a composite IN:
// (join_pk1, join_pk2) IN ((v1, v2), ...) built from the parent row PKs.
func (j *relationJoin) manyQueryCompositeIn(where []byte, q *SelectQuery) *SelectQuery {
	// Composite keys are wrapped in parentheses on the column side.
	if len(j.Relation.JoinPKs) > 1 {
		where = append(where, '(')
	}
	where = appendColumns(where, j.JoinModel.Table().SQLAlias, j.Relation.JoinPKs)
	if len(j.Relation.JoinPKs) > 1 {
		where = append(where, ')')
	}
	where = append(where, " IN ("...)
	// Collect the (deduplicated) PK tuples of the parent rows.
	where = appendChildValues(
		q.db.QueryGen(),
		where,
		j.JoinModel.rootValue(),
		j.JoinModel.parentIndex(),
		j.Relation.BasePKs,
	)
	where = append(where, ")"...)
	if len(j.additionalJoinOnConditions) > 0 {
		where = append(where, " AND "...)
		where = appendAdditionalJoinOnConditions(q.db.QueryGen(), where, j.additionalJoinOnConditions)
	}
	q = q.Where(internal.String(where))
	if j.Relation.PolymorphicField != nil {
		q = q.Where("? = ?", j.Relation.PolymorphicField.SQLName, j.Relation.PolymorphicValue)
	}
	j.applyTo(q)
	q = q.Apply(j.hasManyColumns)
	return q
}
// manyQueryMulti filters the has-many sub-query without composite IN, using
// ((k1=v1 AND k2=v2) OR ...) conditions built from the parent row PKs.
func (j *relationJoin) manyQueryMulti(where []byte, q *SelectQuery) *SelectQuery {
	where = appendMultiValues(
		q.db.QueryGen(),
		where,
		j.JoinModel.rootValue(),
		j.JoinModel.parentIndex(),
		j.Relation.BasePKs,
		j.Relation.JoinPKs,
		j.JoinModel.Table().SQLAlias,
	)
	q = q.Where(internal.String(where))
	if len(j.additionalJoinOnConditions) > 0 {
		q = q.Where(internal.String(appendAdditionalJoinOnConditions(q.db.QueryGen(), []byte{}, j.additionalJoinOnConditions)))
	}
	if j.Relation.PolymorphicField != nil {
		q = q.Where("? = ?", j.Relation.PolymorphicField.SQLName, j.Relation.PolymorphicValue)
	}
	j.applyTo(q)
	q = q.Apply(j.hasManyColumns)
	return q
}
// hasManyColumns appends the column list for a has-many sub-query: either the
// user-selected columns captured by applyTo, or every field of the join table.
func (j *relationJoin) hasManyColumns(q *SelectQuery) *SelectQuery {
	joinTable := j.JoinModel.Table()
	b := make([]byte, 0, 32)
	if len(j.columns) == 0 {
		b = appendColumns(b, joinTable.SQLAlias, joinTable.Fields)
		return q.ColumnExpr(internal.String(b))
	}
	for i, col := range j.columns {
		if i > 0 {
			b = append(b, ", "...)
		}
		// Bare column names that match a table field are alias-qualified.
		if col.Args == nil {
			if field, ok := joinTable.FieldMap[col.Query]; ok {
				b = append(b, joinTable.SQLAlias...)
				b = append(b, '.')
				b = append(b, field.SQLName...)
				continue
			}
		}
		var err error
		b, err = col.AppendQuery(q.db.gen, b)
		if err != nil {
			q.setErr(err)
			return q
		}
	}
	return q.ColumnExpr(internal.String(b))
}
// selectM2M executes the many-to-many sub-query; a nil query means there is
// nothing to load.
func (j *relationJoin) selectM2M(ctx context.Context, q *SelectQuery) error {
	sub := j.m2mQuery(q)
	if sub == nil {
		return nil
	}
	return sub.Scan(ctx)
}
// m2mQuery prepares the sub-query that loads many-to-many rows by joining
// through the m2m (junction) table and filtering on the parent PK tuples.
func (j *relationJoin) m2mQuery(q *SelectQuery) *SelectQuery {
	gen := q.db.gen
	m2mModel := newM2MModel(j)
	if m2mModel == nil {
		return nil
	}
	q = q.Model(m2mModel)
	index := j.JoinModel.parentIndex()
	if j.Relation.M2MTable != nil {
		// We only need base pks to park joined models to the base model.
		fields := j.Relation.M2MBasePKs
		b := make([]byte, 0, len(fields))
		b = appendColumns(b, j.Relation.M2MTable.SQLAlias, fields)
		q = q.ColumnExpr(internal.String(b))
	}
	//nolint
	// NOTE(review): M2MTable is dereferenced unconditionally below even though
	// it is nil-checked above — presumably it is always non-nil here; confirm.
	var join []byte
	join = append(join, "JOIN "...)
	join = gen.AppendQuery(join, string(j.Relation.M2MTable.SQLName))
	join = append(join, " AS "...)
	join = append(join, j.Relation.M2MTable.SQLAlias...)
	join = append(join, " ON ("...)
	for i, col := range j.Relation.M2MBasePKs {
		if i > 0 {
			join = append(join, ", "...)
		}
		join = append(join, j.Relation.M2MTable.SQLAlias...)
		join = append(join, '.')
		join = append(join, col.SQLName...)
	}
	join = append(join, ") IN ("...)
	// Filter junction rows by the parent rows' PK tuples.
	join = appendChildValues(gen, join, j.BaseModel.rootValue(), index, j.Relation.BasePKs)
	join = append(join, ")"...)
	if len(j.additionalJoinOnConditions) > 0 {
		join = append(join, " AND "...)
		join = appendAdditionalJoinOnConditions(gen, join, j.additionalJoinOnConditions)
	}
	q = q.Join(internal.String(join))
	joinTable := j.JoinModel.Table()
	// Tie the target table to the junction table on the join-side keys.
	for i, m2mJoinField := range j.Relation.M2MJoinPKs {
		joinField := j.Relation.JoinPKs[i]
		q = q.Where("?.? = ?.?",
			joinTable.SQLAlias, joinField.SQLName,
			j.Relation.M2MTable.SQLAlias, m2mJoinField.SQLName)
	}
	j.applyTo(q)
	q = q.Apply(j.hasManyColumns)
	return q
}
// hasParent reports whether this join hangs off a has-one/belongs-to parent
// join (and therefore must use the parent's alias prefix).
func (j *relationJoin) hasParent() bool {
	p := j.Parent
	if p == nil {
		return false
	}
	t := p.Relation.Type
	return t == schema.HasOneRelation || t == schema.BelongsToRelation
}
// appendAlias appends the quoted nested alias for this join (parent__child...).
func (j *relationJoin) appendAlias(gen schema.QueryGen, b []byte) []byte {
	quote := gen.IdentQuote()
	b = append(b, quote)
	b = appendAlias(b, j)
	return append(b, quote)
}

// appendAliasColumn appends the quoted alias__column identifier.
func (j *relationJoin) appendAliasColumn(gen schema.QueryGen, b []byte, column string) []byte {
	quote := gen.IdentQuote()
	b = append(b, quote)
	b = appendAlias(b, j)
	b = append(b, "__"...)
	b = append(b, column...)
	return append(b, quote)
}
// appendBaseAlias appends the alias of the base side of the join: the parent
// join's quoted alias when nested, otherwise the base table's own alias.
func (j *relationJoin) appendBaseAlias(gen schema.QueryGen, b []byte) []byte {
	if !j.hasParent() {
		return append(b, j.BaseModel.Table().SQLAlias...)
	}
	quote := gen.IdentQuote()
	b = append(b, quote)
	b = appendAlias(b, j.Parent)
	return append(b, quote)
}
// appendSoftDelete appends the soft-delete condition for the join table:
// IS [NOT] NULL for nullable (pointer/nullzero) fields, or a comparison
// against the zero time otherwise. flags selects deleted vs. live rows.
func (j *relationJoin) appendSoftDelete(
	gen schema.QueryGen, b []byte, flags internal.Flag,
) []byte {
	b = append(b, '.')
	field := j.JoinModel.Table().SoftDeleteField
	b = append(b, field.SQLName...)
	if field.IsPtr || field.NullZero {
		if flags.Has(deletedFlag) {
			b = append(b, " IS NOT NULL"...)
		} else {
			b = append(b, " IS NULL"...)
		}
	} else {
		if flags.Has(deletedFlag) {
			b = append(b, " != "...)
		} else {
			b = append(b, " = "...)
		}
		// Non-nullable fields compare against the dialect's zero time.
		b = gen.Dialect().AppendTime(b, time.Time{})
	}
	return b
}
// appendAlias builds the nested join alias by joining relation field names
// with "__", walking up through has-one/belongs-to parents.
func appendAlias(b []byte, j *relationJoin) []byte {
	if j.hasParent() {
		b = append(appendAlias(b, j.Parent), "__"...)
	}
	return append(b, j.Relation.Field.Name...)
}
// appendHasOneJoin renders a LEFT JOIN for a has-one/belongs-to relation,
// matching join PKs to base PKs and adding soft-delete and user-supplied
// ON conditions as needed.
func (j *relationJoin) appendHasOneJoin(
	gen schema.QueryGen, b []byte, q *SelectQuery,
) (_ []byte, err error) {
	isSoftDelete := j.JoinModel.Table().SoftDeleteField != nil && !q.flags.Has(allWithDeletedFlag)
	b = append(b, "LEFT JOIN "...)
	b = gen.AppendQuery(b, string(j.JoinModel.Table().SQLNameForSelects))
	b = append(b, " AS "...)
	b = j.appendAlias(gen, b)
	b = append(b, " ON "...)
	b = append(b, '(')
	// join_alias.join_pk = base_alias.base_pk for every key pair.
	for i, baseField := range j.Relation.BasePKs {
		if i > 0 {
			b = append(b, " AND "...)
		}
		b = j.appendAlias(gen, b)
		b = append(b, '.')
		b = append(b, j.Relation.JoinPKs[i].SQLName...)
		b = append(b, " = "...)
		b = j.appendBaseAlias(gen, b)
		b = append(b, '.')
		b = append(b, baseField.SQLName...)
	}
	b = append(b, ')')
	if isSoftDelete {
		b = append(b, " AND "...)
		b = j.appendAlias(gen, b)
		b = j.appendSoftDelete(gen, b, q.flags)
	}
	if len(j.additionalJoinOnConditions) > 0 {
		b = append(b, " AND "...)
		b = appendAdditionalJoinOnConditions(gen, b, j.additionalJoinOnConditions)
	}
	return b, nil
}
// appendChildValues collects the PK tuples of all child rows reachable via
// index from v, rendered as "v, v, ..." (or "(a, b), ..." for composite
// keys) with duplicates skipped; intended for use inside IN (...).
func appendChildValues(
	gen schema.QueryGen, b []byte, v reflect.Value, index []int, fields []*schema.Field,
) []byte {
	seen := make(map[string]struct{})
	walk(v, index, func(v reflect.Value) {
		start := len(b)
		if len(fields) > 1 {
			b = append(b, '(')
		}
		for i, f := range fields {
			if i > 0 {
				b = append(b, ", "...)
			}
			b = f.AppendValue(gen, b, v)
		}
		if len(fields) > 1 {
			b = append(b, ')')
		}
		b = append(b, ", "...)
		// Deduplicate: roll back the tuple if an identical one was emitted.
		if _, ok := seen[string(b[start:])]; ok {
			b = b[:start]
		} else {
			seen[string(b[start:])] = struct{}{}
		}
	})
	// At least one tuple was emitted, so drop the trailing separator.
	if len(seen) > 0 {
		b = b[:len(b)-2] // trim ", "
	}
	return b
}
// appendMultiValues is an alternative to appendChildValues that doesn't use the sql keyword ID
// but instead uses old style ((k1=v1) AND (k2=v2)) OR (...) conditions.
func appendMultiValues(
	gen schema.QueryGen, b []byte, v reflect.Value, index []int, baseFields, joinFields []*schema.Field, joinTable schema.Safe,
) []byte {
	// This is based on a mix of appendChildValues and query_base.appendColumns
	// These should never mismatch in length but nice to know if it does
	if len(joinFields) != len(baseFields) {
		panic("not reached")
	}
	// walk the relations
	b = append(b, '(')
	seen := make(map[string]struct{})
	walk(v, index, func(v reflect.Value) {
		start := len(b)
		for i, f := range baseFields {
			if i > 0 {
				b = append(b, " AND "...)
			}
			if len(baseFields) > 1 {
				b = append(b, '(')
			}
			// Field name
			b = append(b, joinTable...)
			b = append(b, '.')
			b = append(b, []byte(joinFields[i].SQLName)...)
			// Equals value
			b = append(b, '=')
			b = f.AppendValue(gen, b, v)
			if len(baseFields) > 1 {
				b = append(b, ')')
			}
		}
		b = append(b, ") OR ("...)
		// Deduplicate: roll back the clause if an identical one was emitted.
		if _, ok := seen[string(b[start:])]; ok {
			b = b[:start]
		} else {
			seen[string(b[start:])] = struct{}{}
		}
	})
	// At least one clause was emitted, so drop the trailing separator.
	if len(seen) > 0 {
		b = b[:len(b)-6] // trim ") OR ("
	}
	b = append(b, ')')
	return b
}
// appendAdditionalJoinOnConditions renders user-supplied extra JOIN ... ON
// conditions, ANDed together.
func appendAdditionalJoinOnConditions(
	gen schema.QueryGen, b []byte, conditions []schema.QueryWithArgs,
) []byte {
	for i := range conditions {
		if i > 0 {
			b = append(b, " AND "...)
		}
		cond := conditions[i]
		b = gen.AppendQuery(b, cond.Query, cond.Args...)
	}
	return b
}

81
vendor/github.com/uptrace/bun/schema/append.go generated vendored Normal file
View File

@@ -0,0 +1,81 @@
package schema
import (
"fmt"
"reflect"
"github.com/uptrace/bun/dialect"
)
// In returns a QueryAppender that renders slice as a comma-separated value
// list suitable for an IN (...) expression; nested slices are rendered as
// parenthesized groups.
func In(slice any) QueryAppender {
	v := reflect.ValueOf(slice)
	if v.Kind() != reflect.Slice {
		err := fmt.Errorf("bun: In(non-slice %T)", slice)
		return &inValues{err: err}
	}
	return &inValues{slice: v}
}

// inValues holds the slice to render; err is reported on AppendQuery.
type inValues struct {
	slice reflect.Value
	err   error
}

var _ QueryAppender = (*inValues)(nil)

// AppendQuery renders the captured slice, or fails with the stored error.
func (in *inValues) AppendQuery(gen QueryGen, b []byte) (_ []byte, err error) {
	if in.err != nil {
		return nil, in.err
	}
	return appendIn(gen, b, in.slice), nil
}
// appendIn appends the slice elements separated by ", "; an empty slice
// renders as NULL, and nested (non-bytes) slices are wrapped in parentheses
// and rendered recursively.
func appendIn(gen QueryGen, b []byte, slice reflect.Value) []byte {
	n := slice.Len()
	if n == 0 {
		return dialect.AppendNull(b)
	}
	for i := 0; i < n; i++ {
		if i > 0 {
			b = append(b, ", "...)
		}
		elem := slice.Index(i)
		if elem.Kind() == reflect.Interface {
			elem = elem.Elem()
		}
		if elem.Kind() != reflect.Slice || elem.Type() == bytesType {
			b = gen.AppendValue(b, elem)
			continue
		}
		b = append(b, '(')
		b = appendIn(gen, b, elem)
		b = append(b, ')')
	}
	return b
}
//------------------------------------------------------------------------------

// NullZero returns a QueryAppender that renders value as NULL when it is the
// zero value of its type, and as the formatted value otherwise.
func NullZero(value any) QueryAppender {
	return nullZero{value: value}
}

// nullZero wraps a value for zero-as-NULL rendering.
type nullZero struct {
	value any
}

// AppendQuery appends NULL for zero values, the formatted value otherwise.
func (nz nullZero) AppendQuery(gen QueryGen, b []byte) (_ []byte, err error) {
	if !isZero(nz.value) {
		return gen.AppendValue(b, reflect.ValueOf(nz.value)), nil
	}
	return dialect.AppendNull(b), nil
}

316
vendor/github.com/uptrace/bun/schema/append_value.go generated vendored Normal file
View File

@@ -0,0 +1,316 @@
package schema
import (
"database/sql/driver"
"fmt"
"net"
"reflect"
"strconv"
"strings"
"time"
"github.com/puzpuzpuz/xsync/v3"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/sqltype"
"github.com/uptrace/bun/extra/bunjson"
"github.com/uptrace/bun/internal"
"github.com/vmihailenco/msgpack/v5"
)
type (
	// AppenderFunc appends the SQL representation of v to b.
	AppenderFunc func(gen QueryGen, b []byte, v reflect.Value) []byte
	// CustomAppender returns an AppenderFunc for the given type.
	CustomAppender func(typ reflect.Type) AppenderFunc
)
// appenders maps a reflect.Kind to its default AppenderFunc; nil entries
// mark kinds with no SQL representation.
var appenders = []AppenderFunc{
	reflect.Bool: AppendBoolValue,
	reflect.Int: AppendIntValue,
	reflect.Int8: AppendIntValue,
	reflect.Int16: AppendIntValue,
	reflect.Int32: AppendIntValue,
	reflect.Int64: AppendIntValue,
	reflect.Uint: AppendUintValue,
	reflect.Uint8: AppendUintValue,
	reflect.Uint16: AppendUintValue,
	reflect.Uint32: appendUint32Value,
	reflect.Uint64: appendUint64Value,
	reflect.Uintptr: nil,
	reflect.Float32: AppendFloat32Value,
	reflect.Float64: AppendFloat64Value,
	reflect.Complex64: nil,
	reflect.Complex128: nil,
	reflect.Array: AppendJSONValue,
	reflect.Chan: nil,
	reflect.Func: nil,
	reflect.Interface: nil,
	reflect.Map: AppendJSONValue,
	reflect.Ptr: nil,
	reflect.Slice: AppendJSONValue,
	reflect.String: AppendStringValue,
	reflect.Struct: AppendJSONValue,
	reflect.UnsafePointer: nil,
}

// appenderCache memoizes Appender lookups per reflect.Type.
var appenderCache = xsync.NewMapOf[reflect.Type, AppenderFunc]()
// FieldAppender returns the AppenderFunc for a struct field, honoring the
// msgpack tag option and JSON/JSONB user SQL types before falling back to
// the type-based Appender.
func FieldAppender(dialect Dialect, field *Field) AppenderFunc {
	if field.Tag.HasOption("msgpack") {
		return appendMsgpack
	}
	fieldType := field.StructField.Type
	switch strings.ToUpper(field.UserSQLType) {
	case sqltype.JSON, sqltype.JSONB:
		// A driver.Valuer implementation wins over generic JSON marshaling.
		if fieldType.Implements(driverValuerType) {
			return appendDriverValue
		}
		// Also honor Valuer implemented on the pointer receiver.
		if fieldType.Kind() != reflect.Ptr {
			if reflect.PointerTo(fieldType).Implements(driverValuerType) {
				return addrAppender(appendDriverValue)
			}
		}
		return AppendJSONValue
	}
	return Appender(dialect, fieldType)
}
// Appender returns the AppenderFunc for typ, caching resolved funcs per type.
func Appender(dialect Dialect, typ reflect.Type) AppenderFunc {
	if v, ok := appenderCache.Load(typ); ok {
		return v
	}
	fn := appender(dialect, typ)
	// LoadOrStore keeps the first stored func if another goroutine raced us.
	if v, ok := appenderCache.LoadOrStore(typ, fn); ok {
		return v
	}
	return fn
}
// appender computes the AppenderFunc for typ. Resolution order matters:
// well-known concrete types first, then interface implementations on the
// type itself, then on its pointer type, and finally a kind-based default.
func appender(dialect Dialect, typ reflect.Type) AppenderFunc {
    switch typ {
    case bytesType:
        return appendBytesValue
    case timeType:
        return appendTimeValue
    case timePtrType:
        return PtrAppender(appendTimeValue)
    case ipNetType:
        return appendIPNetValue
    case ipType, netipPrefixType, netipAddrType:
        return appendStringer
    case jsonRawMessageType:
        return appendJSONRawMessageValue
    }
    kind := typ.Kind()
    // QueryAppender takes precedence over driver.Valuer.
    if typ.Implements(queryAppenderType) {
        if kind == reflect.Ptr {
            // Guard against calling the method on a nil pointer.
            return nilAwareAppender(appendQueryAppenderValue)
        }
        return appendQueryAppenderValue
    }
    if typ.Implements(driverValuerType) {
        if kind == reflect.Ptr {
            return nilAwareAppender(appendDriverValue)
        }
        return appendDriverValue
    }
    // For non-pointer types, also check whether *T implements the
    // interfaces; such values must be addressable at append time.
    if kind != reflect.Ptr {
        ptr := reflect.PointerTo(typ)
        if ptr.Implements(queryAppenderType) {
            return addrAppender(appendQueryAppenderValue)
        }
        if ptr.Implements(driverValuerType) {
            return addrAppender(appendDriverValue)
        }
    }
    switch kind {
    case reflect.Interface:
        return ifaceAppenderFunc
    case reflect.Ptr:
        if typ.Implements(jsonMarshalerType) {
            return nilAwareAppender(AppendJSONValue)
        }
        // Recurse on the element type and dereference at append time.
        if fn := Appender(dialect, typ.Elem()); fn != nil {
            return PtrAppender(fn)
        }
    case reflect.Slice:
        if typ.Elem().Kind() == reflect.Uint8 {
            return appendBytesValue
        }
    case reflect.Array:
        if typ.Elem().Kind() == reflect.Uint8 {
            return appendArrayBytesValue
        }
    }
    return appenders[typ.Kind()]
}
// ifaceAppenderFunc appends an interface value by resolving the appender
// for its dynamic type at append time; nil interfaces become SQL NULL.
func ifaceAppenderFunc(gen QueryGen, b []byte, v reflect.Value) []byte {
    if v.IsNil() {
        return dialect.AppendNull(b)
    }
    elem := v.Elem()
    appender := Appender(gen.Dialect(), elem.Type())
    return appender(gen, b, elem)
}

// nilAwareAppender wraps fn so that nil values render as SQL NULL instead
// of invoking fn (which could dereference or call a method on nil).
func nilAwareAppender(fn AppenderFunc) AppenderFunc {
    return func(gen QueryGen, b []byte, v reflect.Value) []byte {
        if v.IsNil() {
            return dialect.AppendNull(b)
        }
        return fn(gen, b, v)
    }
}

// PtrAppender wraps fn so it is applied to the pointed-to value;
// nil pointers render as SQL NULL.
func PtrAppender(fn AppenderFunc) AppenderFunc {
    return func(gen QueryGen, b []byte, v reflect.Value) []byte {
        if v.IsNil() {
            return dialect.AppendNull(b)
        }
        return fn(gen, b, v.Elem())
    }
}
// AppendBoolValue appends v using the dialect's boolean literal form.
func AppendBoolValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    return gen.Dialect().AppendBool(b, v.Bool())
}

// AppendIntValue appends v as a base-10 signed integer literal.
func AppendIntValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    return strconv.AppendInt(b, v.Int(), 10)
}

// AppendUintValue appends v as a base-10 unsigned integer literal.
func AppendUintValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    return strconv.AppendUint(b, v.Uint(), 10)
}

// appendUint32Value delegates to the dialect, which may need special
// handling for unsigned types.
func appendUint32Value(gen QueryGen, b []byte, v reflect.Value) []byte {
    return gen.Dialect().AppendUint32(b, uint32(v.Uint()))
}

// appendUint64Value delegates to the dialect, which may need special
// handling for unsigned types.
func appendUint64Value(gen QueryGen, b []byte, v reflect.Value) []byte {
    return gen.Dialect().AppendUint64(b, v.Uint())
}

// AppendFloat32Value appends v as a 32-bit float literal.
func AppendFloat32Value(gen QueryGen, b []byte, v reflect.Value) []byte {
    return dialect.AppendFloat32(b, float32(v.Float()))
}
// AppendFloat64Value appends v as a 64-bit float literal.
// The gen parameter is unused but required by the AppenderFunc signature.
func AppendFloat64Value(gen QueryGen, b []byte, v reflect.Value) []byte {
    // v.Float() already returns float64; the previous float64(...) conversion
    // was redundant.
    return dialect.AppendFloat64(b, v.Float())
}
// appendBytesValue appends a []byte value using the dialect's byte-string
// literal form.
func appendBytesValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    return gen.Dialect().AppendBytes(b, v.Bytes())
}

// appendArrayBytesValue appends a [N]byte value as a byte string.
// Non-addressable arrays cannot be sliced, so their contents are copied
// to a temporary slice first.
func appendArrayBytesValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    if v.CanAddr() {
        return gen.Dialect().AppendBytes(b, v.Slice(0, v.Len()).Bytes())
    }
    tmp := make([]byte, v.Len())
    reflect.Copy(reflect.ValueOf(tmp), v)
    b = gen.Dialect().AppendBytes(b, tmp)
    return b
}

// AppendStringValue appends v as a quoted, escaped string literal.
func AppendStringValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    return gen.Dialect().AppendString(b, v.String())
}
// AppendJSONValue marshals v to JSON and appends it via the dialect's
// JSON literal form. Marshal errors are reported in-band.
func AppendJSONValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    bb, err := bunjson.Marshal(v.Interface())
    if err != nil {
        return dialect.AppendError(b, err)
    }
    // Some encoders (e.g. json.Encoder-style) terminate output with a
    // newline; strip it before quoting.
    if len(bb) > 0 && bb[len(bb)-1] == '\n' {
        bb = bb[:len(bb)-1]
    }
    return gen.Dialect().AppendJSON(b, bb)
}
// appendTimeValue appends a time.Time using the dialect's timestamp format.
func appendTimeValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    tm := v.Interface().(time.Time)
    return gen.Dialect().AppendTime(b, tm)
}

// appendIPNetValue appends a net.IPNet in its CIDR string form.
func appendIPNetValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    ipnet := v.Interface().(net.IPNet)
    return gen.Dialect().AppendString(b, ipnet.String())
}

// appendStringer appends any fmt.Stringer (net.IP, netip.Addr, netip.Prefix)
// via its String() form.
func appendStringer(gen QueryGen, b []byte, v reflect.Value) []byte {
    return gen.Dialect().AppendString(b, v.Interface().(fmt.Stringer).String())
}

// appendJSONRawMessageValue appends a json.RawMessage as a quoted string;
// a nil message becomes SQL NULL.
func appendJSONRawMessageValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    bytes := v.Bytes()
    if bytes == nil {
        return dialect.AppendNull(b)
    }
    return gen.Dialect().AppendString(b, internal.String(bytes))
}
// appendQueryAppenderValue appends a value implementing QueryAppender.
func appendQueryAppenderValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    return AppendQueryAppender(gen, b, v.Interface().(QueryAppender))
}

// appendDriverValue resolves a driver.Valuer to its underlying value and
// appends that. Errors (including a Valuer returning another Valuer, which
// would otherwise recurse) are reported in-band.
func appendDriverValue(gen QueryGen, b []byte, v reflect.Value) []byte {
    value, err := v.Interface().(driver.Valuer).Value()
    if err != nil {
        return dialect.AppendError(b, err)
    }
    if _, ok := value.(driver.Valuer); ok {
        return dialect.AppendError(b, fmt.Errorf("driver.Valuer returns unsupported type %T", value))
    }
    return gen.Append(b, value)
}
// addrAppender adapts fn to run on v.Addr(), for types whose appender is
// implemented on the pointer receiver. Non-addressable values are reported
// as an in-band error.
func addrAppender(fn AppenderFunc) AppenderFunc {
    return func(gen QueryGen, b []byte, v reflect.Value) []byte {
        if v.CanAddr() {
            return fn(gen, b, v.Addr())
        }
        return dialect.AppendError(b, fmt.Errorf("bun: Append(nonaddressable %T)", v.Interface()))
    }
}
// appendMsgpack encodes v with msgpack and appends it hex-encoded.
// Encoders are pooled via msgpack.GetEncoder/PutEncoder to avoid per-call
// allocations.
func appendMsgpack(gen QueryGen, b []byte, v reflect.Value) []byte {
    hexEnc := internal.NewHexEncoder(b)
    enc := msgpack.GetEncoder()
    defer msgpack.PutEncoder(enc)
    enc.Reset(hexEnc)
    if err := enc.EncodeValue(v); err != nil {
        return dialect.AppendError(b, err)
    }
    if err := hexEnc.Close(); err != nil {
        return dialect.AppendError(b, err)
    }
    return hexEnc.Bytes()
}
// AppendQueryAppender appends app's query text to b, converting any
// AppendQuery error into an in-band SQL error.
func AppendQueryAppender(gen QueryGen, b []byte, app QueryAppender) []byte {
    bb, err := app.AppendQuery(gen, b)
    if err != nil {
        return dialect.AppendError(b, err)
    }
    return bb
}

194
vendor/github.com/uptrace/bun/schema/dialect.go generated vendored Normal file
View File

@@ -0,0 +1,194 @@
package schema
import (
"database/sql"
"encoding/hex"
"strconv"
"time"
"unicode/utf8"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal/parser"
)
// Dialect abstracts the SQL-dialect-specific behavior a database driver
// must provide: metadata (name, features, tables), identifier quoting,
// and literal-appending primitives used by the query generator.
type Dialect interface {
    Init(db *sql.DB)
    Name() dialect.Name
    Features() feature.Feature
    Tables() *Tables
    OnTable(table *Table)
    IdentQuote() byte
    AppendUint32(b []byte, n uint32) []byte
    AppendUint64(b []byte, n uint64) []byte
    AppendTime(b []byte, tm time.Time) []byte
    AppendString(b []byte, s string) []byte
    AppendBytes(b []byte, bs []byte) []byte
    AppendJSON(b, jsonb []byte) []byte
    AppendBool(b []byte, v bool) []byte
    // AppendSequence adds the appropriate instruction for the driver to create a sequence
    // from which (autoincremented) values for the column will be generated.
    AppendSequence(b []byte, t *Table, f *Field) []byte
    // DefaultVarcharLen should be returned for dialects in which specifying VARCHAR length
    // is mandatory in queries that modify the schema (CREATE TABLE / ADD COLUMN, etc).
    // Dialects that do not have such requirement may return 0, which should be interpreted so by the caller.
    DefaultVarcharLen() int
    // DefaultSchema returns the name of the default database schema.
    DefaultSchema() string
}
// ------------------------------------------------------------------------------

// BaseDialect provides default, ANSI-leaning implementations of the
// Dialect appending primitives; concrete dialects embed it and override
// what they need.
type BaseDialect struct{}

// AppendUint32 appends n as a base-10 literal.
func (BaseDialect) AppendUint32(b []byte, n uint32) []byte {
    return strconv.AppendUint(b, uint64(n), 10)
}

// AppendUint64 appends n as a base-10 literal.
func (BaseDialect) AppendUint64(b []byte, n uint64) []byte {
    return strconv.AppendUint(b, n, 10)
}
// AppendTime appends tm as a quoted UTC timestamp with microsecond
// precision and numeric zone offset.
func (BaseDialect) AppendTime(b []byte, tm time.Time) []byte {
    b = append(b, '\'')
    b = tm.UTC().AppendFormat(b, "2006-01-02 15:04:05.999999-07:00")
    b = append(b, '\'')
    return b
}

// AppendString appends s as a single-quoted SQL string literal:
// NUL runes are dropped, single quotes are doubled, and multi-byte runes
// are re-encoded as UTF-8.
func (BaseDialect) AppendString(b []byte, s string) []byte {
    b = append(b, '\'')
    for _, r := range s {
        // Strip NUL: many databases reject it inside text literals.
        if r == '\000' {
            continue
        }
        if r == '\'' {
            b = append(b, '\'', '\'')
            continue
        }
        if r < utf8.RuneSelf {
            b = append(b, byte(r))
            continue
        }
        // Multi-byte rune: ensure capacity, encode in place, then trim
        // to the actual encoded length.
        l := len(b)
        if cap(b)-l < utf8.UTFMax {
            b = append(b, make([]byte, utf8.UTFMax)...)
        }
        n := utf8.EncodeRune(b[l:l+utf8.UTFMax], r)
        b = b[:l+n]
    }
    b = append(b, '\'')
    return b
}
// AppendBytes appends bs as a PostgreSQL-style hex byte-string literal
// ('\x...'); nil renders as SQL NULL.
func (BaseDialect) AppendBytes(b, bs []byte) []byte {
    if bs == nil {
        return dialect.AppendNull(b)
    }
    b = append(b, `'\x`...)
    // Reserve the encoded length up front and hex-encode in place.
    s := len(b)
    b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
    hex.Encode(b[s:], bs)
    b = append(b, '\'')
    return b
}
// AppendJSON appends already-marshaled JSON as a single-quoted literal,
// doubling single quotes, dropping NUL bytes, and escaping `\u0000`
// sequences so the database does not reject them.
func (BaseDialect) AppendJSON(b, jsonb []byte) []byte {
    b = append(b, '\'')
    p := parser.New(jsonb)
    for p.Valid() {
        c := p.Read()
        switch c {
        case '"':
            b = append(b, '"')
        case '\'':
            // Escape for the SQL string literal.
            b = append(b, "''"...)
        case '\000':
            continue
        case '\\':
            if p.CutPrefix([]byte("u0000")) {
                // Double-escape the NUL escape sequence.
                b = append(b, `\\u0000`...)
            } else {
                // Keep the backslash together with the escaped character.
                b = append(b, '\\')
                if p.Valid() {
                    b = append(b, p.Read())
                }
            }
        default:
            b = append(b, c)
        }
    }
    b = append(b, '\'')
    return b
}
// AppendBool appends v using the shared dialect package's boolean literal.
func (BaseDialect) AppendBool(b []byte, v bool) []byte {
    return dialect.AppendBool(b, v)
}
// ------------------------------------------------------------------------------

// nopDialect is a stand-in Dialect used when no real database dialect is
// configured (e.g. for formatting queries without a connection).
type nopDialect struct {
    BaseDialect
    tables   *Tables
    features feature.Feature
}

// newNopDialect builds a nopDialect with its own table registry and a
// minimal feature set.
func newNopDialect() *nopDialect {
    d := new(nopDialect)
    d.tables = NewTables(d)
    d.features = feature.Returning
    return d
}

func (d *nopDialect) Init(*sql.DB) {}

// Name returns dialect.Invalid, which marks this generator as a no-op
// (see QueryGen.IsNop).
func (d *nopDialect) Name() dialect.Name {
    return dialect.Invalid
}

func (d *nopDialect) Features() feature.Feature {
    return d.features
}

func (d *nopDialect) Tables() *Tables {
    return d.tables
}

func (d *nopDialect) OnField(field *Field) {}

func (d *nopDialect) OnTable(table *Table) {}

// IdentQuote uses the ANSI double-quote for identifiers.
func (d *nopDialect) IdentQuote() byte {
    return '"'
}

func (d *nopDialect) DefaultVarcharLen() int {
    return 0
}

// AppendSequence is a no-op: the nop dialect emits no sequence DDL.
func (d *nopDialect) AppendSequence(b []byte, _ *Table, _ *Field) []byte {
    return b
}

func (d *nopDialect) DefaultSchema() string {
    return "nop"
}

137
vendor/github.com/uptrace/bun/schema/field.go generated vendored Normal file
View File

@@ -0,0 +1,137 @@
package schema
import (
"fmt"
"reflect"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/internal/tagparser"
)
// Field describes one column-mapped struct field of a model: its Go
// reflection data, parsed bun tag, SQL naming/typing, and the cached
// append/scan/zero-check functions used at query time.
type Field struct {
    Table       *Table // Contains this field
    StructField reflect.StructField
    IsPtr       bool
    Tag          tagparser.Tag
    IndirectType reflect.Type
    Index        []int // index path into the (possibly embedded) struct
    Name    string // SQL name, e.g. id
    SQLName Safe   // escaped SQL name, e.g. "id"
    GoName  string // struct field name, e.g. Id
    DiscoveredSQLType  string
    UserSQLType        string
    CreateTableSQLType string
    SQLDefault         string
    OnDelete string
    OnUpdate string
    IsPK          bool
    NotNull       bool
    NullZero      bool
    AutoIncrement bool
    Identity      bool
    Append AppenderFunc
    Scan   ScannerFunc
    IsZero IsZeroerFunc
}
// String returns the field's SQL name.
func (f *Field) String() string {
    return f.Name
}

// WithIndex returns a copy of the field whose index is prefixed with path
// (used when the field is reached through embedded structs). An empty path
// returns the receiver unchanged.
func (f *Field) WithIndex(path []int) *Field {
    if len(path) == 0 {
        return f
    }
    clone := *f
    clone.Index = makeIndex(path, f.Index)
    return &clone
}

// Clone returns a shallow copy; the index slice's capacity is clipped so
// appends on the copy cannot clobber the original's backing array.
func (f *Field) Clone() *Field {
    cp := *f
    cp.Index = cp.Index[:len(f.Index):len(f.Index)]
    return &cp
}
// Value returns the field's value within strct, allocating intermediate
// nil pointers along the index path as needed.
func (f *Field) Value(strct reflect.Value) reflect.Value {
    return internal.FieldByIndexAlloc(strct, f.Index)
}

// HasNilValue reports whether the field's value in v is nil; a nil pointer
// anywhere along the index path also counts as nil.
func (f *Field) HasNilValue(v reflect.Value) bool {
    if len(f.Index) == 1 {
        return v.Field(f.Index[0]).IsNil()
    }
    for _, index := range f.Index {
        if v.Kind() == reflect.Ptr {
            if v.IsNil() {
                return true
            }
            v = v.Elem()
        }
        v = v.Field(index)
    }
    return v.IsNil()
}

// HasZeroValue reports whether the field's value in v is the zero value
// per f.IsZero; a nil pointer along the index path counts as zero.
func (f *Field) HasZeroValue(v reflect.Value) bool {
    if len(f.Index) == 1 {
        return f.IsZero(v.Field(f.Index[0]))
    }
    for _, index := range f.Index {
        if v.Kind() == reflect.Ptr {
            if v.IsNil() {
                return true
            }
            v = v.Elem()
        }
        v = v.Field(index)
    }
    return f.IsZero(v)
}
// AppendValue appends the field's SQL literal taken from strct.
// Unreachable values (nil along the path), nil pointers, and zero values
// under nullzero all render as SQL NULL.
func (f *Field) AppendValue(gen QueryGen, b []byte, strct reflect.Value) []byte {
    fv, ok := fieldByIndex(strct, f.Index)
    if !ok {
        return dialect.AppendNull(b)
    }
    if (f.IsPtr && fv.IsNil()) || (f.NullZero && f.IsZero(fv)) {
        return dialect.AppendNull(b)
    }
    if f.Append == nil {
        panic(fmt.Errorf("bun: AppendValue(unsupported %s)", fv.Type()))
    }
    return f.Append(gen, b, fv)
}

// ScanValue scans src into the field within strct. For a NULL src the
// field is only touched if it is already reachable (no pointer allocation);
// otherwise intermediate pointers are allocated as needed.
func (f *Field) ScanValue(strct reflect.Value, src any) error {
    if src == nil {
        if fv, ok := fieldByIndex(strct, f.Index); ok {
            return f.ScanWithCheck(fv, src)
        }
        return nil
    }
    fv := internal.FieldByIndexAlloc(strct, f.Index)
    return f.ScanWithCheck(fv, src)
}

// ScanWithCheck applies f.Scan to fv, returning an error for fields with
// no scanner instead of panicking.
func (f *Field) ScanWithCheck(fv reflect.Value, src any) error {
    if f.Scan == nil {
        return fmt.Errorf("bun: Scan(unsupported %s)", f.IndirectType)
    }
    return f.Scan(fv, src)
}

// SkipUpdate reports whether the field is tagged to be excluded from
// UPDATE queries.
func (f *Field) SkipUpdate() bool {
    return f.Tag.HasOption("skipupdate")
}

43
vendor/github.com/uptrace/bun/schema/hook.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
package schema
import (
"context"
"database/sql"
"reflect"
)
// Model scans query result rows into its underlying value.
type Model interface {
    ScanRows(ctx context.Context, rows *sql.Rows) (int, error)
    Value() any
}

// Query exposes the metadata hooks need about the query being executed.
type Query interface {
    QueryAppender
    Operation() string
    GetModel() Model
    GetTableName() string
}

//------------------------------------------------------------------------------

// BeforeAppendModelHook runs before a model is appended to a query.
type BeforeAppendModelHook interface {
    BeforeAppendModel(ctx context.Context, query Query) error
}

// beforeAppendModelHookType is used to detect hook implementations via reflection.
var beforeAppendModelHookType = reflect.TypeFor[BeforeAppendModelHook]()

//------------------------------------------------------------------------------

// BeforeScanRowHook runs before each result row is scanned into the model.
type BeforeScanRowHook interface {
    BeforeScanRow(context.Context) error
}

var beforeScanRowHookType = reflect.TypeFor[BeforeScanRowHook]()

//------------------------------------------------------------------------------

// AfterScanRowHook runs after each result row has been scanned into the model.
type AfterScanRowHook interface {
    AfterScanRow(context.Context) error
}

var afterScanRowHookType = reflect.TypeFor[AfterScanRowHook]()

291
vendor/github.com/uptrace/bun/schema/querygen.go generated vendored Normal file
View File

@@ -0,0 +1,291 @@
package schema
import (
"reflect"
"strconv"
"strings"
"time"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/internal/parser"
)
// nopQueryGen is the shared generator backed by the no-op dialect.
var nopQueryGen = QueryGen{
    dialect: newNopDialect(),
}

// QueryGen renders query fragments for a specific dialect, optionally
// carrying a list of named arguments. It is a small value type and is
// passed by value.
type QueryGen struct {
    dialect Dialect
    args    *namedArgList
}

// NewQueryGen returns a generator for the given dialect.
func NewQueryGen(dialect Dialect) QueryGen {
    return QueryGen{
        dialect: dialect,
    }
}

// NewNopQueryGen returns the shared no-op generator.
func NewNopQueryGen() QueryGen {
    return nopQueryGen
}

// IsNop reports whether this generator uses the no-op dialect.
func (f QueryGen) IsNop() bool {
    return f.dialect.Name() == dialect.Invalid
}

func (f QueryGen) Dialect() Dialect {
    return f.dialect
}

func (f QueryGen) IdentQuote() byte {
    return f.dialect.IdentQuote()
}
// Append appends v's SQL literal to b. Common concrete types take fast
// paths; anything else goes through the reflection-based Appender.
func (gen QueryGen) Append(b []byte, v any) []byte {
    switch v := v.(type) {
    case nil:
        return dialect.AppendNull(b)
    case bool:
        return dialect.AppendBool(b, v)
    case int:
        return strconv.AppendInt(b, int64(v), 10)
    case int32:
        return strconv.AppendInt(b, int64(v), 10)
    case int64:
        return strconv.AppendInt(b, v, 10)
    case uint:
        // NOTE(review): uint is funneled through int64, so values above
        // MaxInt64 would render negative; uint32/uint64 go through the
        // dialect instead — confirm this asymmetry is intentional upstream.
        return strconv.AppendInt(b, int64(v), 10)
    case uint32:
        return gen.Dialect().AppendUint32(b, v)
    case uint64:
        return gen.Dialect().AppendUint64(b, v)
    case float32:
        return dialect.AppendFloat32(b, v)
    case float64:
        return dialect.AppendFloat64(b, v)
    case string:
        return gen.Dialect().AppendString(b, v)
    case time.Time:
        return gen.Dialect().AppendTime(b, v)
    case []byte:
        return gen.Dialect().AppendBytes(b, v)
    case QueryAppender:
        return AppendQueryAppender(gen, b, v)
    default:
        vv := reflect.ValueOf(v)
        if vv.Kind() == reflect.Ptr && vv.IsNil() {
            return dialect.AppendNull(b)
        }
        appender := Appender(gen.Dialect(), vv.Type())
        return appender(gen, b, vv)
    }
}
// AppendName appends a single quoted SQL name (e.g. a column name).
func (f QueryGen) AppendName(b []byte, name string) []byte {
    return dialect.AppendName(b, name, f.IdentQuote())
}

// AppendIdent appends a possibly dotted identifier (e.g. table.column),
// quoting each part.
func (f QueryGen) AppendIdent(b []byte, ident string) []byte {
    return dialect.AppendIdent(b, ident, f.IdentQuote())
}

// AppendValue appends a reflect.Value's SQL literal; nil pointers render
// as SQL NULL.
func (f QueryGen) AppendValue(b []byte, v reflect.Value) []byte {
    if v.Kind() == reflect.Ptr && v.IsNil() {
        return dialect.AppendNull(b)
    }
    appender := Appender(f.dialect, v.Type())
    return appender(f, b, v)
}

// HasFeature reports whether the dialect supports the given feature.
func (f QueryGen) HasFeature(feature feature.Feature) bool {
    return f.dialect.Features().Has(feature)
}
// WithArg returns a copy of the generator with arg prepended to its
// named-argument list; the receiver is unchanged.
func (f QueryGen) WithArg(arg NamedArgAppender) QueryGen {
    return QueryGen{
        dialect: f.dialect,
        args:    f.args.WithArg(arg),
    }
}

// WithNamedArg returns a copy of the generator with a single name/value
// argument prepended; the receiver is unchanged.
func (f QueryGen) WithNamedArg(name string, value any) QueryGen {
    return QueryGen{
        dialect: f.dialect,
        args:    f.args.WithArg(&namedArg{name: name, value: value}),
    }
}
// FormatQuery substitutes '?' placeholders in query with args and returns
// the result. The query is returned verbatim when the generator is a
// no-op, there are no arguments at all, or it contains no placeholders.
func (f QueryGen) FormatQuery(query string, args ...any) string {
    if f.IsNop() || (args == nil && f.args == nil) || strings.IndexByte(query, '?') == -1 {
        return query
    }
    return internal.String(f.AppendQuery(nil, query, args...))
}

// AppendQuery is FormatQuery appending into dst instead of returning a string.
func (f QueryGen) AppendQuery(dst []byte, query string, args ...any) []byte {
    if f.IsNop() || (args == nil && f.args == nil) || strings.IndexByte(query, '?') == -1 {
        return append(dst, query...)
    }
    return f.append(dst, parser.NewString(query), args)
}
// append walks the query, substituting each '?' placeholder:
//   - `\?` emits a literal '?'
//   - `?NNN` substitutes the positional argument args[NNN]
//   - `?name` substitutes a named argument (from a single NamedArgAppender
//     or struct argument, then from the generator's own arg list)
//   - bare `?` consumes the next positional argument in order
//
// Unresolvable placeholders are re-emitted verbatim.
func (f QueryGen) append(dst []byte, p *parser.Parser, args []any) []byte {
    // A single argument may itself resolve named placeholders.
    var namedArgs NamedArgAppender
    if len(args) == 1 {
        if v, ok := args[0].(NamedArgAppender); ok {
            namedArgs = v
        } else if v, ok := newStructArgs(f, args[0]); ok {
            namedArgs = v
        }
    }
    var argIndex int
    for p.Valid() {
        b, ok := p.ReadSep('?')
        if !ok {
            // No more placeholders: flush the tail.
            dst = append(dst, b...)
            continue
        }
        // Escaped placeholder: drop the backslash, keep the '?'.
        if len(b) > 0 && b[len(b)-1] == '\\' {
            dst = append(dst, b[:len(b)-1]...)
            dst = append(dst, '?')
            continue
        }
        dst = append(dst, b...)
        name, numeric := p.ReadIdentifier()
        if name != "" {
            if numeric {
                idx, err := strconv.Atoi(name)
                if err != nil {
                    goto restore_arg
                }
                if idx >= len(args) {
                    goto restore_arg
                }
                dst = f.appendArg(dst, args[idx])
                continue
            }
            if namedArgs != nil {
                dst, ok = namedArgs.AppendNamedArg(f, dst, name)
                if ok {
                    continue
                }
            }
            dst, ok = f.args.AppendNamedArg(f, dst, name)
            if ok {
                continue
            }
        restore_arg:
            // Could not resolve: re-emit the placeholder as written.
            dst = append(dst, '?')
            dst = append(dst, name...)
            continue
        }
        // Bare '?': positional substitution; out-of-range keeps the '?'.
        if argIndex >= len(args) {
            dst = append(dst, '?')
            continue
        }
        arg := args[argIndex]
        argIndex++
        dst = f.appendArg(dst, arg)
    }
    return dst
}
// appendArg appends one substituted argument: QueryAppenders emit their
// own SQL (errors in-band), everything else goes through Append.
func (gen QueryGen) appendArg(b []byte, arg any) []byte {
    switch arg := arg.(type) {
    case QueryAppender:
        bb, err := arg.AppendQuery(gen, b)
        if err != nil {
            return dialect.AppendError(b, err)
        }
        return bb
    default:
        return gen.Append(b, arg)
    }
}
//------------------------------------------------------------------------------

// NamedArgAppender resolves a named placeholder: it appends the value for
// name to b and reports whether the name was recognized.
type NamedArgAppender interface {
    AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool)
}

// namedArgList is an immutable singly-linked list of NamedArgAppenders;
// prepending (WithArg) shares the tail, so QueryGen copies stay cheap.
type namedArgList struct {
    arg  NamedArgAppender
    next *namedArgList
}

// WithArg returns a new list with arg at the head; works on a nil receiver.
func (l *namedArgList) WithArg(arg NamedArgAppender) *namedArgList {
    return &namedArgList{
        arg:  arg,
        next: l,
    }
}

// AppendNamedArg tries each appender in order (most recently added first)
// until one recognizes the name.
func (l *namedArgList) AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool) {
    for l != nil && l.arg != nil {
        if b, ok := l.arg.AppendNamedArg(gen, b, name); ok {
            return b, true
        }
        l = l.next
    }
    return b, false
}
//------------------------------------------------------------------------------

// namedArg binds a single name to a value.
type namedArg struct {
    name  string
    value any
}

var _ NamedArgAppender = (*namedArg)(nil)

// AppendNamedArg substitutes the value only for an exact name match.
func (a *namedArg) AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool) {
    if a.name == name {
        return gen.appendArg(b, a.value), true
    }
    return b, false
}

//------------------------------------------------------------------------------

// structArgs resolves named placeholders from a struct's fields using the
// table metadata registered for its type.
type structArgs struct {
    table *Table
    strct reflect.Value
}

var _ NamedArgAppender = (*structArgs)(nil)

// newStructArgs wraps strct (a struct or pointer to struct) as a named-arg
// source; ok is false for anything else.
func newStructArgs(gen QueryGen, strct any) (*structArgs, bool) {
    v := reflect.ValueOf(strct)
    if !v.IsValid() {
        return nil, false
    }
    v = reflect.Indirect(v)
    if v.Kind() != reflect.Struct {
        return nil, false
    }
    return &structArgs{
        table: gen.Dialect().Tables().Get(v.Type()),
        strct: v,
    }, true
}

// AppendNamedArg delegates the lookup to the table metadata.
func (m *structArgs) AppendNamedArg(gen QueryGen, b []byte, name string) ([]byte, bool) {
    return m.table.AppendNamedArg(gen, b, name, m.strct)
}

51
vendor/github.com/uptrace/bun/schema/reflect.go generated vendored Normal file
View File

@@ -0,0 +1,51 @@
package schema
import (
"database/sql/driver"
"encoding/json"
"net"
"net/netip"
"reflect"
"time"
)
// Cached reflect.Types for the well-known concrete types and interfaces
// that appender/scanner dispatch checks against.
var (
    bytesType          = reflect.TypeFor[[]byte]()
    timePtrType        = reflect.TypeFor[*time.Time]()
    timeType           = reflect.TypeFor[time.Time]()
    ipType             = reflect.TypeFor[net.IP]()
    ipNetType          = reflect.TypeFor[net.IPNet]()
    netipPrefixType    = reflect.TypeFor[netip.Prefix]()
    netipAddrType      = reflect.TypeFor[netip.Addr]()
    jsonRawMessageType = reflect.TypeFor[json.RawMessage]()
    driverValuerType  = reflect.TypeFor[driver.Valuer]()
    queryAppenderType = reflect.TypeFor[QueryAppender]()
    jsonMarshalerType = reflect.TypeFor[json.Marshaler]()
)
// indirectType returns the element type of a pointer type and leaves any
// other type unchanged.
func indirectType(t reflect.Type) reflect.Type {
    if t.Kind() != reflect.Ptr {
        return t
    }
    return t.Elem()
}
// fieldByIndex walks the index path into v without allocating: it
// dereferences intermediate pointers and reports ok=false when one of
// them is nil (the target field is unreachable).
func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, ok bool) {
    // Fast path: direct field access needs no pointer chasing.
    if len(index) == 1 {
        return v.Field(index[0]), true
    }
    v = v.Field(index[0])
    for _, fieldIdx := range index[1:] {
        if v.Kind() == reflect.Ptr {
            if v.IsNil() {
                return v, false
            }
            v = v.Elem()
        }
        v = v.Field(fieldIdx)
    }
    return v, true
}

84
vendor/github.com/uptrace/bun/schema/relation.go generated vendored Normal file
View File

@@ -0,0 +1,84 @@
package schema
import (
"fmt"
)
// Relation kinds. InvalidRelation is the zero value.
const (
    InvalidRelation = iota
    HasOneRelation
    BelongsToRelation
    HasManyRelation
    ManyToManyRelation
)
// Relation describes a table relationship declared via a bun `rel:` tag.
type Relation struct {
    Type  int
    Field *Field // Has the bun tag defining this relation.
    // Base and Join can be explained with this query:
    //
    //	SELECT * FROM base_table JOIN join_table
    JoinTable *Table
    BasePKs   []*Field
    JoinPKs   []*Field
    OnUpdate  string
    OnDelete  string
    Condition []string
    PolymorphicField *Field
    PolymorphicValue string
    // m2m relations join through an intermediate (junction) table.
    M2MTable   *Table
    M2MBasePKs []*Field
    M2MJoinPKs []*Field
}
// References returns true if the table which defines this Relation
// needs to declare a foreign key constraint, as is the case
// for 'has-one' and 'belongs-to' relations. For other relations,
// the constraint is created either in the referencing table (1:N, 'has-many' relations)
// or the junction table (N:N, 'm2m' relations).
//
// Usage of `rel:` tag does not always imply creation of foreign keys (when WithForeignKeys() is not set)
// and can be used exclusively for joining tables at query time. For example:
//
//	type User struct {
//		ID      int64    `bun:",pk"`
//		Profile *Profile `bun:",rel:has-one,join:id=user_id"`
//	}
//
// Creating a FK users.id -> profiles.user_id would be confusing and incorrect,
// so for such cases References() returns false. One notable exception to this rule
// is when a Relation is defined in a junction table, in which case it is perfectly
// fine for its primary keys to reference other tables. Consider:
//
//	// UsersToGroups maps users to groups they follow.
//	type UsersToGroups struct {
//		UserID  string `bun:"user_id,pk"`  // Needs FK to users.id
//		GroupID string `bun:"group_id,pk"` // Needs FK to groups.id
//
//		User  *User  `bun:"rel:belongs-to,join:user_id=id"`
//		Group *Group `bun:"rel:belongs-to,join:group_id=id"`
//	}
//
// Here UsersToGroups has a composite primary key, composed of other primary keys.
func (r *Relation) References() bool {
    // Classify the referencing columns: all part of the PK, or none.
    allPK := true
    nonePK := true
    for _, f := range r.BasePKs {
        allPK = allPK && f.IsPK
        nonePK = nonePK && !f.IsPK
    }
    // Erring on the side of caution, only create foreign keys
    // if the referencing columns are part of a composite PK
    // in the junction table of the m2m relationship.
    effectsM2M := r.Field.Table.IsM2MTable && allPK
    return (r.Type == HasOneRelation || r.Type == BelongsToRelation) && (effectsM2M || nonePK)
}

// String identifies the relation by its defining Go field name.
func (r *Relation) String() string {
    return fmt.Sprintf("relation=%s", r.Field.GoName)
}

566
vendor/github.com/uptrace/bun/schema/scan.go generated vendored Normal file
View File

@@ -0,0 +1,566 @@
package schema
import (
"bytes"
"database/sql"
"fmt"
"net"
"net/netip"
"reflect"
"strconv"
"strings"
"time"
"github.com/puzpuzpuz/xsync/v3"
"github.com/vmihailenco/msgpack/v5"
"github.com/uptrace/bun/dialect/sqltype"
"github.com/uptrace/bun/extra/bunjson"
"github.com/uptrace/bun/internal"
)
// scannerType is the cached reflect.Type of sql.Scanner, used for
// interface-implementation checks during dispatch.
var scannerType = reflect.TypeFor[sql.Scanner]()

// ScannerFunc assigns a database value src to the destination dest.
type ScannerFunc func(dest reflect.Value, src any) error

// scanners maps reflect.Kind (slice index) to the default ScannerFunc;
// populated in init because the table references functions defined below.
var scanners []ScannerFunc
// init fills the kind-indexed scanner table. A nil entry means the kind
// cannot be scanned directly; composite kinds fall back to JSON decoding.
func init() {
    scanners = []ScannerFunc{
        reflect.Bool:          scanBool,
        reflect.Int:           scanInt64,
        reflect.Int8:          scanInt64,
        reflect.Int16:         scanInt64,
        reflect.Int32:         scanInt64,
        reflect.Int64:         scanInt64,
        reflect.Uint:          scanUint64,
        reflect.Uint8:         scanUint64,
        reflect.Uint16:        scanUint64,
        reflect.Uint32:        scanUint64,
        reflect.Uint64:        scanUint64,
        reflect.Uintptr:       scanUint64,
        reflect.Float32:       scanFloat,
        reflect.Float64:       scanFloat,
        reflect.Complex64:     nil,
        reflect.Complex128:    nil,
        reflect.Array:         nil,
        reflect.Interface:     scanInterface,
        reflect.Map:           scanJSON,
        reflect.Ptr:           nil,
        reflect.Slice:         scanJSON,
        reflect.String:        scanString,
        reflect.Struct:        scanJSON,
        reflect.UnsafePointer: nil,
    }
}

// scannerCache memoizes scanner() results per reflect.Type; safe for
// concurrent use.
var scannerCache = xsync.NewMapOf[reflect.Type, ScannerFunc]()
// FieldScanner selects the ScannerFunc for a struct field, honoring bun
// tag options before falling back to the generic type-based Scanner.
func FieldScanner(dialect Dialect, field *Field) ScannerFunc {
    if field.Tag.HasOption("msgpack") {
        return scanMsgpack
    }
    if field.Tag.HasOption("json_use_number") {
        return scanJSONUseNumber
    }
    // Interface fields declared as JSON columns decode the payload into
    // the interface rather than through the dynamic type.
    if field.StructField.Type.Kind() == reflect.Interface {
        switch strings.ToUpper(field.UserSQLType) {
        case sqltype.JSON, sqltype.JSONB:
            return scanJSONIntoInterface
        }
    }
    return Scanner(field.StructField.Type)
}
// Scanner returns the ScannerFunc for typ, computing it on first use and
// memoizing the result in scannerCache for subsequent calls.
func Scanner(typ reflect.Type) ScannerFunc {
    if cached, ok := scannerCache.Load(typ); ok {
        return cached
    }
    fresh := scanner(typ)
    // Another goroutine may have stored a value concurrently;
    // prefer whatever ended up in the cache.
    if winner, loaded := scannerCache.LoadOrStore(typ, fresh); loaded {
        return winner
    }
    return fresh
}
// scanner computes the ScannerFunc for typ. Resolution order: pointer
// unwrapping, well-known concrete types, sql.Scanner on the type then on
// its pointer, byte slices, and finally the kind-based default table.
func scanner(typ reflect.Type) ScannerFunc {
    kind := typ.Kind()
    if kind == reflect.Ptr {
        // Scan into the element type; PtrScanner handles allocation/NULL.
        if fn := Scanner(typ.Elem()); fn != nil {
            return PtrScanner(fn)
        }
    }
    switch typ {
    case bytesType:
        return scanBytes
    case timeType:
        return scanTime
    case ipType:
        return scanIP
    case ipNetType:
        return scanIPNet
    case netipAddrType:
        return scanNetIpAddr
    case netipPrefixType:
        return scanNetIpPrefix
    case jsonRawMessageType:
        return scanBytes
    }
    if typ.Implements(scannerType) {
        return scanScanner
    }
    // *T may implement sql.Scanner even when T does not; requires the
    // destination to be addressable.
    if kind != reflect.Ptr {
        ptr := reflect.PointerTo(typ)
        if ptr.Implements(scannerType) {
            return addrScanner(scanScanner)
        }
    }
    if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
        return scanBytes
    }
    return scanners[kind]
}
// scanBool assigns src to a bool destination. NULL becomes false; int64
// is treated as truthy when non-zero; strings/bytes are parsed with
// strconv.ParseBool.
func scanBool(dest reflect.Value, src any) error {
    switch src := src.(type) {
    case nil:
        dest.SetBool(false)
        return nil
    case bool:
        dest.SetBool(src)
        return nil
    case int64:
        dest.SetBool(src != 0)
        return nil
    case []byte:
        f, err := strconv.ParseBool(internal.String(src))
        if err != nil {
            return err
        }
        dest.SetBool(f)
        return nil
    case string:
        f, err := strconv.ParseBool(src)
        if err != nil {
            return err
        }
        dest.SetBool(f)
        return nil
    default:
        return scanError(dest.Type(), src)
    }
}

// scanInt64 assigns src to a signed integer destination. NULL becomes 0;
// uint64 is reinterpreted as int64; strings/bytes are parsed base 10.
func scanInt64(dest reflect.Value, src any) error {
    switch src := src.(type) {
    case nil:
        dest.SetInt(0)
        return nil
    case int64:
        dest.SetInt(src)
        return nil
    case uint64:
        dest.SetInt(int64(src))
        return nil
    case []byte:
        n, err := strconv.ParseInt(internal.String(src), 10, 64)
        if err != nil {
            return err
        }
        dest.SetInt(n)
        return nil
    case string:
        n, err := strconv.ParseInt(src, 10, 64)
        if err != nil {
            return err
        }
        dest.SetInt(n)
        return nil
    default:
        return scanError(dest.Type(), src)
    }
}
// scanUint64 assigns src to an unsigned integer destination. NULL becomes
// 0; int64 is reinterpreted as uint64; strings/bytes are parsed base 10.
func scanUint64(dest reflect.Value, src any) error {
    switch src := src.(type) {
    case nil:
        dest.SetUint(0)
        return nil
    case uint64:
        dest.SetUint(src)
        return nil
    case int64:
        dest.SetUint(uint64(src))
        return nil
    case []byte:
        n, err := strconv.ParseUint(internal.String(src), 10, 64)
        if err != nil {
            return err
        }
        dest.SetUint(n)
        return nil
    case string:
        n, err := strconv.ParseUint(src, 10, 64)
        if err != nil {
            return err
        }
        dest.SetUint(n)
        return nil
    default:
        return scanError(dest.Type(), src)
    }
}

// scanFloat assigns src to a float destination. NULL becomes 0;
// strings/bytes are parsed as 64-bit floats.
func scanFloat(dest reflect.Value, src any) error {
    switch src := src.(type) {
    case nil:
        dest.SetFloat(0)
        return nil
    case float32:
        dest.SetFloat(float64(src))
        return nil
    case float64:
        dest.SetFloat(src)
        return nil
    case []byte:
        f, err := strconv.ParseFloat(internal.String(src), 64)
        if err != nil {
            return err
        }
        dest.SetFloat(f)
        return nil
    case string:
        f, err := strconv.ParseFloat(src, 64)
        if err != nil {
            return err
        }
        dest.SetFloat(f)
        return nil
    default:
        return scanError(dest.Type(), src)
    }
}
// scanString assigns src to a string destination, formatting numeric and
// time values as text. NULL becomes the empty string.
func scanString(dest reflect.Value, src any) error {
    switch src := src.(type) {
    case nil:
        dest.SetString("")
        return nil
    case string:
        dest.SetString(src)
        return nil
    case []byte:
        dest.SetString(string(src))
        return nil
    case time.Time:
        dest.SetString(src.Format(time.RFC3339Nano))
        return nil
    case int64:
        dest.SetString(strconv.FormatInt(src, 10))
        return nil
    case uint64:
        dest.SetString(strconv.FormatUint(src, 10))
        return nil
    case float64:
        dest.SetString(strconv.FormatFloat(src, 'G', -1, 64))
        return nil
    default:
        return scanError(dest.Type(), src)
    }
}

// scanBytes assigns src to a []byte destination. The driver may reuse its
// buffer after Scan returns, so []byte input is copied.
func scanBytes(dest reflect.Value, src any) error {
    switch src := src.(type) {
    case nil:
        dest.SetBytes(nil)
        return nil
    case string:
        dest.SetBytes([]byte(src))
        return nil
    case []byte:
        clone := make([]byte, len(src))
        copy(clone, src)
        dest.SetBytes(clone)
        return nil
    default:
        return scanError(dest.Type(), src)
    }
}
// scanTime assigns src to a time.Time destination. NULL becomes the zero
// time; strings/bytes are parsed with the internal time parser.
func scanTime(dest reflect.Value, src any) error {
    switch src := src.(type) {
    case nil:
        destTime := dest.Addr().Interface().(*time.Time)
        *destTime = time.Time{}
        return nil
    case time.Time:
        destTime := dest.Addr().Interface().(*time.Time)
        *destTime = src
        return nil
    case string:
        srcTime, err := internal.ParseTime(src)
        if err != nil {
            return err
        }
        destTime := dest.Addr().Interface().(*time.Time)
        *destTime = srcTime
        return nil
    case []byte:
        srcTime, err := internal.ParseTime(internal.String(src))
        if err != nil {
            return err
        }
        destTime := dest.Addr().Interface().(*time.Time)
        *destTime = srcTime
        return nil
    default:
        return scanError(dest.Type(), src)
    }
}

// scanScanner delegates to the destination's own sql.Scanner implementation.
func scanScanner(dest reflect.Value, src any) error {
    return dest.Interface().(sql.Scanner).Scan(src)
}
// scanMsgpack decodes a msgpack payload into dest using pooled decoders.
func scanMsgpack(dest reflect.Value, src any) error {
    if src == nil {
        return scanNull(dest)
    }
    b, err := toBytes(src)
    if err != nil {
        return err
    }
    dec := msgpack.GetDecoder()
    defer msgpack.PutDecoder(dec)
    dec.Reset(bytes.NewReader(b))
    return dec.DecodeValue(dest)
}

// scanJSON unmarshals a JSON payload into dest.
func scanJSON(dest reflect.Value, src any) error {
    if src == nil {
        return scanNull(dest)
    }
    b, err := toBytes(src)
    if err != nil {
        return err
    }
    return bunjson.Unmarshal(b, dest.Addr().Interface())
}

// scanJSONUseNumber is scanJSON with json.Number preserved for numeric
// values instead of float64.
func scanJSONUseNumber(dest reflect.Value, src any) error {
    if src == nil {
        return scanNull(dest)
    }
    b, err := toBytes(src)
    if err != nil {
        return err
    }
    dec := bunjson.NewDecoder(bytes.NewReader(b))
    dec.UseNumber()
    return dec.Decode(dest.Addr().Interface())
}
// scanIP parses src as a textual IP address into a net.IP destination.
func scanIP(dest reflect.Value, src any) error {
    if src == nil {
        return scanNull(dest)
    }
    b, err := toBytes(src)
    if err != nil {
        return err
    }
    ip := net.ParseIP(internal.String(b))
    if ip == nil {
        return fmt.Errorf("bun: invalid ip: %q", b)
    }
    ptr := dest.Addr().Interface().(*net.IP)
    *ptr = ip
    return nil
}

// scanIPNet parses src as CIDR notation into a net.IPNet destination.
func scanIPNet(dest reflect.Value, src any) error {
    if src == nil {
        return scanNull(dest)
    }
    b, err := toBytes(src)
    if err != nil {
        return err
    }
    _, ipnet, err := net.ParseCIDR(internal.String(b))
    if err != nil {
        return err
    }
    ptr := dest.Addr().Interface().(*net.IPNet)
    *ptr = *ipnet
    return nil
}
// scanNetIpAddr parses src as a textual IP address into a netip.Addr
// destination. NULL resets the destination via scanNull.
func scanNetIpAddr(dest reflect.Value, src any) error {
    if src == nil {
        return scanNull(dest)
    }
    b, err := toBytes(src)
    if err != nil {
        return err
    }
    // Previously the parse error was discarded and only IsValid was
    // checked; keep the error so the caller sees why parsing failed.
    val, err := netip.ParseAddr(internal.String(b))
    if err != nil {
        return fmt.Errorf("bun: invalid ip: %q: %w", b, err)
    }
    ptr := dest.Addr().Interface().(*netip.Addr)
    *ptr = val
    return nil
}

// scanNetIpPrefix parses src as CIDR notation into a netip.Prefix
// destination. NULL resets the destination via scanNull.
func scanNetIpPrefix(dest reflect.Value, src any) error {
    if src == nil {
        return scanNull(dest)
    }
    b, err := toBytes(src)
    if err != nil {
        return err
    }
    val, err := netip.ParsePrefix(internal.String(b))
    if err != nil {
        return fmt.Errorf("bun: invalid prefix: %q: %w", b, err)
    }
    ptr := dest.Addr().Interface().(*netip.Prefix)
    *ptr = val
    return nil
}
// addrScanner adapts fn to run on dest.Addr(), for types whose scanner is
// implemented on the pointer receiver; non-addressable values error out.
func addrScanner(fn ScannerFunc) ScannerFunc {
    return func(dest reflect.Value, src any) error {
        if !dest.CanAddr() {
            return fmt.Errorf("bun: Scan(nonaddressable %T)", dest.Interface())
        }
        return fn(dest.Addr(), src)
    }
}

// toBytes normalizes a driver value to []byte; strings are converted
// without copying via internal.Bytes. The result must not be retained.
func toBytes(src any) ([]byte, error) {
    switch src := src.(type) {
    case string:
        return internal.Bytes(src), nil
    case []byte:
        return src, nil
    default:
        return nil, fmt.Errorf("bun: got %T, wanted []byte or string", src)
    }
}
// PtrScanner adapts fn (a scanner for T) into a scanner for *T,
// allocating the pointee on demand.
func PtrScanner(fn ScannerFunc) ScannerFunc {
    return func(dest reflect.Value, src any) error {
        if src == nil {
            if !dest.CanAddr() {
                // Cannot replace the pointer itself; clear the pointee
                // instead if there is one.
                if dest.IsNil() {
                    return nil
                }
                return fn(dest.Elem(), src)
            }
            // NOTE(review): for a NULL src this replaces a non-nil pointer
            // with a freshly allocated zero value rather than setting it to
            // nil — confirm this matches the intended NULL semantics upstream.
            if !dest.IsNil() {
                dest.Set(reflect.New(dest.Type().Elem()))
            }
            return nil
        }
        if dest.IsNil() {
            dest.Set(reflect.New(dest.Type().Elem()))
        }
        // Maps are reference types; the inner scanner expects the map
        // value itself, not a dereferenced pointer.
        if dest.Kind() == reflect.Map {
            return fn(dest, src)
        }
        return fn(dest.Elem(), src)
    }
}
// scanNull resets dest to its zero value; already-nil nilable values are
// left untouched.
func scanNull(dest reflect.Value) error {
    if nilable(dest.Kind()) && dest.IsNil() {
        return nil
    }
    dest.Set(reflect.New(dest.Type()).Elem())
    return nil
}
func scanJSONIntoInterface(dest reflect.Value, src any) error {
if dest.IsNil() {
if src == nil {
return nil
}
b, err := toBytes(src)
if err != nil {
return err
}
return bunjson.Unmarshal(b, dest.Addr().Interface())
}
dest = dest.Elem()
if fn := Scanner(dest.Type()); fn != nil {
return fn(dest, src)
}
return scanError(dest.Type(), src)
}
// scanInterface scans src into an interface destination. A nil
// interface simply adopts the raw driver value; a non-nil interface
// delegates to the scanner registered for its dynamic type.
func scanInterface(dest reflect.Value, src any) error {
	if !dest.IsNil() {
		elem := dest.Elem()
		if fn := Scanner(elem.Type()); fn != nil {
			return fn(elem, src)
		}
		return scanError(elem.Type(), src)
	}
	if src == nil {
		return nil
	}
	dest.Set(reflect.ValueOf(src))
	return nil
}
// nilable reports whether values of the given kind can be nil (and
// therefore support reflect.Value.IsNil).
func nilable(kind reflect.Kind) bool {
	return kind == reflect.Chan || kind == reflect.Func ||
		kind == reflect.Interface || kind == reflect.Map ||
		kind == reflect.Ptr || kind == reflect.Slice
}
// scanError builds the standard "can't scan X into Y" error for a
// source value that has no scanner for the destination type.
func scanError(dest reflect.Type, src any) error {
	// %s on a reflect.Type invokes its String method, so the message is
	// identical to formatting dest.String() explicitly.
	return fmt.Errorf("bun: can't scan %#v (%T) into %s", src, src, dest)
}

133
vendor/github.com/uptrace/bun/schema/sqlfmt.go generated vendored Normal file
View File

@@ -0,0 +1,133 @@
package schema
import (
"log/slog"
"strings"
"github.com/uptrace/bun/internal"
)
// QueryAppender is implemented by values that can render themselves
// into a SQL fragment, appending the bytes to b.
type QueryAppender interface {
	AppendQuery(gen QueryGen, b []byte) ([]byte, error)
}

// ColumnsAppender is implemented by values that can append a column
// list to b.
type ColumnsAppender interface {
	AppendColumns(gen QueryGen, b []byte) ([]byte, error)
}
//------------------------------------------------------------------------------
// Safe represents a safe SQL query.
type Safe string

var _ QueryAppender = (*Safe)(nil)

// AppendQuery appends the fragment verbatim — no quoting or escaping.
func (s Safe) AppendQuery(gen QueryGen, b []byte) ([]byte, error) {
	return append(b, string(s)...), nil
}
//------------------------------------------------------------------------------
// Name represents a single SQL name, for example, a column name.
type Name string

var _ QueryAppender = (*Name)(nil)

// AppendQuery appends the name quoted according to the generator's
// dialect rules.
func (n Name) AppendQuery(gen QueryGen, b []byte) ([]byte, error) {
	return gen.AppendName(b, string(n)), nil
}
//------------------------------------------------------------------------------
// Ident represents a SQL identifier, for example,
// a fully qualified column name such as `table_name.col_name`.
type Ident string

var _ QueryAppender = (*Ident)(nil)

// AppendQuery appends the identifier quoted according to the
// generator's dialect rules.
func (i Ident) AppendQuery(gen QueryGen, b []byte) ([]byte, error) {
	return gen.AppendIdent(b, string(i)), nil
}
//------------------------------------------------------------------------------
// QueryWithArgs bundles a query template with its positional args.
// NOTE: It should not be modified after creation.
type QueryWithArgs struct {
	Query string
	Args  []any
}

var _ QueryAppender = QueryWithArgs{}

// SafeQuery pairs query with args. A nil args slice is normalized to an
// empty one so AppendQuery renders the query as a template rather than
// an identifier; passing args for a query without '?' placeholders
// triggers a warning.
func SafeQuery(query string, args []any) QueryWithArgs {
	if args == nil {
		args = make([]any, 0)
	} else if len(query) > 0 && !strings.Contains(query, "?") {
		internal.Warn.Printf("query %q has %v args, but no placeholders", query, args)
	}
	return QueryWithArgs{Query: query, Args: args}
}

// UnsafeIdent wraps a raw identifier; the nil Args makes AppendQuery
// treat Query as an identifier to be quoted.
func UnsafeIdent(ident string) QueryWithArgs {
	return QueryWithArgs{Query: ident}
}

// IsZero reports whether q carries neither a query nor args.
func (q QueryWithArgs) IsZero() bool {
	return q.Query == "" && q.Args == nil
}

// AppendQuery renders q: as a quoted identifier when Args is nil,
// otherwise as a template with args substituted.
func (q QueryWithArgs) AppendQuery(gen QueryGen, b []byte) ([]byte, error) {
	if q.Args != nil {
		return gen.AppendQuery(b, q.Query, q.Args...), nil
	}
	return gen.AppendIdent(b, q.Query), nil
}
//------------------------------------------------------------------------------
// Order is a SQL sort direction, possibly with a NULLS qualifier.
type Order string

const (
	OrderNone           Order = ""
	OrderAsc            Order = "ASC"
	OrderAscNullsFirst  Order = "ASC NULLS FIRST"
	OrderAscNullsLast   Order = "ASC NULLS LAST"
	OrderDesc           Order = "DESC"
	OrderDescNullsFirst Order = "DESC NULLS FIRST"
	OrderDescNullsLast  Order = "DESC NULLS LAST"
)

// AppendQuery renders the sort direction.
func (s Order) AppendQuery(gen QueryGen, b []byte) ([]byte, error) {
	return AppendOrder(b, s), nil
}

// AppendOrder appends a validated sort direction to b. OrderNone
// appends nothing; unknown values append nothing and are logged.
func AppendOrder(b []byte, sortDir Order) []byte {
	if sortDir == OrderNone {
		return b
	}
	switch sortDir {
	case OrderAsc, OrderAscNullsFirst, OrderAscNullsLast,
		OrderDesc, OrderDescNullsFirst, OrderDescNullsLast:
		return append(b, sortDir...)
	}
	slog.Error("unsupported sort direction", slog.String("sort_dir", string(sortDir)))
	return b
}
//------------------------------------------------------------------------------
// QueryWithSep is a QueryWithArgs plus the separator used when joining
// several fragments.
type QueryWithSep struct {
	QueryWithArgs
	Sep string
}

// SafeQueryWithSep builds a QueryWithSep from query/args plus sep.
func SafeQueryWithSep(query string, args []any, sep string) QueryWithSep {
	return QueryWithSep{QueryWithArgs: SafeQuery(query, args), Sep: sep}
}

141
vendor/github.com/uptrace/bun/schema/sqltype.go generated vendored Normal file
View File

@@ -0,0 +1,141 @@
package schema
import (
"bytes"
"database/sql"
"encoding/json"
"reflect"
"time"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/sqltype"
"github.com/uptrace/bun/internal"
)
// Cached reflect.Types for the sql.Null* wrappers (and bun's own
// NullTime) so DiscoverSQLType can compare destination types cheaply.
var (
	bunNullTimeType = reflect.TypeFor[NullTime]()
	nullTimeType    = reflect.TypeFor[sql.NullTime]()
	nullBoolType    = reflect.TypeFor[sql.NullBool]()
	nullFloatType   = reflect.TypeFor[sql.NullFloat64]()
	nullIntType     = reflect.TypeFor[sql.NullInt64]()
	nullStringType  = reflect.TypeFor[sql.NullString]()
)
// sqlTypes maps a reflect.Kind (used as slice index) to the default SQL
// column type for that kind. An empty string means the kind has no
// sensible default column type.
var sqlTypes = []string{
	reflect.Bool:       sqltype.Boolean,
	reflect.Int:        sqltype.BigInt,
	reflect.Int8:       sqltype.SmallInt,
	reflect.Int16:      sqltype.SmallInt,
	reflect.Int32:      sqltype.Integer,
	reflect.Int64:      sqltype.BigInt,
	reflect.Uint:       sqltype.BigInt,
	reflect.Uint8:      sqltype.SmallInt,
	reflect.Uint16:     sqltype.SmallInt,
	reflect.Uint32:     sqltype.Integer,
	reflect.Uint64:     sqltype.BigInt,
	reflect.Uintptr:    sqltype.BigInt,
	reflect.Float32:    sqltype.Real,
	reflect.Float64:    sqltype.DoublePrecision,
	reflect.Complex64:  "",
	reflect.Complex128: "",
	reflect.Array:      "",
	reflect.Interface:  "",
	reflect.Map:        sqltype.VarChar,
	reflect.Ptr:        "",
	reflect.Slice:      sqltype.VarChar,
	reflect.String:     sqltype.VarChar,
	reflect.Struct:     sqltype.VarChar,
}
// DiscoverSQLType picks a default SQL column type for a Go type:
// well-known wrapper types first, then []byte as a blob, then the
// per-kind default from sqlTypes.
func DiscoverSQLType(typ reflect.Type) string {
	switch typ {
	case timeType, nullTimeType, bunNullTimeType:
		return sqltype.Timestamp
	case nullBoolType:
		return sqltype.Boolean
	case nullFloatType:
		return sqltype.DoublePrecision
	case nullIntType:
		return sqltype.BigInt
	case nullStringType:
		return sqltype.VarChar
	case jsonRawMessageType:
		return sqltype.JSON
	}
	if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
		return sqltype.Blob
	}
	return sqlTypes[typ.Kind()]
}
//------------------------------------------------------------------------------
// jsonNull is the canonical JSON null literal, emitted when marshaling
// a zero NullTime.
var jsonNull = []byte("null")

// NullTime is a time.Time wrapper that marshals zero time as JSON null and SQL NULL.
type NullTime struct {
	time.Time
}

// Compile-time interface conformance checks.
var (
	_ json.Marshaler   = (*NullTime)(nil)
	_ json.Unmarshaler = (*NullTime)(nil)
	_ sql.Scanner      = (*NullTime)(nil)
	_ QueryAppender    = (*NullTime)(nil)
)
// MarshalJSON encodes the zero time as JSON null; any other value uses
// time.Time's regular JSON encoding.
func (tm NullTime) MarshalJSON() ([]byte, error) {
	if !tm.IsZero() {
		return tm.Time.MarshalJSON()
	}
	return jsonNull, nil
}
// UnmarshalJSON decodes JSON null into the zero time; any other input
// uses time.Time's regular JSON decoding.
func (tm *NullTime) UnmarshalJSON(b []byte) error {
	if !bytes.Equal(b, jsonNull) {
		return tm.Time.UnmarshalJSON(b)
	}
	tm.Time = time.Time{}
	return nil
}
// AppendQuery renders the zero time as SQL NULL; any other value is
// formatted as a timestamp by the generator's dialect.
func (tm NullTime) AppendQuery(gen QueryGen, b []byte) ([]byte, error) {
	if !tm.IsZero() {
		return gen.Dialect().AppendTime(b, tm.Time), nil
	}
	return dialect.AppendNull(b), nil
}
// Scan implements sql.Scanner. It accepts NULL (stored as the zero
// time), a time.Time, or a textual timestamp as string or []byte.
func (tm *NullTime) Scan(src any) error {
	if src == nil {
		tm.Time = time.Time{}
		return nil
	}
	switch value := src.(type) {
	case time.Time:
		tm.Time = value
	case string:
		parsed, err := internal.ParseTime(value)
		if err != nil {
			return err
		}
		tm.Time = parsed
	case []byte:
		parsed, err := internal.ParseTime(internal.String(value))
		if err != nil {
			return err
		}
		tm.Time = parsed
	default:
		return scanError(bunNullTimeType, src)
	}
	return nil
}

1130
vendor/github.com/uptrace/bun/schema/table.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

114
vendor/github.com/uptrace/bun/schema/tables.go generated vendored Normal file
View File

@@ -0,0 +1,114 @@
package schema
import (
"fmt"
"reflect"
"sync"
"github.com/puzpuzpuz/xsync/v3"
)
// Tables caches the *Table metadata built for each registered model
// struct type.
type Tables struct {
	dialect Dialect
	// mu serializes table construction (see Get/InProgress).
	mu sync.Mutex
	// tables holds fully initialized tables keyed by struct type.
	tables *xsync.MapOf[reflect.Type, *Table]
	// inProgress holds tables that are still being built; entries are
	// inserted before init runs so lookups for the same type during
	// construction find the placeholder (see InProgress).
	inProgress map[reflect.Type]*Table
}
// NewTables returns an empty table registry for the given dialect.
func NewTables(dialect Dialect) *Tables {
	t := &Tables{
		dialect:    dialect,
		inProgress: make(map[reflect.Type]*Table),
	}
	t.tables = xsync.NewMapOf[reflect.Type, *Table]()
	return t
}
// Register eagerly builds table metadata for each model, which must be
// a pointer to a struct.
func (t *Tables) Register(models ...any) {
	for _, model := range models {
		t.Get(reflect.TypeOf(model).Elem())
	}
}
// Get returns the Table metadata for typ (which must be a struct type
// after pointer indirection), building and caching it on first use.
func (t *Tables) Get(typ reflect.Type) *Table {
	typ = indirectType(typ)
	if typ.Kind() != reflect.Struct {
		panic(fmt.Errorf("got %s, wanted %s", typ.Kind(), reflect.Struct))
	}

	// Fast path: the table has already been built.
	if table, ok := t.tables.Load(typ); ok {
		return table
	}

	t.mu.Lock()
	defer t.mu.Unlock()

	// Re-check under the lock: another goroutine may have built it
	// while we were waiting.
	if table, ok := t.tables.Load(typ); ok {
		return table
	}

	table := t.InProgress(typ)
	table.initRelations()

	t.dialect.OnTable(table)
	for _, field := range table.FieldMap {
		if field.UserSQLType == "" {
			field.UserSQLType = field.DiscoveredSQLType
		}
		if field.CreateTableSQLType == "" {
			field.CreateTableSQLType = field.UserSQLType
		}
	}

	t.tables.Store(typ, table)
	return table
}
// InProgress returns the Table for typ, creating one if needed. The new
// table is inserted into inProgress *before* table.init runs, so a
// recursive lookup for the same type during init finds the placeholder
// instead of recursing forever.
// NOTE(review): this mutates inProgress without locking; Get holds t.mu
// when calling — confirm all other callers do too.
func (t *Tables) InProgress(typ reflect.Type) *Table {
	if table, ok := t.inProgress[typ]; ok {
		return table
	}
	table := new(Table)
	t.inProgress[typ] = table
	table.init(t.dialect, typ)
	return table
}
// ByModel gets the table by its Go name.
func (t *Tables) ByModel(name string) *Table {
	var match *Table
	t.tables.Range(func(_ reflect.Type, table *Table) bool {
		if table.TypeName != name {
			return true // keep scanning
		}
		match = table
		return false // found: stop the iteration
	})
	return match
}
// ByName gets the table by its SQL name.
func (t *Tables) ByName(name string) *Table {
	var match *Table
	t.tables.Range(func(_ reflect.Type, table *Table) bool {
		if table.Name != name {
			return true // keep scanning
		}
		match = table
		return false // found: stop the iteration
	})
	return match
}
// All returns all registered tables (in no particular order).
func (t *Tables) All() []*Table {
	var tables []*Table
	t.tables.Range(func(_ reflect.Type, table *Table) bool {
		tables = append(tables, table)
		return true
	})
	return tables
}

161
vendor/github.com/uptrace/bun/schema/zerochecker.go generated vendored Normal file
View File

@@ -0,0 +1,161 @@
package schema
import (
"database/sql/driver"
"reflect"
)
// isZeroerType caches the reflect.Type of isZeroer for the Implements
// checks in zeroChecker.
var isZeroerType = reflect.TypeFor[isZeroer]()

// isZeroer is implemented by values that can report their own zeroness
// (e.g. time.Time).
type isZeroer interface {
	IsZero() bool
}
// isZero reports whether value is the zero value of its type. Common
// concrete types are special-cased to avoid reflection; everything else
// falls back to a reflect-based checker from zeroChecker.
func isZero(value any) bool {
	switch value := value.(type) {
	case isZeroer:
		return value.IsZero()
	case string:
		return value == ""
	case []byte:
		return value == nil
	case int8:
		return value == 0
	case int16:
		return value == 0
	case int32:
		return value == 0
	case int:
		return value == 0
	case int64:
		return value == 0
	case uint8:
		return value == 0
	case uint16:
		return value == 0
	case uint32:
		return value == 0
	case uint:
		return value == 0
	case uint64:
		return value == 0
	case float32:
		return value == 0
	case float64:
		return value == 0
	default:
		rv := reflect.ValueOf(value)
		return zeroChecker(rv.Type())(rv)
	}
}
// IsZeroerFunc reports whether a reflect.Value holds the zero value.
type IsZeroerFunc func(reflect.Value) bool

// zeroChecker selects the zero-check strategy for typ. Order matters:
// an IsZero method wins, then an IsZero method on the pointer type,
// then a per-kind check, and finally driver.Valuer (NULL means zero).
func zeroChecker(typ reflect.Type) IsZeroerFunc {
	if typ.Implements(isZeroerType) {
		return isZeroInterface
	}

	kind := typ.Kind()

	// Non-pointer types whose *pointer* implements isZeroer are checked
	// through the address (when the value is addressable).
	if kind != reflect.Ptr && reflect.PointerTo(typ).Implements(isZeroerType) {
		return addrChecker(isZeroInterface)
	}

	switch kind {
	case reflect.Array:
		if typ.Elem().Kind() == reflect.Uint8 {
			return isZeroBytes
		}
		return isZeroLen
	case reflect.String:
		return isZeroLen
	case reflect.Bool:
		return isZeroBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return isZeroInt
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return isZeroUint
	case reflect.Float32, reflect.Float64:
		return isZeroFloat
	case reflect.Interface, reflect.Ptr, reflect.Slice, reflect.Map:
		return isNil
	}

	if typ.Implements(driverValuerType) {
		return isZeroDriverValue
	}
	return notZero
}
// addrChecker lifts fn to operate on the value's address; values that
// cannot be addressed are reported as non-zero.
func addrChecker(fn IsZeroerFunc) IsZeroerFunc {
	return func(v reflect.Value) bool {
		return v.CanAddr() && fn(v.Addr())
	}
}
// isZeroInterface asks the value's own IsZero method; a nil pointer is
// always zero (calling IsZero through it could panic).
func isZeroInterface(v reflect.Value) bool {
	if v.Kind() != reflect.Ptr || !v.IsNil() {
		return v.Interface().(isZeroer).IsZero()
	}
	return true
}
// isZeroDriverValue treats a value as zero when its driver.Valuer
// yields nil (SQL NULL); a Valuer error counts as non-zero, as does a
// nil pointer check for pointer kinds.
func isZeroDriverValue(v reflect.Value) bool {
	if v.Kind() == reflect.Ptr {
		return v.IsNil()
	}
	value, err := v.Interface().(driver.Valuer).Value()
	return err == nil && value == nil
}
// isZeroLen: strings and non-byte arrays are zero when empty.
func isZeroLen(v reflect.Value) bool {
	return v.Len() == 0
}

// isNil: pointer-like kinds are zero when nil.
func isNil(v reflect.Value) bool {
	return v.IsNil()
}

// isZeroBool: false is the zero bool.
func isZeroBool(v reflect.Value) bool {
	return !v.Bool()
}

// isZeroInt: 0 is the zero value for signed integers.
func isZeroInt(v reflect.Value) bool {
	return v.Int() == 0
}

// isZeroUint: 0 is the zero value for unsigned integers.
func isZeroUint(v reflect.Value) bool {
	return v.Uint() == 0
}

// isZeroFloat: 0 is the zero float.
func isZeroFloat(v reflect.Value) bool {
	return v.Float() == 0
}

// isZeroBytes reports whether a byte array contains only zero bytes.
func isZeroBytes(v reflect.Value) bool {
	data := v.Slice(0, v.Len()).Bytes()
	for i := range data {
		if data[i] != 0 {
			return false
		}
	}
	return true
}

// notZero is the fallback for kinds with no zero-check: never zero.
func notZero(reflect.Value) bool {
	return false
}

112
vendor/github.com/uptrace/bun/util.go generated vendored Normal file
View File

@@ -0,0 +1,112 @@
package bun
import (
"context"
"fmt"
"reflect"
"strings"
)
// indirect unwraps interfaces (recursively) and one level of pointer,
// returning the underlying value.
func indirect(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Interface {
		return indirect(v.Elem())
	}
	if v.Kind() == reflect.Ptr {
		return v.Elem()
	}
	return v
}
// walk applies fn to every leaf value reached by following the field
// index path through v. A slice fans out over its elements; any other
// value is visited directly.
func walk(v reflect.Value, index []int, fn func(reflect.Value)) {
	v = reflect.Indirect(v)
	if v.Kind() != reflect.Slice {
		visitField(v, index, fn)
		return
	}
	for i, n := 0, v.Len(); i < n; i++ {
		visitField(v.Index(i), index, fn)
	}
}

// visitField descends one step of the index path into v (skipping nil
// pointers) and recurses via walk; with an empty path it invokes fn on
// v itself.
func visitField(v reflect.Value, index []int, fn func(reflect.Value)) {
	v = reflect.Indirect(v)
	if len(index) == 0 {
		fn(v)
		return
	}
	field := v.Field(index[0])
	if field.Kind() == reflect.Ptr && field.IsNil() {
		return
	}
	walk(field, index[1:], fn)
}
// typeByIndex resolves the type reached by following a struct field
// index path, unwrapping pointers and slice element types along the way.
func typeByIndex(t reflect.Type, index []int) reflect.Type {
	for _, i := range index {
		if t.Kind() == reflect.Ptr {
			t = t.Elem()
		} else if t.Kind() == reflect.Slice {
			t = indirectType(t.Elem())
		}
		t = t.Field(i).Type
	}
	return indirectType(t)
}

// indirectType unwraps one level of pointer, if any.
func indirectType(t reflect.Type) reflect.Type {
	if t.Kind() != reflect.Ptr {
		return t
	}
	return t.Elem()
}
func sliceElemType(v reflect.Value) reflect.Type {
elemType := v.Type().Elem()
if elemType.Kind() == reflect.Interface && v.Len() > 0 {
return indirect(v.Index(0).Elem()).Type()
}
return indirectType(elemType)
}
// appendComment adds a `/* name */` comment header to the query buffer.
// NUL bytes are stripped and comment delimiters inside name are escaped
// so the comment cannot be terminated early.
func appendComment(b []byte, name string) []byte {
	if name == "" {
		return b
	}
	name = strings.ReplaceAll(name, "\x00", "")
	name = strings.ReplaceAll(name, `/*`, `/\*`)
	name = strings.ReplaceAll(name, `*/`, `*\/`)
	return append(b, fmt.Sprintf("/* %s */ ", name)...)
}
// queryCommentCtxKey is the private context key under which WithComment
// stores a query comment.
type queryCommentCtxKey struct{}

// WithComment returns a context carrying a comment that may be included
// in a query for debugging.
//
// If a context with an attached query is used, a comment set by the Comment("...") API will be overwritten.
func WithComment(ctx context.Context, comment string) context.Context {
	return context.WithValue(ctx, queryCommentCtxKey{}, comment)
}
// commenter describes the Comment interface implemented by all of the query types.
type commenter[T any] interface {
	Comment(string) T
}

// setCommentFromContext copies a comment stored via WithComment onto q;
// contexts without a comment (or with an empty one) are ignored.
func setCommentFromContext[T any](ctx context.Context, q commenter[T]) {
	if comment, ok := ctx.Value(queryCommentCtxKey{}).(string); ok && comment != "" {
		q.Comment(comment)
	}
}

6
vendor/github.com/uptrace/bun/version.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
package bun
// Version returns the current bun release version.
func Version() string {
	const release = "1.2.16"
	return release
}

4
vendor/github.com/vmihailenco/msgpack/v5/.prettierrc generated vendored Normal file
View File

@@ -0,0 +1,4 @@
semi: false
singleQuote: true
proseWrap: always
printWidth: 100

20
vendor/github.com/vmihailenco/msgpack/v5/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,20 @@
sudo: false
language: go
go:
- 1.15.x
- 1.16.x
- tip
matrix:
allow_failures:
- go: tip
env:
- GO111MODULE=on
go_import_path: github.com/vmihailenco/msgpack
before_install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.31.0

Some files were not shown because too many files have changed in this diff Show More