Saved vendor libs
This commit is contained in:
70
vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go
generated
vendored
Normal file
70
vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
// Package iobufpool implements a global segregated-fit pool of buffers for IO.
|
||||
//
|
||||
// It uses *[]byte instead of []byte to avoid the sync.Pool allocation with Put. Unfortunately, using a pointer to avoid
|
||||
// an allocation is purposely not documented. https://github.com/golang/go/issues/16323
|
||||
package iobufpool
|
||||
|
||||
import "sync"
|
||||
|
||||
// minPoolExpOf2 is the log2 of the smallest pooled buffer size (256 bytes).
const minPoolExpOf2 = 8

// pools holds one sync.Pool per power-of-2 size class, from 1<<8 (256 B)
// up to 1<<25 (32 MiB).
var pools [18]*sync.Pool
|
||||
|
||||
func init() {
|
||||
for i := range pools {
|
||||
bufLen := 1 << (minPoolExpOf2 + i)
|
||||
pools[i] = &sync.Pool{
|
||||
New: func() any {
|
||||
buf := make([]byte, bufLen)
|
||||
return &buf
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets a []byte of len size with cap <= size*2.
|
||||
func Get(size int) *[]byte {
|
||||
i := getPoolIdx(size)
|
||||
if i >= len(pools) {
|
||||
buf := make([]byte, size)
|
||||
return &buf
|
||||
}
|
||||
|
||||
ptrBuf := (pools[i].Get().(*[]byte))
|
||||
*ptrBuf = (*ptrBuf)[:size]
|
||||
|
||||
return ptrBuf
|
||||
}
|
||||
|
||||
func getPoolIdx(size int) int {
|
||||
size--
|
||||
size >>= minPoolExpOf2
|
||||
i := 0
|
||||
for size > 0 {
|
||||
size >>= 1
|
||||
i++
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
// Put returns buf to the pool.
|
||||
func Put(buf *[]byte) {
|
||||
i := putPoolIdx(cap(*buf))
|
||||
if i < 0 {
|
||||
return
|
||||
}
|
||||
|
||||
pools[i].Put(buf)
|
||||
}
|
||||
|
||||
func putPoolIdx(size int) int {
|
||||
minPoolSize := 1 << minPoolExpOf2
|
||||
for i := range pools {
|
||||
if size == minPoolSize<<i {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
6
vendor/github.com/jackc/pgx/v5/internal/pgio/README.md
generated
vendored
Normal file
6
vendor/github.com/jackc/pgx/v5/internal/pgio/README.md
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
# pgio
|
||||
|
||||
Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
|
||||
|
||||
pgio provides functions for appending integers to a []byte while doing byte
|
||||
order conversion.
|
||||
6
vendor/github.com/jackc/pgx/v5/internal/pgio/doc.go
generated
vendored
Normal file
6
vendor/github.com/jackc/pgx/v5/internal/pgio/doc.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
// Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
|
||||
/*
|
||||
pgio provides functions for appending integers to a []byte while doing byte
|
||||
order conversion.
|
||||
*/
|
||||
package pgio
|
||||
40
vendor/github.com/jackc/pgx/v5/internal/pgio/write.go
generated
vendored
Normal file
40
vendor/github.com/jackc/pgx/v5/internal/pgio/write.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
package pgio
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// AppendUint16 appends n to buf in big-endian byte order and returns the
// extended buffer.
func AppendUint16(buf []byte, n uint16) []byte {
	var scratch [2]byte
	binary.BigEndian.PutUint16(scratch[:], n)
	return append(buf, scratch[:]...)
}
|
||||
|
||||
// AppendUint32 appends n to buf in big-endian byte order and returns the
// extended buffer.
func AppendUint32(buf []byte, n uint32) []byte {
	var scratch [4]byte
	binary.BigEndian.PutUint32(scratch[:], n)
	return append(buf, scratch[:]...)
}
|
||||
|
||||
// AppendUint64 appends n to buf in big-endian byte order and returns the
// extended buffer.
func AppendUint64(buf []byte, n uint64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], n)
	return append(buf, scratch[:]...)
}
|
||||
|
||||
// AppendInt16 appends n to buf in big-endian byte order (two's complement)
// and returns the extended buffer.
func AppendInt16(buf []byte, n int16) []byte {
	return AppendUint16(buf, uint16(n))
}
|
||||
|
||||
// AppendInt32 appends n to buf in big-endian byte order (two's complement)
// and returns the extended buffer.
func AppendInt32(buf []byte, n int32) []byte {
	return AppendUint32(buf, uint32(n))
}
|
||||
|
||||
// AppendInt64 appends n to buf in big-endian byte order (two's complement)
// and returns the extended buffer.
func AppendInt64(buf []byte, n int64) []byte {
	return AppendUint64(buf, uint64(n))
}
|
||||
|
||||
// SetInt32 overwrites the first 4 bytes of buf with n in big-endian byte
// order. buf must have length >= 4 or PutUint32 panics.
func SetInt32(buf []byte, n int32) {
	binary.BigEndian.PutUint32(buf, uint32(n))
}
|
||||
60
vendor/github.com/jackc/pgx/v5/internal/sanitize/benchmmark.sh
generated
vendored
Normal file
60
vendor/github.com/jackc/pgx/v5/internal/sanitize/benchmmark.sh
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env bash
# Runs `go test -bench` at each commit given on the command line, storing the
# results under ./benchmarks, then compares them with benchstat. Requires a
# clean work tree; the original branch/commit is restored on exit.

current_branch=$(git rev-parse --abbrev-ref HEAD)
if [ "$current_branch" == "HEAD" ]; then
    # Detached HEAD: remember the commit hash instead of a branch name.
    current_branch=$(git rev-parse HEAD)
fi

restore_branch() {
    echo "Restoring original branch/commit: $current_branch"
    git checkout "$current_branch"
}
trap restore_branch EXIT

# Check if there are uncommitted changes
if ! git diff --quiet || ! git diff --cached --quiet; then
    echo "There are uncommitted changes. Please commit or stash them before running this script."
    exit 1
fi

# Ensure that at least one commit argument is passed
if [ "$#" -lt 1 ]; then
    echo "Usage: $0 <commit1> <commit2> ... <commitN>"
    exit 1
fi

commits=("$@")
benchmarks_dir=benchmarks

if ! mkdir -p "${benchmarks_dir}"; then
    echo "Unable to create dir for benchmarks data"
    exit 1
fi

# Benchmark results
bench_files=()

# Run benchmark for each listed commit
for i in "${!commits[@]}"; do
    commit="${commits[i]}"
    git checkout "$commit" || {
        echo "Failed to checkout $commit"
        exit 1
    }

    # Sanitized commit message (non-alphanumeric characters replaced by '_')
    commit_message=$(git log -1 --pretty=format:"%s" | tr -c '[:alnum:]-_' '_')

    # Benchmark data will go there
    bench_file="${benchmarks_dir}/${i}_${commit_message}.bench"

    if ! go test -bench=. -count=10 >"$bench_file"; then
        echo "Benchmarking failed for commit $commit"
        exit 1
    fi

    bench_files+=("$bench_file")
done

# go install golang.org/x/perf/cmd/benchstat[@latest]
benchstat "${bench_files[@]}"
|
||||
460
vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go
generated
vendored
Normal file
460
vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go
generated
vendored
Normal file
@@ -0,0 +1,460 @@
|
||||
package sanitize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Part is either a string or an int. A string is raw SQL. An int is an
// argument placeholder (1-based position into the args of Sanitize).
type Part any

// Query is a SQL statement split into raw SQL fragments and placeholder
// positions, as produced by NewQuery.
type Query struct {
	Parts []Part
}
|
||||
|
||||
// utf.DecodeRune returns the utf8.RuneError for errors. But that is actually rune U+FFFD -- the unicode replacement
|
||||
// character. utf8.RuneError is not an error if it is also width 3.
|
||||
//
|
||||
// https://github.com/jackc/pgx/issues/1380
|
||||
const replacementcharacterwidth = 3
|
||||
|
||||
// maxBufSize caps the size of buffers kept in bufPool; buffers that grew
// larger are dropped so one huge query does not pin memory indefinitely.
const maxBufSize = 16384 // 16 Ki

// bufPool recycles the scratch buffers used by Query.Sanitize.
var bufPool = &pool[*bytes.Buffer]{
	new: func() *bytes.Buffer {
		return &bytes.Buffer{}
	},
	reset: func(b *bytes.Buffer) bool {
		// Reset for reuse; reject (drop) buffers that grew past maxBufSize.
		n := b.Len()
		b.Reset()
		return n < maxBufSize
	},
}
|
||||
|
||||
var null = []byte("null")
|
||||
|
||||
// Sanitize substitutes args into the placeholder Parts of q and returns the
// resulting SQL string. Placeholders are 1-based: Part value 1 refers to
// args[0]. Supported argument types are nil, int64, float64, bool, []byte,
// string, and time.Time. It returns an error for an out-of-range
// placeholder, an unsupported argument type, an argument that is never
// referenced, or a Part that is neither string nor int.
func (q *Query) Sanitize(args ...any) (string, error) {
	// argUse tracks which args were referenced by at least one placeholder.
	argUse := make([]bool, len(args))
	buf := bufPool.get()
	defer bufPool.put(buf)

	for _, part := range q.Parts {
		switch part := part.(type) {
		case string:
			buf.WriteString(part)
		case int:
			argIdx := part - 1
			var p []byte
			if argIdx < 0 {
				return "", fmt.Errorf("first sql argument must be > 0")
			}

			if argIdx >= len(args) {
				return "", fmt.Errorf("insufficient arguments")
			}

			// Prevent SQL injection via Line Comment Creation
			// https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
			buf.WriteByte(' ')

			// Render the argument directly into buf's spare capacity where
			// possible (Append* / AvailableBuffer avoid extra allocations).
			arg := args[argIdx]
			switch arg := arg.(type) {
			case nil:
				p = null
			case int64:
				p = strconv.AppendInt(buf.AvailableBuffer(), arg, 10)
			case float64:
				p = strconv.AppendFloat(buf.AvailableBuffer(), arg, 'f', -1, 64)
			case bool:
				p = strconv.AppendBool(buf.AvailableBuffer(), arg)
			case []byte:
				p = QuoteBytes(buf.AvailableBuffer(), arg)
			case string:
				p = QuoteString(buf.AvailableBuffer(), arg)
			case time.Time:
				// Truncate to microseconds, the resolution PostgreSQL stores.
				p = arg.Truncate(time.Microsecond).
					AppendFormat(buf.AvailableBuffer(), "'2006-01-02 15:04:05.999999999Z07:00:00'")
			default:
				return "", fmt.Errorf("invalid arg type: %T", arg)
			}
			argUse[argIdx] = true

			buf.Write(p)

			// Prevent SQL injection via Line Comment Creation
			// https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
			buf.WriteByte(' ')
		default:
			return "", fmt.Errorf("invalid Part type: %T", part)
		}
	}

	for i, used := range argUse {
		if !used {
			return "", fmt.Errorf("unused argument: %d", i)
		}
	}
	return buf.String(), nil
}
|
||||
|
||||
func NewQuery(sql string) (*Query, error) {
|
||||
query := &Query{}
|
||||
query.init(sql)
|
||||
|
||||
return query, nil
|
||||
}
|
||||
|
||||
// sqlLexerPool recycles the lexers used by Query.init.
var sqlLexerPool = &pool[*sqlLexer]{
	new: func() *sqlLexer {
		return &sqlLexer{}
	},
	reset: func(sl *sqlLexer) bool {
		// Zero all lexer state; a lexer is always safe to reuse.
		*sl = sqlLexer{}
		return true
	},
}
|
||||
|
||||
// init parses sql into q.Parts by running the lexer state machine,
// reusing q's existing Parts backing storage when present.
func (q *Query) init(sql string) {
	parts := q.Parts[:0]
	if parts == nil {
		// dirty, but fast heuristic to preallocate for ~90% usecases
		n := strings.Count(sql, "$") + strings.Count(sql, "--") + 1
		parts = make([]Part, 0, n)
	}

	l := sqlLexerPool.get()
	defer sqlLexerPool.put(l)

	l.src = sql
	l.stateFn = rawState
	l.parts = parts

	// Run state functions until one returns nil (end of input).
	for l.stateFn != nil {
		l.stateFn = l.stateFn(l)
	}

	q.Parts = l.parts
}
|
||||
|
||||
// QuoteString appends str to dst as a single-quoted SQL string literal,
// doubling any embedded single quotes, and returns the extended buffer.
func QuoteString(dst []byte, str string) []byte {
	const quote = '\''

	// Worst case every byte is a quote needing doubling, plus the two
	// surrounding quotes.
	dst = slices.Grow(dst, len(str)*2+2)

	dst = append(dst, quote)
	for _, b := range []byte(str) {
		dst = append(dst, b)
		if b == quote {
			dst = append(dst, quote)
		}
	}
	return append(dst, quote)
}
|
||||
|
||||
// QuoteBytes appends buf to dst as a PostgreSQL hex bytea literal
// ('\xDEADBEEF' style, lowercase hex) and returns the extended buffer.
func QuoteBytes(dst, buf []byte) []byte {
	if len(buf) == 0 {
		return append(dst, `'\x'`...)
	}

	// Opening quote + `\x` prefix, hex payload, closing quote.
	encLen := hex.EncodedLen(len(buf))
	dst = slices.Grow(dst, 3+encLen+1)

	dst = append(dst, '\'', '\\', 'x')
	n := len(dst)
	dst = dst[:n+encLen]
	hex.Encode(dst[n:], buf)
	return append(dst, '\'')
}
|
||||
|
||||
// sqlLexer is a state-machine lexer over src. Each state function consumes
// input and returns the next state; a nil state ends the scan.
type sqlLexer struct {
	src     string
	start   int // start of the raw-SQL fragment currently being accumulated
	pos     int // current byte position in src
	nested  int // multiline comment nesting level.
	stateFn stateFn
	parts   []Part
}

// stateFn scans from the lexer's current position and returns the next
// state, or nil when the input is exhausted.
type stateFn func(*sqlLexer) stateFn
|
||||
|
||||
// rawState scans ordinary SQL text and dispatches to the appropriate state
// when it sees the start of a string literal, escape string, quoted
// identifier, numeric placeholder, or comment.
func rawState(l *sqlLexer) stateFn {
	for {
		r, width := utf8.DecodeRuneInString(l.src[l.pos:])
		l.pos += width

		switch r {
		case 'e', 'E':
			// Possible escape string constant: E'...'.
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune == '\'' {
				l.pos += width
				return escapeStringState
			}
		case '\'':
			return singleQuoteState
		case '"':
			return doubleQuoteState
		case '$':
			nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
			if '0' <= nextRune && nextRune <= '9' {
				// Placeholder ($1, $2, ...): flush the raw SQL accumulated so
				// far, excluding the '$' itself, then parse the number.
				if l.pos-l.start > 0 {
					l.parts = append(l.parts, l.src[l.start:l.pos-width])
				}
				l.start = l.pos
				return placeholderState
			}
		case '-':
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune == '-' {
				l.pos += width
				return oneLineCommentState
			}
		case '/':
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune == '*' {
				l.pos += width
				return multilineCommentState
			}
		case utf8.RuneError:
			// A RuneError with width != 3 marks end of input or invalid
			// UTF-8; width 3 is a literal U+FFFD replacement character and is
			// treated as ordinary text (see replacementcharacterwidth).
			if width != replacementcharacterwidth {
				if l.pos-l.start > 0 {
					l.parts = append(l.parts, l.src[l.start:l.pos])
					l.start = l.pos
				}
				return nil
			}
		}
	}
}
|
||||
|
||||
// singleQuoteState consumes a standard '...' string literal, where '' is an
// escaped quote, then returns to rawState.
func singleQuoteState(l *sqlLexer) stateFn {
	for {
		r, width := utf8.DecodeRuneInString(l.src[l.pos:])
		l.pos += width

		switch r {
		case '\'':
			// A quote not followed by another quote closes the literal.
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune != '\'' {
				return rawState
			}
			l.pos += width
		case utf8.RuneError:
			// width != 3 means end of input / invalid UTF-8 rather than a
			// literal U+FFFD; flush the pending fragment and stop.
			if width != replacementcharacterwidth {
				if l.pos-l.start > 0 {
					l.parts = append(l.parts, l.src[l.start:l.pos])
					l.start = l.pos
				}
				return nil
			}
		}
	}
}
|
||||
|
||||
// doubleQuoteState consumes a "..." quoted identifier, where "" is an
// escaped double quote, then returns to rawState.
func doubleQuoteState(l *sqlLexer) stateFn {
	for {
		r, width := utf8.DecodeRuneInString(l.src[l.pos:])
		l.pos += width

		switch r {
		case '"':
			// A double quote not followed by another closes the identifier.
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune != '"' {
				return rawState
			}
			l.pos += width
		case utf8.RuneError:
			// width != 3 means end of input / invalid UTF-8 rather than a
			// literal U+FFFD; flush the pending fragment and stop.
			if width != replacementcharacterwidth {
				if l.pos-l.start > 0 {
					l.parts = append(l.parts, l.src[l.start:l.pos])
					l.start = l.pos
				}
				return nil
			}
		}
	}
}
|
||||
|
||||
// placeholderState consumes a placeholder value. The $ must already have
// been consumed. The first rune must be a digit.
func placeholderState(l *sqlLexer) stateFn {
	num := 0

	for {
		r, width := utf8.DecodeRuneInString(l.src[l.pos:])
		l.pos += width

		if '0' <= r && r <= '9' {
			num *= 10
			num += int(r - '0')
		} else {
			// First non-digit ends the number; emit it as an int Part and
			// back up so rawState re-reads the terminating rune.
			l.parts = append(l.parts, num)
			l.pos -= width
			l.start = l.pos
			return rawState
		}
	}
}
|
||||
|
||||
// escapeStringState consumes an escape string constant (E'...'), honoring
// both backslash escapes and doubled single quotes.
func escapeStringState(l *sqlLexer) stateFn {
	for {
		r, width := utf8.DecodeRuneInString(l.src[l.pos:])
		l.pos += width

		switch r {
		case '\\':
			// Skip the escaped rune so an escaped quote does not terminate.
			_, width = utf8.DecodeRuneInString(l.src[l.pos:])
			l.pos += width
		case '\'':
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune != '\'' {
				return rawState
			}
			l.pos += width
		case utf8.RuneError:
			// width != 3 means end of input / invalid UTF-8 rather than a
			// literal U+FFFD; flush the pending fragment and stop.
			if width != replacementcharacterwidth {
				if l.pos-l.start > 0 {
					l.parts = append(l.parts, l.src[l.start:l.pos])
					l.start = l.pos
				}
				return nil
			}
		}
	}
}
|
||||
|
||||
// oneLineCommentState consumes a -- comment through the end of the line.
// A backslash consumes the following rune as well.
func oneLineCommentState(l *sqlLexer) stateFn {
	for {
		r, width := utf8.DecodeRuneInString(l.src[l.pos:])
		l.pos += width

		switch r {
		case '\\':
			_, width = utf8.DecodeRuneInString(l.src[l.pos:])
			l.pos += width
		case '\n', '\r':
			return rawState
		case utf8.RuneError:
			// width != 3 means end of input / invalid UTF-8 rather than a
			// literal U+FFFD; flush the pending fragment and stop.
			if width != replacementcharacterwidth {
				if l.pos-l.start > 0 {
					l.parts = append(l.parts, l.src[l.start:l.pos])
					l.start = l.pos
				}
				return nil
			}
		}
	}
}
|
||||
|
||||
// multilineCommentState consumes a /* ... */ comment, tracking l.nested so
// that inner /* */ pairs do not terminate the outer comment.
func multilineCommentState(l *sqlLexer) stateFn {
	for {
		r, width := utf8.DecodeRuneInString(l.src[l.pos:])
		l.pos += width

		switch r {
		case '/':
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune == '*' {
				l.pos += width
				l.nested++
			}
		case '*':
			nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
			if nextRune != '/' {
				continue
			}

			l.pos += width
			if l.nested == 0 {
				return rawState
			}
			l.nested--

		case utf8.RuneError:
			// width != 3 means end of input / invalid UTF-8 rather than a
			// literal U+FFFD; flush the pending fragment and stop.
			if width != replacementcharacterwidth {
				if l.pos-l.start > 0 {
					l.parts = append(l.parts, l.src[l.start:l.pos])
					l.start = l.pos
				}
				return nil
			}
		}
	}
}
|
||||
|
||||
// queryPool recycles the Query values used by SanitizeSQL.
var queryPool = &pool[*Query]{
	new: func() *Query {
		return &Query{}
	},
	reset: func(q *Query) bool {
		// Keep the Parts backing array for reuse but reject (drop) queries
		// whose Parts grew large.
		n := len(q.Parts)
		q.Parts = q.Parts[:0]
		return n < 64 // drop too large queries
	},
}
|
||||
|
||||
// SanitizeSQL replaces placeholder values with args. It quotes and escapes args
// as necessary. This function is only safe when standard_conforming_strings is
// on.
func SanitizeSQL(sql string, args ...any) (string, error) {
	// Pooled Query avoids reparsing allocations; it is returned to the pool
	// only after Sanitize has produced its result string.
	query := queryPool.get()
	query.init(sql)
	defer queryPool.put(query)

	return query.Sanitize(args...)
}
|
||||
|
||||
// pool wraps sync.Pool with typed constructor and reset hooks.
//
// new creates a value when the pool is empty. reset prepares a value for
// reuse before it is returned to the pool; returning false drops the value
// instead (e.g. to discard oversized buffers).
type pool[E any] struct {
	p     sync.Pool
	new   func() E
	reset func(E) bool
}

// get returns a pooled value, constructing a fresh one via new when the
// pool is empty.
func (p *pool[E]) get() E {
	// Fixed: receiver renamed from `pool` to `p` for consistency with put
	// (and to stop shadowing the type name).
	v, ok := p.p.Get().(E)
	if !ok {
		v = p.new()
	}

	return v
}

// put returns v to the pool after resetting it; values rejected by reset
// are dropped so they can be garbage collected.
func (p *pool[E]) put(v E) {
	if p.reset(v) {
		p.p.Put(v)
	}
}
|
||||
111
vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
generated
vendored
Normal file
111
vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
package stmtcache
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// LRUCache implements Cache with a Least Recently Used (LRU) cache.
type LRUCache struct {
	cap          int                            // maximum number of cached statements
	m            map[string]*list.Element       // sql -> element whose Value is a *pgconn.StatementDescription
	l            *list.List                     // recency list; most recently used at the front
	invalidStmts []*pgconn.StatementDescription // invalidated but not yet removed via RemoveInvalidated
}
|
||||
|
||||
// NewLRUCache creates a new LRUCache. cap is the maximum size of the cache.
|
||||
func NewLRUCache(cap int) *LRUCache {
|
||||
return &LRUCache{
|
||||
cap: cap,
|
||||
m: make(map[string]*list.Element),
|
||||
l: list.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the statement description for sql. Returns nil if not found.
|
||||
func (c *LRUCache) Get(key string) *pgconn.StatementDescription {
|
||||
if el, ok := c.m[key]; ok {
|
||||
c.l.MoveToFront(el)
|
||||
return el.Value.(*pgconn.StatementDescription)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache or
|
||||
// sd.SQL has been invalidated and HandleInvalidated has not been called yet.
|
||||
func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
|
||||
if sd.SQL == "" {
|
||||
panic("cannot store statement description with empty SQL")
|
||||
}
|
||||
|
||||
if _, present := c.m[sd.SQL]; present {
|
||||
return
|
||||
}
|
||||
|
||||
// The statement may have been invalidated but not yet handled. Do not readd it to the cache.
|
||||
for _, invalidSD := range c.invalidStmts {
|
||||
if invalidSD.SQL == sd.SQL {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if c.l.Len() == c.cap {
|
||||
c.invalidateOldest()
|
||||
}
|
||||
|
||||
el := c.l.PushFront(sd)
|
||||
c.m[sd.SQL] = el
|
||||
}
|
||||
|
||||
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
|
||||
func (c *LRUCache) Invalidate(sql string) {
|
||||
if el, ok := c.m[sql]; ok {
|
||||
delete(c.m, sql)
|
||||
c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
|
||||
c.l.Remove(el)
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidateAll invalidates all statement descriptions.
|
||||
func (c *LRUCache) InvalidateAll() {
|
||||
el := c.l.Front()
|
||||
for el != nil {
|
||||
c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
|
||||
el = el.Next()
|
||||
}
|
||||
|
||||
c.m = make(map[string]*list.Element)
|
||||
c.l = list.New()
|
||||
}
|
||||
|
||||
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
// The returned slice is the cache's internal storage, not a copy.
func (c *LRUCache) GetInvalidated() []*pgconn.StatementDescription {
	return c.invalidStmts
}
|
||||
|
||||
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
// never seen by the call to GetInvalidated.
func (c *LRUCache) RemoveInvalidated() {
	c.invalidStmts = nil
}
|
||||
|
||||
// Len returns the number of cached prepared statement descriptions.
// Invalidated statements awaiting removal are not counted.
func (c *LRUCache) Len() int {
	return c.l.Len()
}
|
||||
|
||||
// Cap returns the maximum number of cached prepared statement descriptions.
func (c *LRUCache) Cap() int {
	return c.cap
}
|
||||
|
||||
// invalidateOldest moves the least recently used statement description to
// invalidStmts and removes it from the cache. The cache must be non-empty:
// c.l.Back() is nil otherwise and the Value access panics.
func (c *LRUCache) invalidateOldest() {
	oldest := c.l.Back()
	sd := oldest.Value.(*pgconn.StatementDescription)
	c.invalidStmts = append(c.invalidStmts, sd)
	delete(c.m, sd.SQL)
	c.l.Remove(oldest)
}
|
||||
45
vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
generated
vendored
Normal file
45
vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// Package stmtcache is a cache for statement descriptions.
|
||||
package stmtcache
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// StatementName returns a statement name that will be stable for sql across multiple connections and program
// executions. The name is "stmtcache_" followed by the first 24 bytes of the
// SHA-256 digest of sql, hex encoded.
func StatementName(sql string) string {
	sum := sha256.Sum256([]byte(sql))
	return "stmtcache_" + hex.EncodeToString(sum[:24])
}
|
||||
|
||||
// Cache caches statement descriptions. In this package it is implemented by
// LRUCache and UnlimitedCache.
type Cache interface {
	// Get returns the statement description for sql. Returns nil if not found.
	Get(sql string) *pgconn.StatementDescription

	// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
	Put(sd *pgconn.StatementDescription)

	// Invalidate invalidates statement description identified by sql. Does nothing if not found.
	Invalidate(sql string)

	// InvalidateAll invalidates all statement descriptions.
	InvalidateAll()

	// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
	GetInvalidated() []*pgconn.StatementDescription

	// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
	// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
	// never seen by the call to GetInvalidated.
	RemoveInvalidated()

	// Len returns the number of cached prepared statement descriptions.
	Len() int

	// Cap returns the maximum number of cached prepared statement descriptions.
	Cap() int
}
|
||||
77
vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
generated
vendored
Normal file
77
vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
package stmtcache
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// UnlimitedCache implements Cache with no capacity limit.
type UnlimitedCache struct {
	m            map[string]*pgconn.StatementDescription // sql -> statement description
	invalidStmts []*pgconn.StatementDescription          // invalidated but not yet removed via RemoveInvalidated
}
|
||||
|
||||
// NewUnlimitedCache creates a new UnlimitedCache.
|
||||
func NewUnlimitedCache() *UnlimitedCache {
|
||||
return &UnlimitedCache{
|
||||
m: make(map[string]*pgconn.StatementDescription),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the statement description for sql. Returns nil if not found.
func (c *UnlimitedCache) Get(sql string) *pgconn.StatementDescription {
	return c.m[sql]
}
|
||||
|
||||
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
|
||||
func (c *UnlimitedCache) Put(sd *pgconn.StatementDescription) {
|
||||
if sd.SQL == "" {
|
||||
panic("cannot store statement description with empty SQL")
|
||||
}
|
||||
|
||||
if _, present := c.m[sd.SQL]; present {
|
||||
return
|
||||
}
|
||||
|
||||
c.m[sd.SQL] = sd
|
||||
}
|
||||
|
||||
// Invalidate invalidates statement description identified by sql. Does nothing if not found.
|
||||
func (c *UnlimitedCache) Invalidate(sql string) {
|
||||
if sd, ok := c.m[sql]; ok {
|
||||
delete(c.m, sql)
|
||||
c.invalidStmts = append(c.invalidStmts, sd)
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidateAll invalidates all statement descriptions.
|
||||
func (c *UnlimitedCache) InvalidateAll() {
|
||||
for _, sd := range c.m {
|
||||
c.invalidStmts = append(c.invalidStmts, sd)
|
||||
}
|
||||
|
||||
c.m = make(map[string]*pgconn.StatementDescription)
|
||||
}
|
||||
|
||||
// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
// The returned slice is the cache's internal storage, not a copy.
func (c *UnlimitedCache) GetInvalidated() []*pgconn.StatementDescription {
	return c.invalidStmts
}
|
||||
|
||||
// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
// never seen by the call to GetInvalidated.
func (c *UnlimitedCache) RemoveInvalidated() {
	c.invalidStmts = nil
}
|
||||
|
||||
// Len returns the number of cached prepared statement descriptions.
// Invalidated statements awaiting removal are not counted.
func (c *UnlimitedCache) Len() int {
	return len(c.m)
}
|
||||
|
||||
// Cap returns the maximum number of cached prepared statement descriptions.
// For UnlimitedCache this is math.MaxInt, i.e. effectively unlimited.
func (c *UnlimitedCache) Cap() int {
	return math.MaxInt
}
|
||||
Reference in New Issue
Block a user