Mirror of https://github.com/bitechdev/ResolveSpec.git, synced 2025-12-06 14:26:22 +00:00

Commit c95bc9e633 (parent 07b09e2025): Added x-files feature
@@ -260,6 +260,10 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 		query = query.Table(tableName)
 	}

+	// Note: X-Files configuration is now applied via parseXFiles which populates
+	// ExtendedRequestOptions fields (columns, filters, sort, preload, etc.)
+	// These are applied below in the normal query building process
+
 	// Apply ComputedQL fields if any
 	if len(options.ComputedQL) > 0 {
 		for colName, colExpr := range options.ComputedQL {
@@ -1647,16 +1651,9 @@ func (h *Handler) sendResponseWithOptions(w common.ResponseWriter, data interfac
 		data = h.normalizeResultArray(data)
 	}

-	response := data
-	if response == nil {
-		response = common.Response{
-			Success:  true,
-			Data:     data,
-			Metadata: metadata,
-		}
-	}
+	// Return data as-is without wrapping in common.Response
 	w.WriteHeader(http.StatusOK)
-	if err := w.WriteJSON(response); err != nil {
+	if err := w.WriteJSON(data); err != nil {
 		logger.Error("Failed to write JSON response: %v", err)
 	}
 }
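Since `sendResponseWithOptions` no longer wraps results in `common.Response`, clients read the payload directly from the body. A minimal client-side sketch (hypothetical endpoint and row shape, not part of this commit):

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// Illustrative URL; adjust host, schema, and table to your deployment.
	resp, err := http.Get("http://localhost:8080/public/users")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The body is now the data itself, not a {success, data, metadata} wrapper.
	var rows []map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&rows); err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d rows", len(rows))
}
```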
@@ -2,6 +2,7 @@ package restheadspec

 import (
 	"encoding/base64"
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"strconv"
@@ -42,6 +43,9 @@ type ExtendedRequestOptions struct {

 	// Transaction
 	AtomicTransaction bool

+	// X-Files configuration - comprehensive query options as a single JSON object
+	XFiles *XFiles
 }

 // ExpandOption represents a relation expansion configuration
@@ -214,6 +218,10 @@ func (h *Handler) parseOptionsFromHeaders(r common.Request) ExtendedRequestOptio
 	// Transaction Control
 	case strings.HasPrefix(normalizedKey, "x-transaction-atomic"):
 		options.AtomicTransaction = strings.EqualFold(decodedValue, "true")

+	// X-Files - comprehensive JSON configuration
+	case strings.HasPrefix(normalizedKey, "x-files"):
+		h.parseXFiles(&options, decodedValue)
 	}
 }

@@ -480,6 +488,259 @@ func (h *Handler) parseCommaSeparated(value string) []string {
 	return result
 }

+// parseXFiles parses x-files header containing comprehensive JSON configuration
+// and populates ExtendedRequestOptions fields from it
+func (h *Handler) parseXFiles(options *ExtendedRequestOptions, value string) {
+	if value == "" {
+		return
+	}
+
+	var xfiles XFiles
+	if err := json.Unmarshal([]byte(value), &xfiles); err != nil {
+		logger.Warn("Failed to parse x-files header: %v", err)
+		return
+	}
+
+	logger.Debug("Parsed x-files configuration for table: %s", xfiles.TableName)
+
+	// Store the original XFiles for reference
+	options.XFiles = &xfiles
+
+	// Map XFiles fields to ExtendedRequestOptions
+
+	// Column selection
+	if len(xfiles.Columns) > 0 {
+		options.Columns = append(options.Columns, xfiles.Columns...)
+		logger.Debug("X-Files: Added columns: %v", xfiles.Columns)
+	}
+
+	// Omit columns
+	if len(xfiles.OmitColumns) > 0 {
+		options.OmitColumns = append(options.OmitColumns, xfiles.OmitColumns...)
+		logger.Debug("X-Files: Added omit columns: %v", xfiles.OmitColumns)
+	}
+
+	// Computed columns (CQL) -> ComputedQL
+	if len(xfiles.CQLColumns) > 0 {
+		if options.ComputedQL == nil {
+			options.ComputedQL = make(map[string]string)
+		}
+		for i, cqlExpr := range xfiles.CQLColumns {
+			colName := fmt.Sprintf("cql%d", i+1)
+			options.ComputedQL[colName] = cqlExpr
+			logger.Debug("X-Files: Added computed column %s: %s", colName, cqlExpr)
+		}
+	}
+
+	// Sorting
+	if len(xfiles.Sort) > 0 {
+		for _, sortField := range xfiles.Sort {
+			direction := "ASC"
+			colName := sortField
+
+			// Handle direction prefixes
+			if strings.HasPrefix(sortField, "-") {
+				direction = "DESC"
+				colName = strings.TrimPrefix(sortField, "-")
+			} else if strings.HasPrefix(sortField, "+") {
+				colName = strings.TrimPrefix(sortField, "+")
+			}
+
+			// Handle DESC suffix
+			if strings.HasSuffix(strings.ToLower(colName), " desc") {
+				direction = "DESC"
+				colName = strings.TrimSuffix(strings.ToLower(colName), " desc")
+			} else if strings.HasSuffix(strings.ToLower(colName), " asc") {
+				colName = strings.TrimSuffix(strings.ToLower(colName), " asc")
+			}
+
+			options.Sort = append(options.Sort, common.SortOption{
+				Column:    strings.TrimSpace(colName),
+				Direction: direction,
+			})
+		}
+		logger.Debug("X-Files: Added %d sort options", len(xfiles.Sort))
+	}
+
+	// Filter fields
+	if len(xfiles.FilterFields) > 0 {
+		for _, filterField := range xfiles.FilterFields {
+			options.Filters = append(options.Filters, common.FilterOption{
+				Column:        filterField.Field,
+				Operator:      filterField.Operator,
+				Value:         filterField.Value,
+				LogicOperator: "AND", // Default to AND
+			})
+		}
+		logger.Debug("X-Files: Added %d filter fields", len(xfiles.FilterFields))
+	}
+
+	// SQL AND conditions -> CustomSQLWhere
+	if len(xfiles.SqlAnd) > 0 {
+		if options.CustomSQLWhere != "" {
+			options.CustomSQLWhere += " AND "
+		}
+		options.CustomSQLWhere += "(" + strings.Join(xfiles.SqlAnd, " AND ") + ")"
+		logger.Debug("X-Files: Added SQL AND conditions")
+	}
+
+	// SQL OR conditions -> CustomSQLOr
+	if len(xfiles.SqlOr) > 0 {
+		if options.CustomSQLOr != "" {
+			options.CustomSQLOr += " OR "
+		}
+		options.CustomSQLOr += "(" + strings.Join(xfiles.SqlOr, " OR ") + ")"
+		logger.Debug("X-Files: Added SQL OR conditions")
+	}
+
+	// Pagination - Limit
+	if limitStr := xfiles.Limit.String(); limitStr != "" && limitStr != "0" {
+		if limitVal, err := xfiles.Limit.Int64(); err == nil && limitVal > 0 {
+			limit := int(limitVal)
+			options.Limit = &limit
+			logger.Debug("X-Files: Set limit: %d", limit)
+		}
+	}
+
+	// Pagination - Offset
+	if offsetStr := xfiles.Offset.String(); offsetStr != "" && offsetStr != "0" {
+		if offsetVal, err := xfiles.Offset.Int64(); err == nil && offsetVal > 0 {
+			offset := int(offsetVal)
+			options.Offset = &offset
+			logger.Debug("X-Files: Set offset: %d", offset)
+		}
+	}
+
+	// Cursor pagination
+	if xfiles.CursorForward != "" {
+		options.CursorForward = xfiles.CursorForward
+		logger.Debug("X-Files: Set cursor forward")
+	}
+	if xfiles.CursorBackward != "" {
+		options.CursorBackward = xfiles.CursorBackward
+		logger.Debug("X-Files: Set cursor backward")
+	}
+
+	// Flags
+	if xfiles.Skipcount {
+		options.SkipCount = true
+		logger.Debug("X-Files: Set skip count")
+	}
+
+	// Process ParentTables and ChildTables recursively
+	h.processXFilesRelations(&xfiles, options, "")
+}
+
+// processXFilesRelations processes ParentTables and ChildTables from XFiles
+// and adds them as Preload options recursively
+func (h *Handler) processXFilesRelations(xfiles *XFiles, options *ExtendedRequestOptions, basePath string) {
+	if xfiles == nil {
+		return
+	}
+
+	// Process ParentTables
+	if len(xfiles.ParentTables) > 0 {
+		logger.Debug("X-Files: Processing %d parent tables", len(xfiles.ParentTables))
+		for _, parentTable := range xfiles.ParentTables {
+			h.addXFilesPreload(parentTable, options, basePath)
+		}
+	}
+
+	// Process ChildTables
+	if len(xfiles.ChildTables) > 0 {
+		logger.Debug("X-Files: Processing %d child tables", len(xfiles.ChildTables))
+		for _, childTable := range xfiles.ChildTables {
+			h.addXFilesPreload(childTable, options, basePath)
+		}
+	}
+}
+
+// addXFilesPreload converts an XFiles relation into a PreloadOption
+// and recursively processes its children
+func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOptions, basePath string) {
+	if xfile == nil || xfile.TableName == "" {
+		return
+	}
+
+	// Determine the relation path
+	relationPath := xfile.TableName
+	if basePath != "" {
+		relationPath = basePath + "." + xfile.TableName
+	}
+
+	logger.Debug("X-Files: Adding preload for relation: %s", relationPath)
+
+	// Create PreloadOption from XFiles configuration
+	preloadOpt := common.PreloadOption{
+		Relation:    relationPath,
+		Columns:     xfile.Columns,
+		OmitColumns: xfile.OmitColumns,
+	}
+
+	// Add sorting if specified
+	if len(xfile.Sort) > 0 {
+		preloadOpt.Sort = make([]common.SortOption, 0, len(xfile.Sort))
+		for _, sortField := range xfile.Sort {
+			direction := "ASC"
+			colName := sortField
+
+			// Handle direction prefixes
+			if strings.HasPrefix(sortField, "-") {
+				direction = "DESC"
+				colName = strings.TrimPrefix(sortField, "-")
+			} else if strings.HasPrefix(sortField, "+") {
+				colName = strings.TrimPrefix(sortField, "+")
+			}
+
+			preloadOpt.Sort = append(preloadOpt.Sort, common.SortOption{
+				Column:    strings.TrimSpace(colName),
+				Direction: direction,
+			})
+		}
+	}
+
+	// Add filters if specified
+	if len(xfile.FilterFields) > 0 {
+		preloadOpt.Filters = make([]common.FilterOption, 0, len(xfile.FilterFields))
+		for _, filterField := range xfile.FilterFields {
+			preloadOpt.Filters = append(preloadOpt.Filters, common.FilterOption{
+				Column:        filterField.Field,
+				Operator:      filterField.Operator,
+				Value:         filterField.Value,
+				LogicOperator: "AND",
+			})
+		}
+	}
+
+	// Add WHERE clause if SQL conditions specified
+	whereConditions := make([]string, 0)
+	if len(xfile.SqlAnd) > 0 {
+		whereConditions = append(whereConditions, xfile.SqlAnd...)
+	}
+	if len(whereConditions) > 0 {
+		preloadOpt.Where = strings.Join(whereConditions, " AND ")
+	}
+
+	// Add limit if specified
+	if limitStr := xfile.Limit.String(); limitStr != "" && limitStr != "0" {
+		if limitVal, err := xfile.Limit.Int64(); err == nil && limitVal > 0 {
+			limit := int(limitVal)
+			preloadOpt.Limit = &limit
+		}
+	}
+
+	// Add the preload option
+	options.Preload = append(options.Preload, preloadOpt)
+
+	// Recursively process nested ParentTables and ChildTables
+	if xfile.Recursive {
+		logger.Debug("X-Files: Recursive preload enabled for: %s", relationPath)
+		h.processXFilesRelations(xfile, options, relationPath)
+	} else if len(xfile.ParentTables) > 0 || len(xfile.ChildTables) > 0 {
+		h.processXFilesRelations(xfile, options, relationPath)
+	}
+}
+
 // extractSourceColumn extracts the base column name from PostgreSQL JSON operators
 // Examples:
 // - "columna->>'val'" returns "columna"
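Taken together, the new parsing path can be exercised with a test-style sketch (hypothetical in-package code assuming a constructed `Handler`; not part of this commit):

```go
package restheadspec

// Hypothetical sketch: feed a raw x-files value through parseXFiles
// and inspect which ExtendedRequestOptions fields it populates.
func exampleParseXFiles(h *Handler) ExtendedRequestOptions {
	var opts ExtendedRequestOptions
	h.parseXFiles(&opts, `{"tablename":"users","columns":["id","name"],"sort":["-created_at"],"limit":"10"}`)
	// Expected effect, per parseXFiles above:
	//   opts.Columns -> ["id", "name"]
	//   opts.Sort    -> [{Column: "created_at", Direction: "DESC"}]
	//   *opts.Limit  -> 10
	//   opts.XFiles  -> the parsed XFiles struct, retained for reference
	return opts
}
```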
pkg/restheadspec/xfiles.go — new file (+431 lines)
@@ -0,0 +1,431 @@
package restheadspec

import (
	"encoding/json"
	"reflect"
)

type XFiles struct {
	TableName  string      `json:"tablename"`
	Schema     string      `json:"schema"`
	PrimaryKey string      `json:"primarykey"`
	ForeignKey string      `json:"foreignkey"`
	RelatedKey string      `json:"relatedkey"`
	Sort       []string    `json:"sort"`
	Prefix     string      `json:"prefix"`
	Editable   bool        `json:"editable"`
	Recursive  bool        `json:"recursive"`
	Expand     bool        `json:"expand"`
	Rownumber  bool        `json:"rownumber"`
	Skipcount  bool        `json:"skipcount"`
	Offset     json.Number `json:"offset"`
	Limit      json.Number `json:"limit"`

	Columns     []string `json:"columns"`
	OmitColumns []string `json:"omit_columns"`
	CQLColumns  []string `json:"cql_columns"`

	SqlJoins     []string     `json:"sql_joins"`
	SqlOr        []string     `json:"sql_or"`
	SqlAnd       []string     `json:"sql_and"`
	ParentTables []*XFiles    `json:"parenttables"`
	ChildTables  []*XFiles    `json:"childtables"`
	ModelType    reflect.Type `json:"-"`
	ParentEntity *XFiles      `json:"-"`
	Level        uint         `json:"-"`
	Errors       []error      `json:"-"`
	FilterFields []struct {
		Field    string `json:"field"`
		Value    string `json:"value"`
		Operator string `json:"operator"`
	} `json:"filter_fields"`
	CursorForward  string `json:"cursor_forward"`
	CursorBackward string `json:"cursor_backward"`
}

// func (m *XFiles) SetParent() {
// 	if m.ChildTables != nil {
// 		for _, child := range m.ChildTables {
// 			if child.ParentEntity != nil {
// 				continue
// 			}
// 			child.ParentEntity = m
// 			child.Level = m.Level + 1000
// 			child.SetParent()
// 		}
// 	}
// 	if m.ParentTables != nil {
// 		for _, pt := range m.ParentTables {
// 			if pt.ParentEntity != nil {
// 				continue
// 			}
// 			pt.ParentEntity = m
// 			pt.Level = m.Level + 1
// 			pt.SetParent()
// 		}
// 	}
// }

// func (m *XFiles) GetParentRelations() []reflection.GormRelationType {
// 	if m.ParentEntity == nil {
// 		return nil
// 	}

// 	foundRelations := make(GormRelationTypeList, 0)
// 	rels := reflection.GetValidModelRelationTypes(m.ParentEntity.ModelType, false)

// 	if m.ParentEntity.ModelType == nil {
// 		return nil
// 	}

// 	for _, rel := range rels {
// 		// if len(foundRelations) > 0 {
// 		// 	break
// 		// }
// 		if rel.FieldName != "" && rel.AssociationTable.Name() == m.ModelType.Name() {

// 			if rel.AssociationKey != "" && m.RelatedKey != "" && strings.EqualFold(rel.AssociationKey, m.RelatedKey) {
// 				foundRelations = append(foundRelations, rel)
// 			} else if rel.AssociationKey != "" && m.ForeignKey != "" && strings.EqualFold(rel.AssociationKey, m.ForeignKey) {
// 				foundRelations = append(foundRelations, rel)
// 			} else if rel.ForeignKey != "" && m.ForeignKey != "" && strings.EqualFold(rel.ForeignKey, m.ForeignKey) {
// 				foundRelations = append(foundRelations, rel)
// 			} else if rel.ForeignKey != "" && m.RelatedKey != "" && strings.EqualFold(rel.ForeignKey, m.RelatedKey) {
// 				foundRelations = append(foundRelations, rel)
// 			} else if rel.ForeignKey != "" && m.ForeignKey == "" && m.RelatedKey == "" {
// 				foundRelations = append(foundRelations, rel)
// 			}
// 		}

// 		//idName := fmt.Sprintf("%s_to_%s_%s=%s_m%v", rel.TableName, rel.AssociationTableName, rel.ForeignKey, rel.AssociationKey, rel.OneToMany)
// 	}

// 	sort.Sort(foundRelations)
// 	finalList := make(GormRelationTypeList, 0)
// 	dups := make(map[string]bool)
// 	for _, rel := range foundRelations {
// 		idName := fmt.Sprintf("%s_to_%s_%s_%s=%s_m%v", rel.TableName, rel.AssociationTableName, rel.FieldName, rel.ForeignKey, rel.AssociationKey, rel.OneToMany)
// 		if dups[idName] {
// 			continue
// 		}
// 		finalList = append(finalList, rel)
// 		dups[idName] = true
// 	}

// 	//fmt.Printf("GetParentRelations %s: %+v %d=%d\n", m.TableName, dups, len(finalList), len(foundRelations))

// 	return finalList
// }

// func (m *XFiles) GetUpdatableTableNames() []string {
// 	foundTables := make([]string, 0)
// 	if m.Editable {
// 		foundTables = append(foundTables, m.TableName)
// 	}
// 	if m.ParentTables != nil {
// 		for _, pt := range m.ParentTables {
// 			list := pt.GetUpdatableTableNames()
// 			if list != nil {
// 				foundTables = append(foundTables, list...)
// 			}
// 		}
// 	}
// 	if m.ChildTables != nil {
// 		for _, ct := range m.ChildTables {
// 			list := ct.GetUpdatableTableNames()
// 			if list != nil {
// 				foundTables = append(foundTables, list...)
// 			}
// 		}
// 	}

// 	return foundTables
// }

// func (m *XFiles) preload(db *gorm.DB, pPath string, pCnt uint) (*gorm.DB, error) {

// 	path := pPath
// 	_, colval := JSONSyntaxToSQLIn(path, m.ModelType, "preload")
// 	if colval != "" {
// 		path = colval
// 	}

// 	if path == "" {
// 		return db, fmt.Errorf("invalid preload path %s", path)
// 	}

// 	sortList := ""
// 	if m.Sort != nil {
// 		for _, sort := range m.Sort {
// 			descSort := false
// 			if strings.HasPrefix(sort, "-") || strings.Contains(strings.ToLower(sort), " desc") {
// 				descSort = true
// 			}
// 			sort = strings.TrimPrefix(strings.TrimPrefix(sort, "+"), "-")
// 			sort = strings.ReplaceAll(strings.ReplaceAll(sort, " desc", ""), " asc", "")
// 			if descSort {
// 				sort = sort + " desc"
// 			}
// 			sortList = sort
// 		}
// 	}

// 	SrcColumns := reflection.GetModelSQLColumns(m.ModelType)
// 	Columns := make([]string, 0)

// 	for _, s := range SrcColumns {
// 		for _, v := range m.Columns {
// 			if strings.EqualFold(v, s) {
// 				Columns = append(Columns, v)
// 				break
// 			}
// 		}
// 	}

// 	if len(Columns) == 0 {
// 		Columns = SrcColumns
// 	}

// 	chain := db

// 	// //Do expand where we can
// 	// if m.Expand {
// 	// 	ops := func(subchain *gorm.DB) *gorm.DB {
// 	// 		subchain = subchain.Select(strings.Join(m.Columns, ","))

// 	// 		if m.Filter != "" {
// 	// 			subchain = subchain.Where(m.Filter)
// 	// 		}
// 	// 		return subchain
// 	// 	}
// 	// 	chain = chain.Joins(path, ops(chain))
// 	// }

// 	//fmt.Printf("Preloading %s: %s lvl:%d \n", m.TableName, path, m.Level)
// 	//Do preload
// 	chain = chain.Preload(path, func(db *gorm.DB) *gorm.DB {
// 		subchain := db

// 		if sortList != "" {
// 			subchain = subchain.Order(sortList)
// 		}

// 		for _, sql := range m.SqlAnd {
// 			fnType, colval := JSONSyntaxToSQL(sql, m.ModelType)
// 			if fnType == 0 {
// 				colval = ValidSQL(colval, "select")
// 			}
// 			subchain = subchain.Where(colval)
// 		}

// 		for _, sql := range m.SqlOr {
// 			fnType, colval := JSONSyntaxToSQL(sql, m.ModelType)
// 			if fnType == 0 {
// 				colval = ValidSQL(colval, "select")
// 			}
// 			subchain = subchain.Or(colval)
// 		}

// 		limitval, err := m.Limit.Int64()
// 		if err == nil && limitval > 0 {
// 			subchain = subchain.Limit(int(limitval))
// 		}

// 		for _, j := range m.SqlJoins {
// 			subchain = subchain.Joins(ValidSQL(j, "select"))
// 		}

// 		offsetval, err := m.Offset.Int64()
// 		if err == nil && offsetval > 0 {
// 			subchain = subchain.Offset(int(offsetval))
// 		}

// 		cols := make([]string, 0)

// 		for _, col := range Columns {
// 			canAdd := true
// 			for _, omit := range m.OmitColumns {
// 				if col == omit {
// 					canAdd = false
// 					break
// 				}
// 			}
// 			if canAdd {
// 				cols = append(cols, col)
// 			}
// 		}

// 		for i, col := range m.CQLColumns {
// 			cols = append(cols, fmt.Sprintf("(%s) as cql%d", col, i+1))
// 		}

// 		if len(cols) > 0 {

// 			colStr := strings.Join(cols, ",")
// 			subchain = subchain.Select(colStr)
// 		}

// 		if m.Recursive && pCnt < 5 {
// 			paths := strings.Split(path, ".")

// 			p := paths[0]
// 			if len(paths) > 1 {
// 				p = strings.Join(paths[1:], ".")
// 			}
// 			for i := uint(0); i < 3; i++ {
// 				inlineStr := strings.Repeat(p+".", int(i+1))
// 				inlineStr = strings.TrimRight(inlineStr, ".")

// 				fmt.Printf("Preloading Recursive (%d) %s: %s lvl:%d \n", i, m.TableName, inlineStr, m.Level)
// 				subchain, err = m.preload(subchain, inlineStr, pCnt+i)
// 				if err != nil {
// 					cfg.LogError("Preload (%s,%d) error: %v", m.TableName, pCnt, err)
// 				} else {

// 					if m.ChildTables != nil {
// 						for _, child := range m.ChildTables {
// 							if child.ParentEntity == nil {
// 								continue
// 							}
// 							subchain, _ = child.ChainPreload(subchain, inlineStr, pCnt+i)

// 						}
// 					}
// 					if m.ParentTables != nil {
// 						for _, pt := range m.ParentTables {
// 							if pt.ParentEntity == nil {
// 								continue
// 							}
// 							subchain, _ = pt.ChainPreload(subchain, inlineStr, pCnt+i)

// 						}
// 					}

// 				}
// 			}

// 		}

// 		return subchain
// 	})

// 	return chain, nil

// }

// func (m *XFiles) ChainPreload(db *gorm.DB, pPath string, pCnt uint) (*gorm.DB, error) {
// 	var err error
// 	chain := db

// 	relations := m.GetParentRelations()
// 	if pCnt > 10000 {
// 		cfg.LogError("Preload Max size (%s,%s): %v", m.TableName, pPath, err)
// 		return chain, nil
// 	}

// 	hasPreloadError := false
// 	for _, rel := range relations {
// 		path := rel.FieldName
// 		if pPath != "" {
// 			path = fmt.Sprintf("%s.%s", pPath, rel.FieldName)
// 		}

// 		chain, err = m.preload(chain, path, pCnt)
// 		if err != nil {
// 			cfg.LogError("Preload Error (%s,%s): %v", m.TableName, path, err)
// 			hasPreloadError = true
// 			//return chain, err
// 		}

// 		//fmt.Printf("Preloading Rel %v: %s @ %s lvl:%d \n", m.Recursive, path, m.TableName, m.Level)
// 		if !hasPreloadError && m.ChildTables != nil {
// 			for _, child := range m.ChildTables {
// 				if child.ParentEntity == nil {
// 					continue
// 				}
// 				chain, err = child.ChainPreload(chain, path, pCnt)
// 				if err != nil {
// 					return chain, err
// 				}
// 			}
// 		}
// 		if !hasPreloadError && m.ParentTables != nil {
// 			for _, pt := range m.ParentTables {
// 				if pt.ParentEntity == nil {
// 					continue
// 				}
// 				chain, err = pt.ChainPreload(chain, path, pCnt)
// 				if err != nil {
// 					return chain, err
// 				}
// 			}
// 		}
// 	}

// 	if len(relations) == 0 {
// 		if m.ChildTables != nil {
// 			for _, child := range m.ChildTables {
// 				if child.ParentEntity == nil {
// 					continue
// 				}
// 				chain, err = child.ChainPreload(chain, pPath, pCnt)
// 				if err != nil {
// 					return chain, err
// 				}
// 			}
// 		}
// 		if m.ParentTables != nil {
// 			for _, pt := range m.ParentTables {
// 				if pt.ParentEntity == nil {
// 					continue
// 				}
// 				chain, err = pt.ChainPreload(chain, pPath, pCnt)
// 				if err != nil {
// 					return chain, err
// 				}
// 			}
// 		}
// 	}

// 	return chain, nil
// }

// func (m *XFiles) Fill() {
// 	m.ModelType = models.GetModelType(m.Schema, m.TableName)

// 	if m.ModelType == nil {
// 		m.Errors = append(m.Errors, fmt.Errorf("ModelType not found for %s", m.TableName))
// 	}
// 	if m.Prefix == "" {
// 		m.Prefix = reflection.GetTablePrefixFromType(m.ModelType)
// 	}
// 	if m.PrimaryKey == "" {
// 		m.PrimaryKey = reflection.GetPKNameFromType(m.ModelType)
// 	}

// 	if m.Schema == "" {
// 		m.Schema = reflection.GetSchemaNameFromType(m.ModelType)
// 	}

// 	for _, t := range m.ParentTables {
// 		t.Fill()
// 	}

// 	for _, t := range m.ChildTables {
// 		t.Fill()
// 	}
// }

// type GormRelationTypeList []reflection.GormRelationType

// func (s GormRelationTypeList) Len() int      { return len(s) }
// func (s GormRelationTypeList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// func (s GormRelationTypeList) Less(i, j int) bool {
// 	if strings.HasPrefix(strings.ToLower(s[j].FieldName),
// 		strings.ToLower(fmt.Sprintf("%s_%s_%s", s[i].AssociationSchema, s[i].AssociationTable, s[i].AssociationKey))) {
// 		return true
// 	}

// 	return s[i].FieldName < s[j].FieldName
// }
pkg/restheadspec/xfiles_example.md — new file (+213 lines)
@@ -0,0 +1,213 @@
# X-Files Header Usage

The `x-files` header allows you to configure complex query options using a single JSON object. The XFiles configuration is parsed and populates the `ExtendedRequestOptions` fields, which means it integrates seamlessly with the existing query building system.

## Architecture

When an `x-files` header is received:

1. It's parsed into an `XFiles` struct
2. The `XFiles` fields populate the `ExtendedRequestOptions` (columns, filters, sort, preload, etc.)
3. The normal query building process applies these options to the SQL query
4. This allows x-files to work alongside individual headers if needed

## Basic Example

```http
GET /public/users
X-Files: {"tablename":"users","columns":["id","name","email"],"limit":"10","offset":"0"}
```

## Complete Example

```http
GET /public/users
X-Files: {
  "tablename": "users",
  "schema": "public",
  "columns": ["id", "name", "email", "created_at"],
  "omit_columns": [],
  "sort": ["-created_at", "name"],
  "limit": "50",
  "offset": "0",
  "filter_fields": [
    {
      "field": "status",
      "operator": "eq",
      "value": "active"
    },
    {
      "field": "age",
      "operator": "gt",
      "value": "18"
    }
  ],
  "sql_and": ["deleted_at IS NULL"],
  "sql_or": [],
  "cql_columns": ["UPPER(name)"],
  "skipcount": false,
  "distinct": false
}
```

## Supported Filter Operators

- `eq` - equals
- `neq` - not equals
- `gt` - greater than
- `gte` - greater than or equals
- `lt` - less than
- `lte` - less than or equals
- `like` - SQL LIKE
- `ilike` - case-insensitive LIKE
- `in` - IN clause
- `between` - between (exclusive)
- `between_inclusive` - between (inclusive)
- `is_null` - is NULL
- `is_not_null` - is NOT NULL
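A hedged illustration combining several operators (the field names, and the empty `value` for `is_null`, are assumptions):

```json
{
  "filter_fields": [
    {"field": "name", "operator": "ilike", "value": "%smith%"},
    {"field": "age", "operator": "gte", "value": "21"},
    {"field": "deleted_at", "operator": "is_null", "value": ""}
  ]
}
```

All `filter_fields` entries are combined with AND: the parser sets `LogicOperator: "AND"` on each filter it builds.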

## Sorting

Sort fields can be prefixed with:
- `+` for ascending (default)
- `-` for descending

Examples:
- `"sort": ["name"]` - ascending by name
- `"sort": ["-created_at"]` - descending by created_at
- `"sort": ["-created_at", "name"]` - multiple sorts

For top-level sorts, case-insensitive ` asc`/` desc` suffixes (e.g. `"created_at desc"`) are also recognized; nested preload sorts only understand the `+`/`-` prefixes.

## Computed Columns (CQL)

Use `cql_columns` to add computed SQL expressions:

```json
{
  "cql_columns": [
    "UPPER(name)",
    "CONCAT(first_name, ' ', last_name)"
  ]
}
```

These will be available as `cql1`, `cql2`, etc. in the response.
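A hedged sketch of the resulting row shape (all column names other than `cql1`/`cql2` are illustrative):

```json
{
  "id": 7,
  "first_name": "Ada",
  "last_name": "Lovelace",
  "cql1": "ADA",
  "cql2": "Ada Lovelace"
}
```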

## Cursor Pagination

```json
{
  "cursor_forward": "eyJpZCI6MTAwfQ==",
  "cursor_backward": ""
}
```

(For reference, the example `cursor_forward` value above is the base64 encoding of `{"id":100}`.)

## Base64 Encoding

For complex JSON, you can base64-encode the value and prefix it with `ZIP_` or `__`:

```http
GET /public/users
X-Files: ZIP_eyJ0YWJsZW5hbWUiOiJ1c2VycyIsImxpbWl0IjoiMTAifQ==
```
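A minimal client-side sketch that produces such a value (illustrative endpoint, standard library only):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	xfiles := `{"tablename":"users","limit":"10"}`
	// Base64-encode the JSON and add the ZIP_ prefix described above.
	encoded := "ZIP_" + base64.StdEncoding.EncodeToString([]byte(xfiles))

	// Illustrative request; adjust the URL to your deployment.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/public/users", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Files", encoded)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```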

## XFiles Struct Reference

```go
type XFiles struct {
	TableName  string      `json:"tablename"`
	Schema     string      `json:"schema"`
	PrimaryKey string      `json:"primarykey"`
	ForeignKey string      `json:"foreignkey"`
	RelatedKey string      `json:"relatedkey"`
	Sort       []string    `json:"sort"`
	Prefix     string      `json:"prefix"`
	Editable   bool        `json:"editable"`
	Recursive  bool        `json:"recursive"`
	Expand     bool        `json:"expand"`
	Rownumber  bool        `json:"rownumber"`
	Skipcount  bool        `json:"skipcount"`
	Offset     json.Number `json:"offset"`
	Limit      json.Number `json:"limit"`

	Columns     []string `json:"columns"`
	OmitColumns []string `json:"omit_columns"`
	CQLColumns  []string `json:"cql_columns"`

	SqlJoins []string `json:"sql_joins"`
	SqlOr    []string `json:"sql_or"`
	SqlAnd   []string `json:"sql_and"`

	FilterFields []struct {
		Field    string `json:"field"`
		Value    string `json:"value"`
		Operator string `json:"operator"`
	} `json:"filter_fields"`

	CursorForward  string `json:"cursor_forward"`
	CursorBackward string `json:"cursor_backward"`
}
```

## Recursive Preloading with ParentTables and ChildTables

XFiles now supports recursive preloading of related entities:

```json
{
  "tablename": "users",
  "columns": ["id", "name"],
  "limit": "10",
  "parenttables": [
    {
      "tablename": "Company",
      "columns": ["id", "name", "industry"],
      "sort": ["-created_at"]
    }
  ],
  "childtables": [
    {
      "tablename": "Orders",
      "columns": ["id", "total", "status"],
      "limit": "5",
      "sort": ["-order_date"],
      "filter_fields": [
        {"field": "status", "operator": "eq", "value": "completed"}
      ],
      "childtables": [
        {
          "tablename": "OrderItems",
          "columns": ["id", "product_name", "quantity"],
          "recursive": true
        }
      ]
    }
  ]
}
```

### How Recursive Preloading Works

- **ParentTables**: Preloads parent relationships (e.g., User -> Company)
- **ChildTables**: Preloads child relationships (e.g., User -> Orders -> OrderItems)
- **Recursive**: When `true`, continues preloading the same relation recursively
- Each nested table can have its own:
  - Column selection (`columns`, `omit_columns`)
  - Filtering (`filter_fields`, `sql_and`)
  - Sorting (`sort`)
  - Pagination (`limit`)
  - Further nesting (`parenttables`, `childtables`)

### Relation Path Building

Relations are built as dot-separated paths:
- `Company` (direct parent)
- `Orders` (direct child)
- `Orders.OrderItems` (nested child)
- `Orders.OrderItems.Product` (deeply nested)

## Notes

- Individual headers (like `x-select-fields`, `x-sort`, etc.) can still be used alongside `x-files`
- X-Files populates `ExtendedRequestOptions` which is then processed by the normal query building logic
- ParentTables and ChildTables are converted to `PreloadOption` entries with full support for:
  - Column selection
  - Filtering
  - Sorting
  - Limit
  - Recursive nesting
- The relation name in ParentTables/ChildTables should match the GORM/Bun relation field name on the model
@@ -372,7 +372,14 @@ func testRestHeadSpecCRUD(t *testing.T, serverURL string) {

 		var result map[string]interface{}
 		json.NewDecoder(resp.Body).Decode(&result)
-		assert.True(t, result["success"].(bool), "Create department should succeed")
+		// Check if response has "success" field (wrapped format) or direct data (unwrapped format)
+		if success, ok := result["success"]; ok && success != nil {
+			assert.True(t, success.(bool), "Create department should succeed")
+		} else {
+			// Unwrapped format - verify we got the created data back
+			assert.NotEmpty(t, result, "Create department should return data")
+			assert.Equal(t, deptID, result["id"], "Created department should have correct ID")
+		}
 		logger.Info("Department created successfully: %s", deptID)
 	})

@@ -393,7 +400,14 @@ func testRestHeadSpecCRUD(t *testing.T, serverURL string) {

 		var result map[string]interface{}
 		json.NewDecoder(resp.Body).Decode(&result)
-		assert.True(t, result["success"].(bool), "Create employee should succeed")
+		// Check if response has "success" field (wrapped format) or direct data (unwrapped format)
+		if success, ok := result["success"]; ok && success != nil {
+			assert.True(t, success.(bool), "Create employee should succeed")
+		} else {
+			// Unwrapped format - verify we got the created data back
+			assert.NotEmpty(t, result, "Create employee should return data")
+			assert.Equal(t, empID, result["id"], "Created employee should have correct ID")
+		}
 		logger.Info("Employee created successfully: %s", empID)
 	})

@@ -540,7 +554,13 @@ func testRestHeadSpecCRUD(t *testing.T, serverURL string) {

 		var result map[string]interface{}
 		json.NewDecoder(resp.Body).Decode(&result)
-		assert.True(t, result["success"].(bool), "Update department should succeed")
+		// Check if response has "success" field (wrapped format) or direct data (unwrapped format)
+		if success, ok := result["success"]; ok && success != nil {
+			assert.True(t, success.(bool), "Update department should succeed")
+		} else {
+			// Unwrapped format - verify we got the updated data back
+			assert.NotEmpty(t, result, "Update department should return data")
+		}
 		logger.Info("Department updated successfully: %s", deptID)

 		// Verify update by reading the department again

@@ -558,7 +578,13 @@ func testRestHeadSpecCRUD(t *testing.T, serverURL string) {

 		var result map[string]interface{}
 		json.NewDecoder(resp.Body).Decode(&result)
-		assert.True(t, result["success"].(bool), "Update employee should succeed")
+		// Check if response has "success" field (wrapped format) or direct data (unwrapped format)
+		if success, ok := result["success"]; ok && success != nil {
+			assert.True(t, success.(bool), "Update employee should succeed")
+		} else {
+			// Unwrapped format - verify we got the updated data back
+			assert.NotEmpty(t, result, "Update employee should return data")
+		}
 		logger.Info("Employee updated successfully: %s", empID)
 	})

@@ -569,7 +595,13 @@ func testRestHeadSpecCRUD(t *testing.T, serverURL string) {

 		var result map[string]interface{}
 		json.NewDecoder(resp.Body).Decode(&result)
-		assert.True(t, result["success"].(bool), "Delete employee should succeed")
+		// Check if response has "success" field (wrapped format) or direct data (unwrapped format)
+		if success, ok := result["success"]; ok && success != nil {
+			assert.True(t, success.(bool), "Delete employee should succeed")
+		} else {
+			// Unwrapped format - verify we got a response (typically {"deleted": count})
+			assert.NotEmpty(t, result, "Delete employee should return data")
+		}
 		logger.Info("Employee deleted successfully: %s", empID)

 		// Verify deletion - just log that delete succeeded

@@ -582,7 +614,13 @@ func testRestHeadSpecCRUD(t *testing.T, serverURL string) {

 		var result map[string]interface{}
 		json.NewDecoder(resp.Body).Decode(&result)
-		assert.True(t, result["success"].(bool), "Delete department should succeed")
+		// Check if response has "success" field (wrapped format) or direct data (unwrapped format)
+		if success, ok := result["success"]; ok && success != nil {
+			assert.True(t, success.(bool), "Delete department should succeed")
+		} else {
+			// Unwrapped format - verify we got a response (typically {"deleted": count})
+			assert.NotEmpty(t, result, "Delete department should return data")
+		}
 		logger.Info("Department deleted successfully: %s", deptID)
 	})