diff --git a/README.md b/README.md
index e69de29..d57b0cd 100644
--- a/README.md
+++ b/README.md
@@ -0,0 +1,3 @@
+# 🍥 Nigiri
+
+A delicious, hand-crafted in-memory database with JSON persistence.
\ No newline at end of file
diff --git a/collection.go b/collection.go
new file mode 100644
index 0000000..da8e3b2
--- /dev/null
+++ b/collection.go
@@ -0,0 +1,200 @@
+package nigiri
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+)
+
+// StoreManager is the interface a store must satisfy to participate in a
+// Collection's bulk load/save/clear operations.
+type StoreManager interface {
+	LoadData(path string) error
+	SaveData(path string) error
+	Clear()
+}
+
+// Collection manages multiple named stores persisted under one directory.
+type Collection struct {
+	baseDir string
+	stores  map[string]StoreManager
+	mu      sync.RWMutex
+}
+
+// NewCollection creates a new collection rooted at the specified directory.
+func NewCollection(baseDir string) *Collection {
+	return &Collection{
+		baseDir: baseDir,
+		stores:  make(map[string]StoreManager),
+	}
+}
+
+// Register adds a store to the collection under the given name.
+func (c *Collection) Register(name string, store StoreManager) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.stores[name] = store
+}
+
+// Unregister removes a store from the collection.
+func (c *Collection) Unregister(name string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	delete(c.stores, name)
+}
+
+// GetStore retrieves a registered store by name.
+func (c *Collection) GetStore(name string) (StoreManager, bool) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	store, exists := c.stores[name]
+	return store, exists
+}
+
+// LoadAll loads every registered store from its JSON file. All stores are
+// attempted; the errors of any that fail are joined and returned.
+func (c *Collection) LoadAll() error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	if err := os.MkdirAll(c.baseDir, 0755); err != nil {
+		return fmt.Errorf("failed to create base directory: %w", err)
+	}
+
+	var errs []error
+	for name, store := range c.stores {
+		path := filepath.Join(c.baseDir, name+".json")
+		if err := store.LoadData(path); err != nil {
+			errs = append(errs, fmt.Errorf("failed to load %s: %w", name, err))
+		}
+	}
+
+	return errors.Join(errs...)
+}
+
+// SaveAll saves every registered store to its JSON file. All stores are
+// attempted; the errors of any that fail are joined and returned.
+func (c *Collection) SaveAll() error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	if err := os.MkdirAll(c.baseDir, 0755); err != nil {
+		return fmt.Errorf("failed to create base directory: %w", err)
+	}
+
+	var errs []error
+	for name, store := range c.stores {
+		path := filepath.Join(c.baseDir, name+".json")
+		if err := store.SaveData(path); err != nil {
+			errs = append(errs, fmt.Errorf("failed to save %s: %w", name, err))
+		}
+	}
+
+	return errors.Join(errs...)
+}
+
+// LoadStore loads a specific store by name.
+func (c *Collection) LoadStore(name string) error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	store, exists := c.stores[name]
+	if !exists {
+		return fmt.Errorf("store %s not registered", name)
+	}
+
+	path := filepath.Join(c.baseDir, name+".json")
+	return store.LoadData(path)
+}
+
+// SaveStore saves a specific store by name.
+func (c *Collection) SaveStore(name string) error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	store, exists := c.stores[name]
+	if !exists {
+		return fmt.Errorf("store %s not registered", name)
+	}
+
+	if err := os.MkdirAll(c.baseDir, 0755); err != nil {
+		return fmt.Errorf("failed to create base directory: %w", err)
+	}
+
+	path := filepath.Join(c.baseDir, name+".json")
+	return store.SaveData(path)
+}
+
+// ClearAll clears all registered stores.
+func (c *Collection) ClearAll() {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	for _, store := range c.stores {
+		store.Clear()
+	}
+}
+
+// ClearStore clears a specific store by name.
+func (c *Collection) ClearStore(name string) error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	store, exists := c.stores[name]
+	if !exists {
+		return fmt.Errorf("store %s not registered", name)
+	}
+
+	store.Clear()
+	return nil
+}
+
+// ListStores returns the names of all registered stores.
+func (c *Collection) ListStores() []string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	names := make([]string, 0, len(c.stores))
+	for name := range c.stores {
+		names = append(names, name)
+	}
+	return names
+}
+
+// GetPath returns the JSON file path for a store name.
+func (c *Collection) GetPath(name string) string {
+	return filepath.Join(c.baseDir, name+".json")
+}
+
+// StoreExists reports whether a store's file exists on disk.
+func (c *Collection) StoreExists(name string) bool {
+	path := c.GetPath(name)
+	_, err := os.Stat(path)
+	return err == nil
+}
+
+// RemoveStoreFile removes the JSON file for a store.
+func (c *Collection) RemoveStoreFile(name string) error {
+	path := c.GetPath(name)
+	err := os.Remove(path)
+	if os.IsNotExist(err) {
+		return nil // already gone
+	}
+	return err
+}
+
+// GetTypedStore returns a registered store downcast to its concrete
+// BaseStore type, or false if it is missing or of a different type.
+func GetTypedStore[T any](collection *Collection, name string) (*BaseStore[T], bool) {
+	store, exists := collection.GetStore(name)
+	if !exists {
+		return nil, false
+	}
+
+	if typedStore, ok := store.(*BaseStore[T]); ok {
+		return typedStore, true
+	}
+	return nil, false
+}
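For reviewers, a minimal usage sketch of the `Collection` API above. The `User` type, import path, and `./data` directory are illustrative, not part of this patch:

```go
package main

import nigiri "example.com/nigiri" // illustrative import path

// User is a hypothetical entity; persistence requires an int ID field.
type User struct {
	ID    int
	Email string `db:"unique,required"`
}

func main() {
	users := nigiri.NewBaseStore[User]()

	col := nigiri.NewCollection("./data")
	col.Register("users", users) // persisted as ./data/users.json

	// Load whatever already exists on disk (missing files are not errors).
	if err := col.LoadAll(); err != nil {
		panic(err)
	}

	// ... work with the stores ...

	// Flush every registered store back to disk.
	if err := col.SaveAll(); err != nil {
		panic(err)
	}
}
```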
diff --git a/constraints.go b/constraints.go
new file mode 100644
index 0000000..f761f8a
--- /dev/null
+++ b/constraints.go
@@ -0,0 +1,124 @@
+package nigiri
+
+import (
+	"reflect"
+	"strings"
+)
+
+// ConstraintType identifies a kind of field constraint declared in a `db` tag.
+type ConstraintType string
+
+const (
+	ConstraintUnique   ConstraintType = "unique"
+	ConstraintForeign  ConstraintType = "fkey"
+	ConstraintRequired ConstraintType = "required"
+	ConstraintIndex    ConstraintType = "index"
+)
+
+// FieldConstraint is a single constraint attached to a struct field.
+type FieldConstraint struct {
+	Type      ConstraintType
+	Field     string
+	Target    string // for foreign keys: "table.field"
+	IndexName string // for custom index names
+}
+
+// SchemaInfo describes the fields, constraints, and indices parsed from a type.
+type SchemaInfo struct {
+	Fields      map[string]reflect.Type
+	Constraints map[string][]FieldConstraint
+	Indices     map[string]string // field -> index name
+}
+
+// ParseSchema extracts fields, constraints, and indices from T's `db` struct tags.
+func ParseSchema[T any]() *SchemaInfo {
+	var zero T
+	t := reflect.TypeOf(zero)
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	schema := &SchemaInfo{
+		Fields:      make(map[string]reflect.Type),
+		Constraints: make(map[string][]FieldConstraint),
+		Indices:     make(map[string]string),
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if !field.IsExported() {
+			continue // unexported fields cannot be read via reflection
+		}
+		fieldName := field.Name
+		schema.Fields[fieldName] = field.Type
+
+		dbTag := field.Tag.Get("db")
+		if dbTag == "" {
+			continue
+		}
+
+		constraints := parseDBTag(fieldName, dbTag)
+		if len(constraints) > 0 {
+			schema.Constraints[fieldName] = constraints
+		}
+
+		// Auto-create indices for unique and indexed fields.
+		for _, constraint := range constraints {
+			if constraint.Type == ConstraintUnique || constraint.Type == ConstraintIndex {
+				indexName := constraint.IndexName
+				if indexName == "" {
+					indexName = fieldName + "_idx"
+				}
+				schema.Indices[fieldName] = indexName
+			}
+		}
+	}
+
+	return schema
+}
+
+func parseDBTag(fieldName, tag string) []FieldConstraint {
+	var constraints []FieldConstraint
+	parts := strings.Split(tag, ",")
+
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "" {
+			continue
+		}
+
+		switch {
+		case part == "unique":
+			constraints = append(constraints, FieldConstraint{
+				Type:  ConstraintUnique,
+				Field: fieldName,
+			})
+		case part == "required":
+			constraints = append(constraints, FieldConstraint{
+				Type:  ConstraintRequired,
+				Field: fieldName,
+			})
+		case part == "index":
+			constraints = append(constraints, FieldConstraint{
+				Type:  ConstraintIndex,
+				Field: fieldName,
+			})
+		case strings.HasPrefix(part, "index:"):
+			indexName := strings.TrimPrefix(part, "index:")
+			constraints = append(constraints, FieldConstraint{
+				Type:      ConstraintIndex,
+				Field:     fieldName,
+				IndexName: indexName,
+			})
+		case strings.HasPrefix(part, "fkey:"):
+			target := strings.TrimPrefix(part, "fkey:")
+			constraints = append(constraints, FieldConstraint{
+				Type:   ConstraintForeign,
+				Field:  fieldName,
+				Target: target,
+			})
+		}
+	}
+
+	return constraints
+}
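To make the tag grammar above concrete, here is what `ParseSchema` extracts from a hypothetical struct (field names and the foreign-key target are illustrative):

```go
type Order struct {
	ID     int
	Number string `db:"unique"`                 // unique value; auto index "Number_idx"
	UserID int    `db:"required,fkey:users.ID"` // must be non-zero; FK target recorded
	Status string `db:"index:status_idx"`       // named secondary index
}

// ParseSchema[Order]() yields, roughly:
//   Constraints["Number"] -> [{Type: unique}]
//   Constraints["UserID"] -> [{Type: required}, {Type: fkey, Target: "users.ID"}]
//   Constraints["Status"] -> [{Type: index, IndexName: "status_idx"}]
//   Indices: {"Number": "Number_idx", "Status": "status_idx"}
```

Note that, as of this patch, `fkey` constraints are parsed and recorded but not enforced anywhere; only `required` and `unique` are checked at write time.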
diff --git a/persistence.go b/persistence.go
new file mode 100644
index 0000000..e1adf09
--- /dev/null
+++ b/persistence.go
@@ -0,0 +1,136 @@
+package nigiri
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+)
+
+// LoadFromJSON loads items from a JSON file using reflection. A missing
+// file is not an error; the store is simply left empty.
+func (bs *BaseStore[T]) LoadFromJSON(filename string) error {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return fmt.Errorf("failed to read JSON: %w", err)
+	}
+
+	if len(data) == 0 {
+		return nil
+	}
+
+	// Create a slice of pointers to T
+	sliceType := reflect.SliceOf(reflect.PointerTo(bs.itemType))
+	slicePtr := reflect.New(sliceType)
+
+	if err := json.Unmarshal(data, slicePtr.Interface()); err != nil {
+		return fmt.Errorf("failed to unmarshal JSON: %w", err)
+	}
+
+	// Clear existing data
+	bs.items = make(map[int]*T)
+	bs.maxID = 0
+
+	// Clear unique indices
+	for fieldName := range bs.uniqueIndices {
+		bs.uniqueIndices[fieldName] = make(map[any]int)
+	}
+
+	// Extract items using reflection
+	slice := slicePtr.Elem()
+	for i := 0; i < slice.Len(); i++ {
+		item := slice.Index(i).Interface().(*T)
+
+		// Get the ID using reflection
+		itemValue := reflect.ValueOf(item).Elem()
+		idField := itemValue.FieldByName("ID")
+		if !idField.IsValid() || idField.Kind() != reflect.Int {
+			return fmt.Errorf("item type must have an int ID field")
+		}
+
+		id := int(idField.Int())
+		bs.items[id] = item
+		if id > bs.maxID {
+			bs.maxID = id
+		}
+
+		// Update unique indices
+		bs.updateUniqueIndices(id, item, true)
+	}
+
+	return nil
+}
+
+// SaveToJSON saves items to JSON atomically (write to a temp file, then
+// rename) in consistent ID order.
+func (bs *BaseStore[T]) SaveToJSON(filename string) error {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+
+	// Get sorted IDs for consistent ordering
+	ids := make([]int, 0, len(bs.items))
+	for id := range bs.items {
+		ids = append(ids, id)
+	}
+	sort.Ints(ids)
+
+	// Build the items slice in ID order
+	items := make([]*T, 0, len(bs.items))
+	for _, id := range ids {
+		items = append(items, bs.items[id])
+	}
+
+	data, err := json.MarshalIndent(items, "", "\t")
+	if err != nil {
+		return fmt.Errorf("failed to marshal to JSON: %w", err)
+	}
+
+	// Atomic write: rename is atomic on POSIX filesystems
+	tempFile := filename + ".tmp"
+	if err := os.WriteFile(tempFile, data, 0644); err != nil {
+		return fmt.Errorf("failed to write temp JSON: %w", err)
+	}
+
+	if err := os.Rename(tempFile, filename); err != nil {
+		os.Remove(tempFile)
+		return fmt.Errorf("failed to rename temp JSON: %w", err)
+	}
+
+	return nil
+}
+
+// LoadData loads the store from a JSON file, or starts empty if the file
+// does not exist, then rebuilds all registered indices. (LoadFromJSON
+// already treats a missing file as success, so no extra check is needed.)
+func (bs *BaseStore[T]) LoadData(dataPath string) error {
+	if err := bs.LoadFromJSON(dataPath); err != nil {
+		return fmt.Errorf("failed to load from JSON: %w", err)
+	}
+
+	bs.RebuildIndices() // rebuild secondary indices after loading
+	fmt.Printf("Loaded %d items from %s\n", bs.Count(), dataPath)
+	return nil
+}
+
+// SaveData saves the store to a JSON file, creating the directory if needed.
+func (bs *BaseStore[T]) SaveData(dataPath string) error {
+	// Ensure the directory exists
+	dataDir := filepath.Dir(dataPath)
+	if err := os.MkdirAll(dataDir, 0755); err != nil {
+		return fmt.Errorf("failed to create data directory: %w", err)
+	}
+
+	if err := bs.SaveToJSON(dataPath); err != nil {
+		return fmt.Errorf("failed to save to JSON: %w", err)
+	}
+
+	fmt.Printf("Saved %d items to %s\n", bs.Count(), dataPath)
+	return nil
+}
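A save/load roundtrip sketch, continuing the illustrative `User` example from above (paths are illustrative too):

```go
users := nigiri.NewBaseStore[User]()

id := users.GetNextID()
if err := users.AddWithValidation(id, &User{ID: id, Email: "a@example.com"}); err != nil {
	panic(err)
}

// Marshals items sorted by ID, writes a .tmp file, then renames it over the target.
if err := users.SaveData("./data/users.json"); err != nil {
	panic(err)
}

// A fresh store reloads the same items and rebuilds its secondary indices.
fresh := nigiri.NewBaseStore[User]()
if err := fresh.LoadData("./data/users.json"); err != nil {
	panic(err)
}
```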
diff --git a/store.go b/store.go
new file mode 100644
index 0000000..dab3e8f
--- /dev/null
+++ b/store.go
@@ -0,0 +1,553 @@
+package nigiri
+
+import (
+	"fmt"
+	"maps"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Validatable is implemented by entities that can validate themselves.
+type Validatable interface {
+	Validate() error
+}
+
+// IndexBuilder is a function type for building custom indices.
+type IndexBuilder[T any] func(allItems map[int]*T) any
+
+// BaseStore provides generic storage with index management and constraints.
+type BaseStore[T any] struct {
+	items         map[int]*T
+	maxID         int
+	mu            sync.RWMutex
+	itemType      reflect.Type
+	indices       map[string]any
+	indexBuilders map[string]IndexBuilder[T]
+	schema        *SchemaInfo
+	uniqueIndices map[string]map[any]int // field -> value -> id
+}
+
+// NewBaseStore creates a new base store for type T with schema parsing.
+func NewBaseStore[T any]() *BaseStore[T] {
+	var zero T
+	schema := ParseSchema[T]()
+
+	store := &BaseStore[T]{
+		items:         make(map[int]*T),
+		maxID:         0,
+		itemType:      reflect.TypeOf(zero),
+		indices:       make(map[string]any),
+		indexBuilders: make(map[string]IndexBuilder[T]),
+		schema:        schema,
+		uniqueIndices: make(map[string]map[any]int),
+	}
+
+	// Initialize unique indices
+	for fieldName, constraints := range schema.Constraints {
+		for _, constraint := range constraints {
+			if constraint.Type == ConstraintUnique {
+				store.uniqueIndices[fieldName] = make(map[any]int)
+			}
+		}
+	}
+
+	// Auto-register indices for indexed fields
+	store.registerSchemaIndices()
+
+	return store
+}
+
+func (bs *BaseStore[T]) registerSchemaIndices() {
+	for fieldName, indexName := range bs.schema.Indices {
+		bs.RegisterIndex(indexName, BuildFieldLookupIndex[T](fieldName))
+	}
+}
+
+// ValidateConstraints checks all schema constraints for an item. It is
+// called by AddWithValidation under the store lock; external callers are
+// responsible for their own synchronization.
+func (bs *BaseStore[T]) ValidateConstraints(id int, item *T) error {
+	itemValue := reflect.ValueOf(item).Elem()
+
+	for fieldName, constraints := range bs.schema.Constraints {
+		fieldValue := itemValue.FieldByName(fieldName)
+		if !fieldValue.IsValid() {
+			continue
+		}
+
+		for _, constraint := range constraints {
+			switch constraint.Type {
+			case ConstraintRequired:
+				if fieldValue.IsZero() {
+					return fmt.Errorf("field %s is required", fieldName)
+				}
+			case ConstraintUnique:
+				value := fieldValue.Interface()
+				if existingID, exists := bs.uniqueIndices[fieldName][value]; exists && existingID != id {
+					return fmt.Errorf("field %s value %v already exists", fieldName, value)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func (bs *BaseStore[T]) updateUniqueIndices(id int, item *T, add bool) {
+	itemValue := reflect.ValueOf(item).Elem()
+
+	for fieldName := range bs.uniqueIndices {
+		fieldValue := itemValue.FieldByName(fieldName)
+		if !fieldValue.IsValid() {
+			continue
+		}
+
+		value := fieldValue.Interface()
+		if add {
+			bs.uniqueIndices[fieldName][value] = id
+		} else {
+			delete(bs.uniqueIndices[fieldName], value)
+		}
+	}
+}
+
+// RegisterIndex registers an index builder function.
+func (bs *BaseStore[T]) RegisterIndex(name string, builder IndexBuilder[T]) {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+	bs.indexBuilders[name] = builder
+}
+
+// GetIndex retrieves a named index.
+func (bs *BaseStore[T]) GetIndex(name string) (any, bool) {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+	index, exists := bs.indices[name]
+	return index, exists
+}
+
+// RebuildIndices rebuilds all registered indices.
+func (bs *BaseStore[T]) RebuildIndices() {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+	bs.rebuildIndicesUnsafe()
+}
+
+func (bs *BaseStore[T]) rebuildIndicesUnsafe() {
+	allItems := make(map[int]*T, len(bs.items))
+	maps.Copy(allItems, bs.items)
+
+	for name, builder := range bs.indexBuilders {
+		bs.indices[name] = builder(allItems)
+	}
+}
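Custom indices hook in through `RegisterIndex`; here is a sketch using one of the builders defined later in this file (fragment; assumes the illustrative `User` example above and a `strings` import):

```go
// Group users by email domain; the builder runs on every index rebuild.
users.RegisterIndex("by_domain", nigiri.BuildStringGroupIndex(func(u *User) string {
	_, domain, _ := strings.Cut(u.Email, "@")
	return strings.ToLower(domain)
}))
users.RebuildIndices() // or rely on the *WithValidation mutators to rebuild

sameDomain := users.GroupByIndex("by_domain", "example.com")
```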
+
+// AddWithValidation adds an item with constraint validation and an index
+// rebuild. If an item already exists under id, it is replaced and its old
+// unique-index entries are removed first.
+func (bs *BaseStore[T]) AddWithValidation(id int, item *T) error {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+
+	// Validate constraints
+	if err := bs.ValidateConstraints(id, item); err != nil {
+		return err
+	}
+
+	// Custom validation
+	if validatable, ok := any(item).(Validatable); ok {
+		if err := validatable.Validate(); err != nil {
+			return err
+		}
+	}
+
+	// Drop the old item's unique-index entries before adding the new ones,
+	// so a replaced value does not linger and block future inserts.
+	if old, exists := bs.items[id]; exists {
+		bs.updateUniqueIndices(id, old, false)
+	}
+	bs.updateUniqueIndices(id, item, true)
+
+	bs.items[id] = item
+	if id > bs.maxID {
+		bs.maxID = id
+	}
+
+	bs.rebuildIndicesUnsafe()
+	return nil
+}
+
+// AddWithRebuild adds an item with validation and an index rebuild.
+func (bs *BaseStore[T]) AddWithRebuild(id int, item *T) error {
+	return bs.AddWithValidation(id, item)
+}
+
+// RemoveWithValidation removes an item and updates constraint indices.
+func (bs *BaseStore[T]) RemoveWithValidation(id int) {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+
+	if item, exists := bs.items[id]; exists {
+		bs.updateUniqueIndices(id, item, false)
+	}
+
+	delete(bs.items, id)
+	bs.rebuildIndicesUnsafe()
+}
+
+// RemoveWithRebuild removes an item and rebuilds indices.
+func (bs *BaseStore[T]) RemoveWithRebuild(id int) {
+	bs.RemoveWithValidation(id)
+}
+
+// UpdateWithValidation updates an item with validation and an index rebuild.
+func (bs *BaseStore[T]) UpdateWithValidation(id int, item *T) error {
+	return bs.AddWithValidation(id, item)
+}
+
+// UpdateWithRebuild updates an item with validation and an index rebuild.
+func (bs *BaseStore[T]) UpdateWithRebuild(id int, item *T) error {
+	return bs.AddWithValidation(id, item)
+}
+
+// Find retrieves an item by ID.
+func (bs *BaseStore[T]) Find(id int) (*T, bool) {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+	item, exists := bs.items[id]
+	return item, exists
+}
+
+// AllSorted returns all items using the named sorted index, falling back
+// to ascending ID order when the index is missing.
+func (bs *BaseStore[T]) AllSorted(indexName string) []*T {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+
+	if index, exists := bs.indices[indexName]; exists {
+		if sortedIDs, ok := index.([]int); ok {
+			result := make([]*T, 0, len(sortedIDs))
+			for _, id := range sortedIDs {
+				if item, exists := bs.items[id]; exists {
+					result = append(result, item)
+				}
+			}
+			return result
+		}
+	}
+
+	// Fallback: return all items in ID order
+	ids := make([]int, 0, len(bs.items))
+	for id := range bs.items {
+		ids = append(ids, id)
+	}
+	sort.Ints(ids)
+
+	result := make([]*T, 0, len(ids))
+	for _, id := range ids {
+		result = append(result, bs.items[id])
+	}
+	return result
+}
+
+// LookupByIndex finds a single item using a string lookup index.
+func (bs *BaseStore[T]) LookupByIndex(indexName, key string) (*T, bool) {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+
+	if index, exists := bs.indices[indexName]; exists {
+		if lookupMap, ok := index.(map[string]int); ok {
+			if id, found := lookupMap[key]; found {
+				if item, exists := bs.items[id]; exists {
+					return item, true
+				}
+			}
+		}
+	}
+	return nil, false
+}
+
+// GroupByIndex returns the items grouped under a key.
+func (bs *BaseStore[T]) GroupByIndex(indexName string, key any) []*T {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+
+	if index, exists := bs.indices[indexName]; exists {
+		switch groupMap := index.(type) {
+		case map[int][]int:
+			if intKey, ok := key.(int); ok {
+				if ids, found := groupMap[intKey]; found {
+					result := make([]*T, 0, len(ids))
+					for _, id := range ids {
+						if item, exists := bs.items[id]; exists {
+							result = append(result, item)
+						}
+					}
+					return result
+				}
+			}
+		case map[string][]int:
+			if strKey, ok := key.(string); ok {
+				if ids, found := groupMap[strKey]; found {
+					result := make([]*T, 0, len(ids))
+					for _, id := range ids {
+						if item, exists := bs.items[id]; exists {
+							result = append(result, item)
+						}
+					}
+					return result
+				}
+			}
+		}
+	}
+	return []*T{}
+}
+
+// FilterByIndex returns items matching the filter, iterating a sorted
+// index when available and falling back to ID order otherwise.
+func (bs *BaseStore[T]) FilterByIndex(indexName string, filterFunc func(*T) bool) []*T {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+
+	var sourceIDs []int
+
+	if index, exists := bs.indices[indexName]; exists {
+		if sortedIDs, ok := index.([]int); ok {
+			sourceIDs = sortedIDs
+		}
+	}
+
+	if sourceIDs == nil {
+		for id := range bs.items {
+			sourceIDs = append(sourceIDs, id)
+		}
+		sort.Ints(sourceIDs)
+	}
+
+	var result []*T
+	for _, id := range sourceIDs {
+		if item, exists := bs.items[id]; exists && filterFunc(item) {
+			result = append(result, item)
+		}
+	}
+	return result
+}
+
+// GetNextID returns the next available ID atomically.
+func (bs *BaseStore[T]) GetNextID() int {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+	bs.maxID++
+	return bs.maxID
+}
+
+// GetByID retrieves an item by ID.
+func (bs *BaseStore[T]) GetByID(id int) (*T, bool) {
+	return bs.Find(id)
+}
+
+// Add adds an item to the store directly. It skips constraint validation
+// and does not update unique or secondary indices; prefer AddWithValidation
+// unless you maintain the indices yourself.
+func (bs *BaseStore[T]) Add(id int, item *T) {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+	bs.items[id] = item
+	if id > bs.maxID {
+		bs.maxID = id
+	}
+}
+
+// Remove removes an item from the store directly. It skips constraint
+// bookkeeping; prefer RemoveWithValidation unless you maintain the
+// indices yourself.
+func (bs *BaseStore[T]) Remove(id int) {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+	delete(bs.items, id)
+}
+
+// GetAll returns a copy of all items keyed by ID.
+func (bs *BaseStore[T]) GetAll() map[int]*T {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+	result := make(map[int]*T, len(bs.items))
+	maps.Copy(result, bs.items)
+	return result
+}
+
+// Count returns the number of items in the store.
+func (bs *BaseStore[T]) Count() int {
+	bs.mu.RLock()
+	defer bs.mu.RUnlock()
+	return len(bs.items)
+}
+
+// Clear removes all items and resets all indices.
+func (bs *BaseStore[T]) Clear() {
+	bs.mu.Lock()
+	defer bs.mu.Unlock()
+	bs.items = make(map[int]*T)
+	bs.maxID = 0
+
+	// Clear unique indices
+	for fieldName := range bs.uniqueIndices {
+		bs.uniqueIndices[fieldName] = make(map[any]int)
+	}
+
+	bs.rebuildIndicesUnsafe()
+}
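How the validating mutators above behave with a `db:"unique"` field, continuing the illustrative `User` example:

```go
id := users.GetNextID()
_ = users.AddWithValidation(id, &User{ID: id, Email: "a@example.com"})

// A second item with the same unique Email is rejected:
dup := users.GetNextID()
err := users.AddWithValidation(dup, &User{ID: dup, Email: "a@example.com"})
// err: field Email value a@example.com already exists

// Replacing the same ID is fine; the old Email is released first.
_ = users.UpdateWithValidation(id, &User{ID: id, Email: "b@example.com"})
```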
+
+// Index Builder Functions
+
+// BuildFieldLookupIndex creates a lookup index for any field by name. The
+// field value is stringified with fmt.Sprintf("%v"), so two items whose
+// values format identically collide on the same key.
+func BuildFieldLookupIndex[T any](fieldName string) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		index := make(map[string]int)
+		for id, item := range allItems {
+			itemValue := reflect.ValueOf(item).Elem()
+			fieldValue := itemValue.FieldByName(fieldName)
+			if !fieldValue.IsValid() {
+				continue
+			}
+
+			key := fmt.Sprintf("%v", fieldValue.Interface())
+			index[key] = id
+		}
+		return index
+	}
+}
+
+// BuildStringLookupIndex creates a string-to-ID mapping.
+func BuildStringLookupIndex[T any](keyFunc func(*T) string) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		index := make(map[string]int)
+		for id, item := range allItems {
+			key := keyFunc(item)
+			index[key] = id
+		}
+		return index
+	}
+}
+
+// BuildCaseInsensitiveLookupIndex creates a lowercased string-to-ID mapping.
+func BuildCaseInsensitiveLookupIndex[T any](keyFunc func(*T) string) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		index := make(map[string]int)
+		for id, item := range allItems {
+			key := strings.ToLower(keyFunc(item))
+			index[key] = id
+		}
+		return index
+	}
+}
+
+// BuildIntGroupIndex creates an int-to-[]ID mapping.
+func BuildIntGroupIndex[T any](keyFunc func(*T) int) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		index := make(map[int][]int)
+		for id, item := range allItems {
+			key := keyFunc(item)
+			index[key] = append(index[key], id)
+		}
+
+		// Sort each group by ID
+		for key := range index {
+			sort.Ints(index[key])
+		}
+
+		return index
+	}
+}
+
+// BuildStringGroupIndex creates a string-to-[]ID mapping.
+func BuildStringGroupIndex[T any](keyFunc func(*T) string) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		index := make(map[string][]int)
+		for id, item := range allItems {
+			key := keyFunc(item)
+			index[key] = append(index[key], id)
+		}
+
+		// Sort each group by ID
+		for key := range index {
+			sort.Ints(index[key])
+		}
+
+		return index
+	}
+}
+
+// BuildSortedListIndex creates a sorted []ID list.
+func BuildSortedListIndex[T any](sortFunc func(*T, *T) bool) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		ids := make([]int, 0, len(allItems))
+		for id := range allItems {
+			ids = append(ids, id)
+		}
+
+		sort.Slice(ids, func(i, j int) bool {
+			return sortFunc(allItems[ids[i]], allItems[ids[j]])
+		})
+
+		return ids
+	}
+}
+
+// BuildFilteredIntGroupIndex creates an int-to-[]ID mapping for items passing the filter.
+func BuildFilteredIntGroupIndex[T any](filterFunc func(*T) bool, keyFunc func(*T) int) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		index := make(map[int][]int)
+		for id, item := range allItems {
+			if filterFunc(item) {
+				key := keyFunc(item)
+				index[key] = append(index[key], id)
+			}
+		}
+
+		// Sort each group by ID
+		for key := range index {
+			sort.Ints(index[key])
+		}
+
+		return index
+	}
+}
+
+// BuildFilteredStringGroupIndex creates a string-to-[]ID mapping for items passing the filter.
+func BuildFilteredStringGroupIndex[T any](filterFunc func(*T) bool, keyFunc func(*T) string) IndexBuilder[T] {
+	return func(allItems map[int]*T) any {
+		index := make(map[string][]int)
+		for id, item := range allItems {
+			if filterFunc(item) {
+				key := keyFunc(item)
+				index[key] = append(index[key], id)
+			}
+		}
+
+		// Sort each group by ID
+		for key := range index {
+			sort.Ints(index[key])
+		}
+
+		return index
+	}
+}
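A sketch of a sorted index feeding `AllSorted` (assumes a hypothetical `CreatedAt time.Time` field on the illustrative `User` type):

```go
users.RegisterIndex("newest_first", nigiri.BuildSortedListIndex(func(a, b *User) bool {
	return a.CreatedAt.After(b.CreatedAt) // newest first
}))
users.RebuildIndices()

recent := users.AllSorted("newest_first") // falls back to ID order if the index is missing
```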
+
+// NewSingleton wraps an init function in sync.Once so the store is
+// constructed exactly once and shared by every caller.
+func NewSingleton[S any](initFunc func() *S) func() *S {
+	var store *S
+	var once sync.Once
+
+	return func() *S {
+		once.Do(func() {
+			store = initFunc()
+		})
+		return store
+	}
+}
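And how `NewSingleton` is meant to be used, under the same illustrative types:

```go
// GetUserStore is a package-level accessor; initFunc runs exactly once,
// and every caller shares the same store.
var GetUserStore = nigiri.NewSingleton(func() *nigiri.BaseStore[User] {
	s := nigiri.NewBaseStore[User]()
	// one-time setup (custom indices, seed data) goes here
	return s
})

store := GetUserStore()
```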