diff --git a/.gitignore b/.gitignore index 48b8bf9..0fe55f3 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ vendor/ +coverage.txt diff --git a/README.md b/README.md index d1a31be..8bc3cdb 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,18 @@ # DataLoader -[![GoDoc](https://godoc.org/gopkg.in/graph-gophers/dataloader.v3?status.svg)](https://godoc.org/github.com/graph-gophers/dataloader) +[![GoDoc](https://godoc.org/gopkg.in/graph-gophers/dataloader.v8?status.svg)](https://godoc.org/github.com/graph-gophers/dataloader) [![Build Status](https://travis-ci.org/graph-gophers/dataloader.svg?branch=master)](https://travis-ci.org/graph-gophers/dataloader) This is an implementation of [Facebook's DataLoader](https://github.com/facebook/dataloader) in Golang. ## Install -`go get -u github.com/graph-gophers/dataloader` +`go get -u github.com/graph-gophers/dataloader/v8` ## Usage ```go // setup batch function - the first Context passed to the Loader's Load // function will be provided when the batch function is called. -batchFn := func(ctx context.Context, keys dataloader.Keys) []*dataloader.Result { - var results []*dataloader.Result +batchFn := func(ctx context.Context, keys dataloader.Keys[string]) []*dataloader.Result[any] { + var results []*dataloader.Result[any] // do some async work to get data for specified keys // append to this list resolved values return results @@ -32,7 +32,7 @@ loader := dataloader.NewBatchedLoader(batchFn) * The first context passed to Load is the object that will be passed * to the batch function. 
*/ -thunk := loader.Load(context.TODO(), dataloader.StringKey("key1")) // StringKey is a convenience method that make wraps string to implement `Key` interface +thunk := loader.Load(context.TODO(), dataloader.KeyOf("key1")) // KeyOf is a convenience method that wraps any comparable type to implement `Key` interface result, err := thunk() if err != nil { // handle data error } @@ -42,10 +42,19 @@ log.Printf("value: %#v", result) ``` ### Don't need/want to use context? -You're welcome to install the v1 version of this library. +You're welcome to install the `v1` version of this library. + +### Don't need/want to use type parameters? +Please feel free to use the `v6` version of this library. + +### Don't need/want to use Key/Keys interface? +Just use the `v7` version of this library. This completely removes the need for the `Key` interface, but it limits +the key type parameter to `comparable` types only, whereas `v8` allows `any` type, as long as it is wrapped as a `Key`, +and exports itself as `string`. ## Cache -This implementation contains a very basic cache that is intended only to be used for short lived DataLoaders (i.e. DataLoaders that only exist for the life of an http request). You may use your own implementation if you want. +This implementation contains a very basic cache that is intended only to be used for short-lived DataLoaders +(i.e. DataLoaders that only exist for the life of an http request). You may use your own implementation if you want. > it also has a `NoCache` type that implements the cache interface but all methods are noop. If you do not wish to cache anything. diff --git a/cache.go b/cache.go index 79190f9..64afe7d 100644 --- a/cache.go +++ b/cache.go @@ -3,26 +3,28 @@ package dataloader import "context" // The Cache interface. If a custom cache is provided, it must implement this interface. 
-type Cache[K comparable, V any] interface { - Get(context.Context, K) (Thunk[V], bool) - Set(context.Context, K, Thunk[V]) - Delete(context.Context, K) bool +type Cache[K any, V any] interface { + Get(context.Context, Key[K]) (Thunk[V], bool) + Set(context.Context, Key[K], Thunk[V]) + Delete(context.Context, Key[K]) bool Clear() } +var _ Cache[any, any] = (*NoCache[any, any])(nil) + // NoCache implements Cache interface where all methods are noops. // This is useful for when you don't want to cache items but still // want to use a data loader -type NoCache[K comparable, V any] struct{} +type NoCache[K any, V any] struct{} // Get is a NOOP -func (c *NoCache[K, V]) Get(context.Context, K) (Thunk[V], bool) { return nil, false } +func (c *NoCache[K, V]) Get(context.Context, Key[K]) (Thunk[V], bool) { return nil, false } // Set is a NOOP -func (c *NoCache[K, V]) Set(context.Context, K, Thunk[V]) { return } +func (c *NoCache[K, V]) Set(context.Context, Key[K], Thunk[V]) { return } // Delete is a NOOP -func (c *NoCache[K, V]) Delete(context.Context, K) bool { return false } +func (c *NoCache[K, V]) Delete(context.Context, Key[K]) bool { return false } // Clear is a NOOP func (c *NoCache[K, V]) Clear() { return } diff --git a/dataloader.go b/dataloader.go index ecb940a..bfc8109 100644 --- a/dataloader.go +++ b/dataloader.go @@ -1,4 +1,4 @@ -// Package dataloader is an implimentation of facebook's dataloader in go. +// Package dataloader is an implementation of facebook's dataloader in go. // See https://github.com/facebook/dataloader for more information package dataloader @@ -20,19 +20,19 @@ import ( // used in long-lived applications or those which serve many users with // different access permissions and consider creating a new instance per // web request. 
-type Interface[K comparable, V any] interface { - Load(context.Context, K) Thunk[V] - LoadMany(context.Context, []K) ThunkMany[V] - Clear(context.Context, K) Interface[K, V] +type Interface[K any, V any] interface { + Load(context.Context, Key[K]) Thunk[V] + LoadMany(context.Context, Keys[K]) ThunkMany[V] + Clear(context.Context, Key[K]) Interface[K, V] ClearAll() Interface[K, V] - Prime(ctx context.Context, key K, value V) Interface[K, V] + Prime(ctx context.Context, key Key[K], value V) Interface[K, V] } // BatchFunc is a function, which when given a slice of keys (string), returns a slice of `results`. // It's important that the length of the input keys matches the length of the output results. // // The keys passed to this function are guaranteed to be unique -type BatchFunc[K comparable, V any] func(context.Context, []K) []*Result[V] +type BatchFunc[K any, V any] func(context.Context, Keys[K]) []*Result[V] // Result is the data structure that a BatchFunc returns. // It contains the resolved data, and any errors that may have occurred while fetching the data. @@ -61,7 +61,7 @@ func (p *PanicErrorWrapper) Error() string { } // Loader implements the dataloader.Interface. -type Loader[K comparable, V any] struct { +type Loader[K any, V any] struct { // the batch function to be used by this loader batchFn BatchFunc[K, V] @@ -111,30 +111,30 @@ type Thunk[V any] func() (V, error) type ThunkMany[V any] func() ([]V, []error) // type used to on input channel -type batchRequest[K comparable, V any] struct { - key K +type batchRequest[K any, V any] struct { + key Key[K] channel chan *Result[V] } // Option allows for configuration of Loader fields. -type Option[K comparable, V any] func(*Loader[K, V]) +type Option[K any, V any] func(*Loader[K, V]) // WithCache sets the BatchedLoader cache. Defaults to InMemoryCache if a Cache is not set. 
-func WithCache[K comparable, V any](c Cache[K, V]) Option[K, V] { +func WithCache[K any, V any](c Cache[K, V]) Option[K, V] { return func(l *Loader[K, V]) { l.cache = c } } // WithBatchCapacity sets the batch capacity. Default is 0 (unbounded). -func WithBatchCapacity[K comparable, V any](c int) Option[K, V] { +func WithBatchCapacity[K any, V any](c int) Option[K, V] { return func(l *Loader[K, V]) { l.batchCap = c } } // WithInputCapacity sets the input capacity. Default is 1000. -func WithInputCapacity[K comparable, V any](c int) Option[K, V] { +func WithInputCapacity[K any, V any](c int) Option[K, V] { return func(l *Loader[K, V]) { l.inputCap = c } @@ -142,7 +142,7 @@ func WithInputCapacity[K comparable, V any](c int) Option[K, V] { // WithWait sets the amount of time to wait before triggering a batch. // Default duration is 16 milliseconds. -func WithWait[K comparable, V any](d time.Duration) Option[K, V] { +func WithWait[K any, V any](d time.Duration) Option[K, V] { return func(l *Loader[K, V]) { l.wait = d } @@ -150,7 +150,7 @@ func WithWait[K comparable, V any](d time.Duration) Option[K, V] { // WithClearCacheOnBatch allows batching of items but no long term caching. // It accomplishes this by clearing the cache after each batch operation. -func WithClearCacheOnBatch[K comparable, V any]() Option[K, V] { +func WithClearCacheOnBatch[K any, V any]() Option[K, V] { return func(l *Loader[K, V]) { l.cacheLock.Lock() l.clearCacheOnBatch = true @@ -159,21 +159,21 @@ func WithClearCacheOnBatch[K comparable, V any]() Option[K, V] { } // withSilentLogger turns of log messages. 
It's used by the tests -func withSilentLogger[K comparable, V any]() Option[K, V] { +func withSilentLogger[K any, V any]() Option[K, V] { return func(l *Loader[K, V]) { l.silent = true } } // WithTracer allows tracing of calls to Load and LoadMany -func WithTracer[K comparable, V any](tracer Tracer[K, V]) Option[K, V] { +func WithTracer[K any, V any](tracer Tracer[K, V]) Option[K, V] { return func(l *Loader[K, V]) { l.tracer = tracer } } // NewBatchedLoader constructs a new Loader with given options. -func NewBatchedLoader[K comparable, V any](batchFn BatchFunc[K, V], opts ...Option[K, V]) *Loader[K, V] { +func NewBatchedLoader[K any, V any](batchFn BatchFunc[K, V], opts ...Option[K, V]) *Loader[K, V] { loader := &Loader[K, V]{ batchFn: batchFn, inputCap: 1000, @@ -197,10 +197,10 @@ func NewBatchedLoader[K comparable, V any](batchFn BatchFunc[K, V], opts ...Opti return loader } -// Load load/resolves the given key, returning a channel that will contain the value and error. +// Load loads/resolves the given key, returning a channel that will contain the value and error. // The first context passed to this function within a given batch window will be provided to // the registered BatchFunc. -func (l *Loader[K, V]) Load(originalContext context.Context, key K) Thunk[V] { +func (l *Loader[K, V]) Load(originalContext context.Context, key Key[K]) Thunk[V] { ctx, finish := l.tracer.TraceLoad(originalContext, key) c := make(chan *Result[V], 1) @@ -242,8 +242,7 @@ func (l *Loader[K, V]) Load(originalContext context.Context, key K) Thunk[V] { l.cache.Set(ctx, key, thunk) l.cacheLock.Unlock() - // this is sent to batch fn. It contains the key and the channel to return the - // the result on + // this is sent to batch fn. 
It contains the key and the channel to return the result on req := &batchRequest[K, V]{key, c} l.batchLock.Lock() @@ -279,8 +278,8 @@ func (l *Loader[K, V]) Load(originalContext context.Context, key K) Thunk[V] { return thunk } -// LoadMany loads mulitiple keys, returning a thunk (type: ThunkMany) that will resolve the keys passed in. -func (l *Loader[K, V]) LoadMany(originalContext context.Context, keys []K) ThunkMany[V] { +// LoadMany loads multiple keys, returning a thunk (type: ThunkMany) that will resolve the keys passed in. +func (l *Loader[K, V]) LoadMany(originalContext context.Context, keys Keys[K]) ThunkMany[V] { ctx, finish := l.tracer.TraceLoadMany(originalContext, keys) var ( @@ -347,8 +346,8 @@ func (l *Loader[K, V]) LoadMany(originalContext context.Context, keys []K) Thunk return thunkMany } -// Clear clears the value at `key` from the cache, it it exsits. Returs self for method chaining -func (l *Loader[K, V]) Clear(ctx context.Context, key K) Interface[K, V] { +// Clear clears the value at `key` from the cache, if it exists. Returns self for method chaining +func (l *Loader[K, V]) Clear(ctx context.Context, key Key[K]) Interface[K, V] { l.cacheLock.Lock() l.cache.Delete(ctx, key) l.cacheLock.Unlock() @@ -366,7 +365,7 @@ func (l *Loader[K, V]) ClearAll() Interface[K, V] { // Prime adds the provided key and value to the cache. If the key already exists, no change is made. 
// Returns self for method chaining -func (l *Loader[K, V]) Prime(ctx context.Context, key K, value V) Interface[K, V] { +func (l *Loader[K, V]) Prime(ctx context.Context, key Key[K], value V) Interface[K, V] { if _, ok := l.cache.Get(ctx, key); !ok { thunk := func() (V, error) { return value, nil @@ -385,7 +384,7 @@ func (l *Loader[K, V]) reset() { } } -type batcher[K comparable, V any] struct { +type batcher[K any, V any] struct { input chan *batchRequest[K, V] batchFn BatchFunc[K, V] finished bool @@ -415,7 +414,7 @@ func (b *batcher[K, V]) end() { // execute the batch of all items in queue func (b *batcher[K, V]) batch(originalContext context.Context) { var ( - keys = make([]K, 0) + keys = make(Keys[K], 0) reqs = make([]*batchRequest[K, V], 0) items = make([]*Result[V], 0) panicErr interface{} diff --git a/dataloader_test.go b/dataloader_test.go index 629c996..2039bb4 100644 --- a/dataloader_test.go +++ b/dataloader_test.go @@ -19,7 +19,7 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, _ := IDLoader[string](0) ctx := context.Background() - future := identityLoader.Load(ctx, "1") + future := identityLoader.Load(ctx, KeyOf("1")) value, err := future() if err != nil { t.Error(err.Error()) @@ -33,7 +33,7 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, _ := IDLoader[string](0) ctx := context.Background() - future := identityLoader.Load(ctx, "1") + future := identityLoader.Load(ctx, KeyOf("1")) go future() go future() }) @@ -48,7 +48,7 @@ func TestLoader(t *testing.T) { }() panicLoader, _ := PanicLoader[string](0) ctx := context.Background() - future := panicLoader.Load(ctx, "1") + future := panicLoader.Load(ctx, KeyOf("1")) _, err := future() if err == nil || err.Error() != "Panic received in batch function: Programming error" { t.Error("Panic was not propagated as an error.") @@ -59,9 +59,9 @@ func TestLoader(t *testing.T) { t.Parallel() errorCacheLoader, _ := ErrorCacheLoader[string](0) ctx := context.Background() - futures := 
[]Thunk[string]{} + var futures []Thunk[string] for i := 0; i < 2; i++ { - futures = append(futures, errorCacheLoader.Load(ctx, strconv.Itoa(i))) + futures = append(futures, errorCacheLoader.Load(ctx, KeyOf(strconv.Itoa(i)))) } for _, f := range futures { @@ -70,7 +70,7 @@ func TestLoader(t *testing.T) { t.Error("Error was not propagated") } } - nextFuture := errorCacheLoader.Load(ctx, "1") + nextFuture := errorCacheLoader.Load(ctx, KeyOf("1")) _, err := nextFuture() // Normal errors should be cached. @@ -88,10 +88,10 @@ func TestLoader(t *testing.T) { } }() panicLoader, _ := PanicCacheLoader[string](0) - futures := []Thunk[string]{} + var futures []Thunk[string] ctx := context.Background() for i := 0; i < 3; i++ { - futures = append(futures, panicLoader.Load(ctx, strconv.Itoa(i))) + futures = append(futures, panicLoader.Load(ctx, KeyOf(strconv.Itoa(i)))) } for _, f := range futures { _, err := f() @@ -102,7 +102,7 @@ func TestLoader(t *testing.T) { futures = []Thunk[string]{} for i := 0; i < 3; i++ { - futures = append(futures, panicLoader.Load(ctx, strconv.Itoa(1))) + futures = append(futures, panicLoader.Load(ctx, KeyOf(strconv.Itoa(1)))) } for _, f := range futures { @@ -117,7 +117,7 @@ func TestLoader(t *testing.T) { t.Parallel() errorLoader, _ := ErrorLoader[string](0) ctx := context.Background() - future := errorLoader.LoadMany(ctx, []string{"1", "2", "3"}) + future := errorLoader.LoadMany(ctx, KeysFrom("1", "2", "3")) _, err := future() if len(err) != 3 { t.Error("LoadMany didn't return right number of errors") @@ -128,7 +128,7 @@ func TestLoader(t *testing.T) { t.Parallel() loader, _ := OneErrorLoader[string](3) ctx := context.Background() - future := loader.LoadMany(ctx, []string{"1", "2", "3"}) + future := loader.LoadMany(ctx, KeysFrom("1", "2", "3")) _, errs := future() if len(errs) != 3 { t.Errorf("LoadMany didn't return right number of errors (should match size of input)") @@ -156,7 +156,7 @@ func TestLoader(t *testing.T) { t.Parallel() loader, _ := 
IDLoader[string](0) ctx := context.Background() - _, err := loader.LoadMany(ctx, []string{"1", "2", "3"})() + _, err := loader.LoadMany(ctx, KeysFrom("1", "2", "3"))() if err != nil { t.Errorf("Expected LoadMany() to return nil error slice when no errors occurred") } @@ -166,7 +166,7 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, _ := IDLoader[string](0) ctx := context.Background() - future := identityLoader.LoadMany(ctx, []string{"1", "2", "3"}) + future := identityLoader.LoadMany(ctx, KeysFrom("1", "2", "3")) go future() go future() }) @@ -181,13 +181,13 @@ func TestLoader(t *testing.T) { }() panicLoader, _ := PanicCacheLoader[string](0) ctx := context.Background() - future := panicLoader.LoadMany(ctx, []string{"1", "2"}) + future := panicLoader.LoadMany(ctx, KeysFrom("1", "2")) _, errs := future() if len(errs) < 2 || errs[0].Error() != "Panic received in batch function: Programming error" { t.Error("Panic was not propagated as an error.") } - future = panicLoader.LoadMany(ctx, []string{"1"}) + future = panicLoader.LoadMany(ctx, KeysFrom("1")) _, errs = future() if len(errs) > 0 { @@ -200,7 +200,7 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, _ := IDLoader[string](0) ctx := context.Background() - future := identityLoader.LoadMany(ctx, []string{"1", "2", "3"}) + future := identityLoader.LoadMany(ctx, KeysFrom("1", "2", "3")) results, _ := future() if results[0] != "1" || results[1] != "2" || results[2] != "3" { t.Error("loadmany didn't return the right value") @@ -211,8 +211,8 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := IDLoader[string](0) ctx := context.Background() - future1 := identityLoader.Load(ctx, "1") - future2 := identityLoader.Load(ctx, "2") + future1 := identityLoader.Load(ctx, KeyOf("1")) + future2 := identityLoader.Load(ctx, KeyOf("2")) _, err := future1() if err != nil { @@ -224,8 +224,8 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1", "2"} - expected 
:= [][]string{inner} + inner := KeysFrom("1", "2") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not call batchFn in right order. Expected %#v, got %#v", expected, calls) } @@ -237,11 +237,11 @@ func TestLoader(t *testing.T) { ctx := context.Background() n := 10 - reqs := []Thunk[string]{} + var reqs []Thunk[string] var keys []string for i := 0; i < n; i++ { key := strconv.Itoa(i) - reqs = append(reqs, faultyLoader.Load(ctx, key)) + reqs = append(reqs, faultyLoader.Load(ctx, KeyOf(key))) keys = append(keys, key) } @@ -259,9 +259,9 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := IDLoader[string](2) ctx := context.Background() - future1 := identityLoader.Load(ctx, "1") - future2 := identityLoader.Load(ctx, "2") - future3 := identityLoader.Load(ctx, "3") + future1 := identityLoader.Load(ctx, KeyOf("1")) + future2 := identityLoader.Load(ctx, KeyOf("2")) + future3 := identityLoader.Load(ctx, KeyOf("3")) _, err := future1() if err != nil { @@ -277,9 +277,9 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner1 := []string{"1", "2"} - inner2 := []string{"3"} - expected := [][]string{inner1, inner2} + inner1 := KeysFrom("1", "2") + inner2 := KeysFrom("3") + expected := []Keys[string]{inner1, inner2} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not respect max batch size. 
Expected %#v, got %#v", expected, calls) } @@ -289,8 +289,8 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := IDLoader[string](0) ctx := context.Background() - future1 := identityLoader.Load(ctx, "1") - future2 := identityLoader.Load(ctx, "1") + future1 := identityLoader.Load(ctx, KeyOf("1")) + future2 := identityLoader.Load(ctx, KeyOf("1")) _, err := future1() if err != nil { @@ -302,8 +302,8 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1"} - expected := [][]string{inner} + inner := KeysFrom("1") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not respect max batch size. Expected %#v, got %#v", expected, calls) } @@ -313,9 +313,9 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := IDLoader[string](0) ctx := context.Background() - identityLoader.Prime(ctx, "A", "Cached") - future1 := identityLoader.Load(ctx, "1") - future2 := identityLoader.Load(ctx, "A") + identityLoader.Prime(ctx, KeyOf("A"), "Cached") + future1 := identityLoader.Load(ctx, KeyOf("1")) + future2 := identityLoader.Load(ctx, KeyOf("A")) _, err := future1() if err != nil { @@ -327,8 +327,8 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1"} - expected := [][]string{inner} + inner := KeysFrom("1") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not respect max batch size. 
Expected %#v, got %#v", expected, calls) } @@ -342,11 +342,11 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := IDLoader[string](0) ctx := context.Background() - identityLoader.Prime(ctx, "A", "Cached") - identityLoader.Prime(ctx, "B", "B") - future1 := identityLoader.Load(ctx, "1") - future2 := identityLoader.Clear(ctx, "A").Load(ctx, "A") - future3 := identityLoader.Load(ctx, "B") + identityLoader.Prime(ctx, KeyOf("A"), "Cached") + identityLoader.Prime(ctx, KeyOf("B"), "B") + future1 := identityLoader.Load(ctx, KeyOf("1")) + future2 := identityLoader.Clear(ctx, KeyOf("A")).Load(ctx, KeyOf("A")) + future3 := identityLoader.Load(ctx, KeyOf("B")) _, err := future1() if err != nil { @@ -362,8 +362,8 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1", "A"} - expected := [][]string{inner} + inner := KeysFrom("1", "A") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not respect max batch size. Expected %#v, got %#v", expected, calls) } @@ -377,8 +377,8 @@ func TestLoader(t *testing.T) { t.Parallel() batchOnlyLoader, loadCalls := BatchOnlyLoader[string](0) ctx := context.Background() - future1 := batchOnlyLoader.Load(ctx, "1") - future2 := batchOnlyLoader.Load(ctx, "1") + future1 := batchOnlyLoader.Load(ctx, KeyOf("1")) + future2 := batchOnlyLoader.Load(ctx, KeyOf("1")) _, err := future1() if err != nil { @@ -390,13 +390,13 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1"} - expected := [][]string{inner} + inner := KeysFrom("1") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not batch queries. Expected %#v, got %#v", expected, calls) } - if _, found := batchOnlyLoader.cache.Get(ctx, "1"); found { + if _, found := batchOnlyLoader.cache.Get(ctx, KeyOf("1")); found { t.Errorf("did not clear cache after batch. 
Expected %#v, got %#v", false, found) } }) @@ -405,14 +405,14 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := IDLoader[string](0) ctx := context.Background() - identityLoader.Prime(ctx, "A", "Cached") - identityLoader.Prime(ctx, "B", "B") + identityLoader.Prime(ctx, KeyOf("A"), "Cached") + identityLoader.Prime(ctx, KeyOf("B"), "B") identityLoader.ClearAll() - future1 := identityLoader.Load(ctx, "1") - future2 := identityLoader.Load(ctx, "A") - future3 := identityLoader.Load(ctx, "B") + future1 := identityLoader.Load(ctx, KeyOf("1")) + future2 := identityLoader.Load(ctx, KeyOf("A")) + future3 := identityLoader.Load(ctx, KeyOf("B")) _, err := future1() if err != nil { @@ -428,8 +428,8 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1", "A", "B"} - expected := [][]string{inner} + inner := KeysFrom("1", "A", "B") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not respect max batch size. Expected %#v, got %#v", expected, calls) } @@ -439,14 +439,14 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := NoCacheLoader[string](0) ctx := context.Background() - identityLoader.Prime(ctx, "A", "Cached") - identityLoader.Prime(ctx, "B", "B") + identityLoader.Prime(ctx, KeyOf("A"), "Cached") + identityLoader.Prime(ctx, KeyOf("B"), "B") identityLoader.ClearAll() - future1 := identityLoader.Clear(ctx, "1").Load(ctx, "1") - future2 := identityLoader.Load(ctx, "A") - future3 := identityLoader.Load(ctx, "B") + future1 := identityLoader.Clear(ctx, KeyOf("1")).Load(ctx, KeyOf("1")) + future2 := identityLoader.Load(ctx, KeyOf("A")) + future3 := identityLoader.Load(ctx, KeyOf("B")) _, err := future1() if err != nil { @@ -462,8 +462,8 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1", "A", "B"} - expected := [][]string{inner} + inner := KeysFrom("1", "A", "B") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { 
t.Errorf("did not respect max batch size. Expected %#v, got %#v", expected, calls) } @@ -473,12 +473,12 @@ func TestLoader(t *testing.T) { t.Parallel() identityLoader, loadCalls := NoCacheLoader[string](0) ctx := context.Background() - identityLoader.Prime(ctx, "A", "Cached") - identityLoader.Prime(ctx, "B", "B") + identityLoader.Prime(ctx, KeyOf("A"), "Cached") + identityLoader.Prime(ctx, KeyOf("B"), "B") - future1 := identityLoader.Load(ctx, "1") - future2 := identityLoader.Load(ctx, "A") - future3 := identityLoader.Load(ctx, "B") + future1 := identityLoader.Load(ctx, KeyOf("1")) + future2 := identityLoader.Load(ctx, KeyOf("A")) + future3 := identityLoader.Load(ctx, KeyOf("B")) _, err := future1() if err != nil { @@ -494,65 +494,67 @@ func TestLoader(t *testing.T) { } calls := *loadCalls - inner := []string{"1", "A", "B"} - expected := [][]string{inner} + inner := KeysFrom("1", "A", "B") + expected := []Keys[string]{inner} if !reflect.DeepEqual(calls, expected) { t.Errorf("did not respect max batch size. 
Expected %#v, got %#v", expected, calls) } }) - } // test helpers -func IDLoader[K comparable](max int) (*Loader[K, K], *[][]K) { +func IDLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { var mu sync.Mutex - var loadCalls [][]K - identityLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + var loadCalls []Keys[K] + identityLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { var results []*Result[K] mu.Lock() loadCalls = append(loadCalls, keys) mu.Unlock() - for _, key := range keys { - results = append(results, &Result[K]{key, nil}) + for _, raw := range keys.Raws() { + results = append(results, &Result[K]{raw, nil}) } return results }, WithBatchCapacity[K, K](max)) return identityLoader, &loadCalls } -func BatchOnlyLoader[K comparable](max int) (*Loader[K, K], *[][]K) { + +func BatchOnlyLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { var mu sync.Mutex - var loadCalls [][]K - identityLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + var loadCalls []Keys[K] + identityLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { var results []*Result[K] mu.Lock() loadCalls = append(loadCalls, keys) mu.Unlock() - for _, key := range keys { - results = append(results, &Result[K]{key, nil}) + for _, raw := range keys.Raws() { + results = append(results, &Result[K]{raw, nil}) } return results }, WithBatchCapacity[K, K](max), WithClearCacheOnBatch[K, K]()) return identityLoader, &loadCalls } -func ErrorLoader[K comparable](max int) (*Loader[K, K], *[][]K) { + +func ErrorLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { var mu sync.Mutex - var loadCalls [][]K - identityLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + var loadCalls []Keys[K] + identityLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { var results []*Result[K] mu.Lock() loadCalls = append(loadCalls, keys) mu.Unlock() - for 
_, key := range keys { - results = append(results, &Result[K]{key, fmt.Errorf("this is a test error")}) + for _, raw := range keys.Raws() { + results = append(results, &Result[K]{raw, fmt.Errorf("this is a test error")}) } return results }, WithBatchCapacity[K, K](max)) return identityLoader, &loadCalls } -func OneErrorLoader[K comparable](max int) (*Loader[K, K], *[][]K) { + +func OneErrorLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { var mu sync.Mutex - var loadCalls [][]K - identityLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + var loadCalls []Keys[K] + identityLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { results := make([]*Result[K], max) mu.Lock() loadCalls = append(loadCalls, keys) @@ -562,23 +564,24 @@ func OneErrorLoader[K comparable](max int) (*Loader[K, K], *[][]K) { if i == 0 { err = errors.New("always error on the first key") } - results[i] = &Result[K]{keys[i], err} + results[i] = &Result[K]{keys[i].Raw(), err} } return results }, WithBatchCapacity[K, K](max)) return identityLoader, &loadCalls } -func PanicLoader[K comparable](max int) (*Loader[K, K], *[][]K) { - var loadCalls [][]K - panicLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + +func PanicLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { + var loadCalls []Keys[K] + panicLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { panic("Programming error") }, WithBatchCapacity[K, K](max), withSilentLogger[K, K]()) return panicLoader, &loadCalls } -func PanicCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { - var loadCalls [][]K - panicCacheLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { +func PanicCacheLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { + var loadCalls []Keys[K] + panicCacheLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { if len(keys) > 1 { panic("Programming 
error") } @@ -586,7 +589,7 @@ func PanicCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { returnResult := make([]*Result[K], len(keys)) for idx := range returnResult { returnResult[idx] = &Result[K]{ - keys[0], + keys[0].Raw(), nil, } } @@ -597,13 +600,13 @@ func PanicCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { return panicCacheLoader, &loadCalls } -func ErrorCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { - var loadCalls [][]K - errorCacheLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { +func ErrorCacheLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { + var loadCalls []Keys[K] + errorCacheLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { if len(keys) > 1 { var results []*Result[K] for _, key := range keys { - results = append(results, &Result[K]{key, fmt.Errorf("this is a test error")}) + results = append(results, &Result[K]{key.Raw(), fmt.Errorf("this is a test error")}) } return results } @@ -611,7 +614,7 @@ func ErrorCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { returnResult := make([]*Result[K], len(keys)) for idx := range returnResult { returnResult[idx] = &Result[K]{ - keys[0], + keys[0].Raw(), nil, } } @@ -622,31 +625,31 @@ func ErrorCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { return errorCacheLoader, &loadCalls } -func BadLoader[K comparable](max int) (*Loader[K, K], *[][]K) { +func BadLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { var mu sync.Mutex - var loadCalls [][]K - identityLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + var loadCalls []Keys[K] + identityLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { var results []*Result[K] mu.Lock() loadCalls = append(loadCalls, keys) mu.Unlock() - results = append(results, &Result[K]{keys[0], nil}) + results = append(results, &Result[K]{keys[0].Raw(), nil}) return results }, WithBatchCapacity[K, 
K](max)) return identityLoader, &loadCalls } -func NoCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { +func NoCacheLoader[K comparable](max int) (*Loader[K, K], *[]Keys[K]) { var mu sync.Mutex - var loadCalls [][]K + var loadCalls []Keys[K] cache := &NoCache[K, K]{} - identityLoader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + identityLoader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { var results []*Result[K] mu.Lock() loadCalls = append(loadCalls, keys) mu.Unlock() for _, key := range keys { - results = append(results, &Result[K]{key, nil}) + results = append(results, &Result[K]{key.Raw(), nil}) } return results }, WithCache[K, K](cache), WithBatchCapacity[K, K](max)) @@ -654,11 +657,11 @@ func NoCacheLoader[K comparable](max int) (*Loader[K, K], *[][]K) { } // FaultyLoader gives len(keys)-1 results. -func FaultyLoader[K comparable]() (*Loader[K, K], *[][]K) { +func FaultyLoader[K comparable]() (*Loader[K, K], *[]Keys[K]) { var mu sync.Mutex - var loadCalls [][]K + var loadCalls []Keys[K] - loader := NewBatchedLoader(func(_ context.Context, keys []K) []*Result[K] { + loader := NewBatchedLoader(func(_ context.Context, keys Keys[K]) []*Result[K] { var results []*Result[K] mu.Lock() loadCalls = append(loadCalls, keys) @@ -670,7 +673,7 @@ func FaultyLoader[K comparable]() (*Loader[K, K], *[][]K) { break } - results = append(results, &Result[K]{key, nil}) + results = append(results, &Result[K]{key.Raw(), nil}) } return results }) @@ -683,10 +686,10 @@ func FaultyLoader[K comparable]() (*Loader[K, K], *[][]K) { /////////////////////////////////////////////////// var a = &Avg{} -func batchIdentity[K comparable](_ context.Context, keys []K) (results []*Result[K]) { +func batchIdentity[K comparable](_ context.Context, keys Keys[K]) (results []*Result[K]) { a.Add(len(keys)) for _, key := range keys { - results = append(results, &Result[K]{key, nil}) + results = append(results, &Result[K]{key.Raw(), nil}) } 
return } @@ -697,7 +700,7 @@ func BenchmarkLoader(b *testing.B) { UserLoader := NewBatchedLoader[string, string](batchIdentity[string]) b.ResetTimer() for i := 0; i < b.N; i++ { - UserLoader.Load(_ctx, (strconv.Itoa(i))) + UserLoader.Load(_ctx, KeyOf(strconv.Itoa(i))) } log.Printf("avg: %f", a.Avg()) } diff --git a/example/lru_cache/golang_lru_test.go b/example/lru_cache/golang_lru_test.go index 9b2caf5..7084a46 100644 --- a/example/lru_cache/golang_lru_test.go +++ b/example/lru_cache/golang_lru_test.go @@ -1,23 +1,23 @@ -// package lru_cache_test contains an exmaple of using go-cache as a long term cache solution for dataloader. +// package lru_cache_test contains an example of using go-cache as a long term cache solution for dataloader. package lru_cache_test import ( "context" "fmt" - dataloader "github.com/graph-gophers/dataloader/v7" - lru "github.com/hashicorp/golang-lru" + + "github.com/graph-gophers/dataloader/v8" ) // Cache implements the dataloader.Cache interface -type cache[K comparable, V any] struct { - *lru.ARCCache +type cache[K any, V any] struct { + arc *lru.ARCCache } // Get gets an item from the cache -func (c *cache[K, V]) Get(_ context.Context, key K) (dataloader.Thunk[V], bool) { - v, ok := c.ARCCache.Get(key) +func (c *cache[K, V]) Get(_ context.Context, key dataloader.Key[K]) (dataloader.Thunk[V], bool) { + v, ok := c.arc.Get(key.String()) if ok { return v.(dataloader.Thunk[V]), ok } @@ -25,22 +25,22 @@ func (c *cache[K, V]) Get(_ context.Context, key K) (dataloader.Thunk[V], bool) } // Set sets an item in the cache -func (c *cache[K, V]) Set(_ context.Context, key K, value dataloader.Thunk[V]) { - c.ARCCache.Add(key, value) +func (c *cache[K, V]) Set(_ context.Context, key dataloader.Key[K], value dataloader.Thunk[V]) { + c.arc.Add(key.String(), value) } // Delete deletes an item in the cache -func (c *cache[K, V]) Delete(_ context.Context, key K) bool { - if c.ARCCache.Contains(key) { - c.ARCCache.Remove(key) +func (c *cache[K, V]) 
Delete(_ context.Context, key dataloader.Key[K]) bool { + if c.arc.Contains(key.String()) { + c.arc.Remove(key.String()) return true } return false } -// Clear cleasrs the cache +// Clear clears the cache func (c *cache[K, V]) Clear() { - c.ARCCache.Purge() + c.arc.Purge() } func ExampleGolangLRU() { @@ -52,25 +52,25 @@ func ExampleGolangLRU() { } m := map[int]*User{ - 5: &User{ID: 5, FirstName: "John", LastName: "Smith", Email: "john@example.com"}, + 5: {ID: 5, FirstName: "John", LastName: "Smith", Email: "john@example.com"}, } - batchFunc := func(_ context.Context, keys []int) []*dataloader.Result[*User] { + batchFunc := func(_ context.Context, keys dataloader.Keys[int]) []*dataloader.Result[*User] { var results []*dataloader.Result[*User] // do some pretend work to resolve keys for _, k := range keys { - results = append(results, &dataloader.Result[*User]{Data: m[k]}) + results = append(results, &dataloader.Result[*User]{Data: m[k.Raw()]}) } return results } // go-cache will automaticlly cleanup expired items on given duration. 
- c, _ := lru.NewARC(100) - cache := &cache[int, *User]{ARCCache: c} - loader := dataloader.NewBatchedLoader(batchFunc, dataloader.WithCache[int, *User](cache)) + arc, _ := lru.NewARC(100) + c := &cache[int, *User]{arc: arc} + loader := dataloader.NewBatchedLoader(batchFunc, dataloader.WithCache[int, *User](c)) // immediately call the future function from loader - result, err := loader.Load(context.TODO(), 5)() + result, err := loader.Load(context.TODO(), dataloader.KeyOf(5))() if err != nil { // handle error } diff --git a/example/no_cache/no_cache_test.go b/example/no_cache/no_cache_test.go index 15f167c..3b05df7 100644 --- a/example/no_cache/no_cache_test.go +++ b/example/no_cache/no_cache_test.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - dataloader "github.com/graph-gophers/dataloader/v7" + "github.com/graph-gophers/dataloader/v8" ) func ExampleNoCache() { @@ -16,23 +16,22 @@ func ExampleNoCache() { } m := map[int]*User{ - 5: &User{ID: 5, FirstName: "John", LastName: "Smith", Email: "john@example.com"}, + 5: {ID: 5, FirstName: "John", LastName: "Smith", Email: "john@example.com"}, } - batchFunc := func(_ context.Context, keys []int) []*dataloader.Result[*User] { + batchFunc := func(_ context.Context, keys dataloader.Keys[int]) []*dataloader.Result[*User] { var results []*dataloader.Result[*User] // do some pretend work to resolve keys for _, k := range keys { - results = append(results, &dataloader.Result[*User]{Data: m[k]}) + results = append(results, &dataloader.Result[*User]{Data: m[k.Raw()]}) } return results } - // go-cache will automaticlly cleanup expired items on given diration cache := &dataloader.NoCache[int, *User]{} loader := dataloader.NewBatchedLoader(batchFunc, dataloader.WithCache[int, *User](cache)) - result, err := loader.Load(context.Background(), 5)() + result, err := loader.Load(context.Background(), dataloader.KeyOf(5))() if err != nil { // handle error } diff --git a/example/ttl_cache/go_cache_test.go b/example/ttl_cache/go_cache_test.go 
index 4b369b3..bac1c52 100644 --- a/example/ttl_cache/go_cache_test.go +++ b/example/ttl_cache/go_cache_test.go @@ -1,4 +1,4 @@ -// package ttl_cache_test contains an exmaple of using go-cache as a long term cache solution for dataloader. +// package ttl_cache_test contains an example of using go-cache as a long term cache solution for dataloader. package ttl_cache_test import ( @@ -6,20 +6,19 @@ import ( "fmt" "time" - dataloader "github.com/graph-gophers/dataloader/v7" + "github.com/graph-gophers/dataloader/v8" - cache "github.com/patrickmn/go-cache" + "github.com/patrickmn/go-cache" ) // Cache implements the dataloader.Cache interface -type Cache[K comparable, V any] struct { +type Cache[K any, V any] struct { c *cache.Cache } // Get gets a value from the cache -func (c *Cache[K, V]) Get(_ context.Context, key K) (dataloader.Thunk[V], bool) { - k := fmt.Sprintf("%v", key) // convert the key to string because the underlying library doesn't support Generics yet - v, ok := c.c.Get(k) +func (c *Cache[K, V]) Get(_ context.Context, key dataloader.Key[K]) (dataloader.Thunk[V], bool) { + v, ok := c.c.Get(key.String()) if ok { return v.(dataloader.Thunk[V]), ok } @@ -27,14 +26,13 @@ func (c *Cache[K, V]) Get(_ context.Context, key K) (dataloader.Thunk[V], bool) } // Set sets a value in the cache -func (c *Cache[K, V]) Set(_ context.Context, key K, value dataloader.Thunk[V]) { - k := fmt.Sprintf("%v", key) // convert the key to string because the underlying library doesn't support Generics yet - c.c.Set(k, value, 0) +func (c *Cache[K, V]) Set(_ context.Context, key dataloader.Key[K], value dataloader.Thunk[V]) { + c.c.Set(key.String(), value, 0) } // Delete deletes and item in the cache -func (c *Cache[K, V]) Delete(_ context.Context, key K) bool { - k := fmt.Sprintf("%v", key) // convert the key to string because the underlying library doesn't support Generics yet +func (c *Cache[K, V]) Delete(_ context.Context, key dataloader.Key[K]) bool { + k := key.String() if _, 
found := c.c.Get(k); found { c.c.Delete(k) return true @@ -56,25 +54,25 @@ func ExampleTTLCache() { } m := map[int]*User{ - 5: &User{ID: 5, FirstName: "John", LastName: "Smith", Email: "john@example.com"}, + 5: {ID: 5, FirstName: "John", LastName: "Smith", Email: "john@example.com"}, } - batchFunc := func(_ context.Context, keys []int) []*dataloader.Result[*User] { + batchFunc := func(_ context.Context, keys dataloader.Keys[int]) []*dataloader.Result[*User] { var results []*dataloader.Result[*User] // do some pretend work to resolve keys for _, k := range keys { - results = append(results, &dataloader.Result[*User]{Data: m[k]}) + results = append(results, &dataloader.Result[*User]{Data: m[k.Raw()]}) } return results } - // go-cache will automaticlly cleanup expired items on given diration + // go-cache will automatically remove expired items on given duration c := cache.New(15*time.Minute, 15*time.Minute) - cache := &Cache[int, *User]{c} - loader := dataloader.NewBatchedLoader(batchFunc, dataloader.WithCache[int, *User](cache)) + wrapCache := &Cache[int, *User]{c} + loader := dataloader.NewBatchedLoader(batchFunc, dataloader.WithCache[int, *User](wrapCache)) // immediately call the future function from loader - result, err := loader.Load(context.Background(), 5)() + result, err := loader.Load(context.Background(), dataloader.KeyOf(5))() if err != nil { // handle error } diff --git a/go.mod b/go.mod index d5fe9d0..9f1b68f 100644 --- a/go.mod +++ b/go.mod @@ -1,16 +1,21 @@ -module github.com/graph-gophers/dataloader/v7 +module github.com/graph-gophers/dataloader/v8 go 1.18 require ( - github.com/hashicorp/golang-lru v0.5.4 + github.com/hashicorp/golang-lru v1.0.2 github.com/opentracing/opentracing-go v1.2.0 github.com/patrickmn/go-cache v2.1.0+incompatible - go.opentelemetry.io/otel v1.6.3 - go.opentelemetry.io/otel/trace v1.6.3 + github.com/stretchr/testify v1.8.4 + go.opentelemetry.io/otel v1.22.0 + go.opentelemetry.io/otel/trace v1.22.0 ) require ( - 
github.com/go-logr/logr v1.2.3 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/otel/metric v1.22.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index b7b7de5..ec67c79 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,14 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -17,13 +17,15 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= -go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= -go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= -go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod 
h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/in_memory_cache.go b/in_memory_cache.go index 354cece..f1cb9aa 100644 --- a/in_memory_cache.go +++ b/in_memory_cache.go @@ -8,35 +8,35 @@ import ( // InMemoryCache is an in memory implementation of Cache interface. // This simple implementation is well suited for // a "per-request" dataloader (i.e. one that only lives -// for the life of an http request) but it's not well suited -// for long lived cached items. -type InMemoryCache[K comparable, V any] struct { - items map[K]Thunk[V] +// for the life of a http request) but it's not well suited +// for long-lived cached items. 
+type InMemoryCache[K any, V any] struct { + items map[string]Thunk[V] mu sync.RWMutex } // NewCache constructs a new InMemoryCache -func NewCache[K comparable, V any]() *InMemoryCache[K, V] { - items := make(map[K]Thunk[V]) +func NewCache[K any, V any]() *InMemoryCache[K, V] { + items := make(map[string]Thunk[V]) return &InMemoryCache[K, V]{ items: items, } } // Set sets the `value` at `key` in the cache -func (c *InMemoryCache[K, V]) Set(_ context.Context, key K, value Thunk[V]) { +func (c *InMemoryCache[K, V]) Set(_ context.Context, key Key[K], value Thunk[V]) { c.mu.Lock() - c.items[key] = value + c.items[key.String()] = value c.mu.Unlock() } -// Get gets the value at `key` if it exsits, returns value (or nil) and bool +// Get gets the value at `key` if it exists, returns value (or nil) and bool // indicating of value was found -func (c *InMemoryCache[K, V]) Get(_ context.Context, key K) (Thunk[V], bool) { +func (c *InMemoryCache[K, V]) Get(_ context.Context, key Key[K]) (Thunk[V], bool) { c.mu.RLock() defer c.mu.RUnlock() - item, found := c.items[key] + item, found := c.items[key.String()] if !found { return nil, false } @@ -45,11 +45,11 @@ func (c *InMemoryCache[K, V]) Get(_ context.Context, key K) (Thunk[V], bool) { } // Delete deletes item at `key` from cache -func (c *InMemoryCache[K, V]) Delete(ctx context.Context, key K) bool { +func (c *InMemoryCache[K, V]) Delete(ctx context.Context, key Key[K]) bool { if _, found := c.Get(ctx, key); found { c.mu.Lock() defer c.mu.Unlock() - delete(c.items, key) + delete(c.items, key.String()) return true } return false @@ -58,6 +58,6 @@ func (c *InMemoryCache[K, V]) Delete(ctx context.Context, key K) bool { // Clear clears the entire cache func (c *InMemoryCache[K, V]) Clear() { c.mu.Lock() - c.items = map[K]Thunk[V]{} + c.items = map[string]Thunk[V]{} c.mu.Unlock() } diff --git a/key.go b/key.go new file mode 100644 index 0000000..6d8552f --- /dev/null +++ b/key.go @@ -0,0 +1,92 @@ +package dataloader + +import "fmt" 
+ +// Key is the interface that all keys need to implement +type Key[K any] interface { + fmt.Stringer + // Raw returns the underlying value of the key + Raw() K +} + +// Keys wraps a slice of Key types to provide some convenience methods. +type Keys[K any] []Key[K] + +// Keys returns the list of strings. One for each "Key" in the list +func (l Keys[K]) Keys() []string { + list := make([]string, len(l)) + for i := range l { + list[i] = l[i].String() + } + return list +} + +// Raws returns the list of raw values in the key list +func (l Keys[K]) Raws() []K { + list := make([]K, len(l)) + for i := range l { + list[i] = l[i].Raw() + } + return list +} + +// KeyOf wraps the given comparable type as Key +func KeyOf[K comparable](item K) Key[K] { + return comparableKey[K]{item} +} + +// KeysFrom wraps a variadic list of comparable types as Keys +func KeysFrom[K comparable](items ...K) Keys[K] { + list := make(Keys[K], len(items)) + for i := range items { + list[i] = comparableKey[K]{items[i]} + } + + return list +} + +// StringerKey wraps the given fmt.Stringer implementation as Key, so it can be used in the dataloader +// The Key is strictly typed to the implementing type, and cannot be mixed with other Keys, +// which themselves implement the fmt.Stringer interface +func StringerKey[K fmt.Stringer](item K) Key[K] { + return stringerKey[K]{item} +} + +// KeysFromStringers wraps the given variadic list of fmt.Stringer implementations as Keys +// The Keys are strictly typed to the implementing type of the first element +// The normal rules of type inference on type parameters apply +func KeysFromStringers[K fmt.Stringer](items ...K) Keys[K] { + list := make(Keys[K], len(items)) + for i := range items { + list[i] = stringerKey[K]{items[i]} + } + + return list +} + +// comparableKey implements the Key interface for any comparable type +type comparableKey[K comparable] struct { + cmp K +} + +func (k comparableKey[K]) String() string { + return fmt.Sprintf("%v", k.cmp) +} + 
+func (k comparableKey[K]) Raw() K { + return k.cmp +} + +var _ Key[Key[string]] = (*stringerKey[Key[string]])(nil) + +type stringerKey[K fmt.Stringer] struct { + raw K +} + +func (k stringerKey[K]) String() string { + return k.raw.String() +} + +func (k stringerKey[K]) Raw() K { + return k.raw +} diff --git a/key_test.go b/key_test.go new file mode 100644 index 0000000..b5addc9 --- /dev/null +++ b/key_test.go @@ -0,0 +1,116 @@ +package dataloader_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + . "github.com/graph-gophers/dataloader/v8" +) + +func TestKeyOf(t *testing.T) { + t.Run("int", func(t *testing.T) { + key := KeyOf[int](5) + assert.Implements(t, (*Key[int])(nil), key) + assert.Equal(t, "5", key.String()) + assert.Equal(t, 5, key.Raw()) + }) + + t.Run("uint32", func(t *testing.T) { + key := KeyOf[uint32](53) + assert.Implements(t, (*Key[uint32])(nil), key) + assert.Equal(t, "53", key.String()) + assert.Equal(t, uint32(53), key.Raw()) + }) + + t.Run("[2]int", func(t *testing.T) { + key := KeyOf([...]int{5, 3}) + assert.Implements(t, (*Key[[2]int])(nil), key) + assert.Equal(t, "[5 3]", key.String()) + assert.Equal(t, [2]int{5, 3}, key.Raw()) + }) + + t.Run("comparable struct", func(t *testing.T) { + type foo struct { + a int + b string + } + + raw := foo{a: 5, b: "bar"} + key := KeyOf(raw) + assert.Implements(t, (*Key[foo])(nil), key) + assert.Equal(t, "{5 bar}", key.String()) + assert.Equal(t, raw, key.Raw()) + }) +} + +func TestKeysFrom(t *testing.T) { + t.Run("int", func(t *testing.T) { + keys := KeysFrom[int](5, 6) + assert.IsType(t, (Keys[int])(nil), keys) + assert.Equal(t, []string{"5", "6"}, keys.Keys()) + assert.Equal(t, []int{5, 6}, keys.Raws()) + }) + + t.Run("uint32", func(t *testing.T) { + keys := KeysFrom[uint32](5, 3) + assert.IsType(t, (Keys[uint32])(nil), keys) + assert.Equal(t, []string{"5", "3"}, keys.Keys()) + assert.Equal(t, []uint32{5, 3}, keys.Raws()) + }) + + t.Run("[2]int", func(t *testing.T) { + keys 
:= KeysFrom([...]int{5, 3}, [...]int{4, 9}) + assert.IsType(t, (Keys[[2]int])(nil), keys) + assert.Equal(t, []string{"[5 3]", "[4 9]"}, keys.Keys()) + assert.Equal(t, [][2]int{{5, 3}, {4, 9}}, keys.Raws()) + }) + + t.Run("comparable struct", func(t *testing.T) { + type foo struct { + a int + b string + } + + keys := KeysFrom(foo{a: 5, b: "bar"}, foo{a: 42, b: "foobar"}) + assert.IsType(t, (Keys[foo])(nil), keys) + assert.Equal(t, []string{"{5 bar}", "{42 foobar}"}, keys.Keys()) + assert.Equal(t, []foo{{a: 5, b: "bar"}, {a: 42, b: "foobar"}}, keys.Raws()) + }) +} + +func TestStringerKey(t *testing.T) { + raw := stringer{"foo", []int{42, 23}} + key := StringerKey(raw) + assert.Implements(t, (*Key[stringer])(nil), key) + expectedID := raw.String() + assert.Equalf(t, expectedID, key.String(), "String() value must match expected `%s`", expectedID) + assert.Equal(t, raw, key.Raw()) +} + +func TestKeysFromStringers(t *testing.T) { + raws := []stringer{ + {"foo", []int{42, 23}}, + {"bar", []int{4711, 1337, 1887}}, + } + + keys := KeysFromStringers(raws...) + assert.IsType(t, (Keys[stringer])(nil), keys) + assert.Equal(t, []string{ + "foo[42 23]", + "bar[4711 1337 1887]", + }, keys.Keys()) + assert.Equal(t, raws, keys.Raws()) +} + +// stringer represents struct, which is not comparable, but shall still be used as dataloader key +type stringer struct { + name string + // this field prevents this type to be used as comparable + kpis []int +} + +func (s stringer) String() string { + return fmt.Sprintf("%s%v", s.name, s.kpis) +} diff --git a/trace.go b/trace.go index 5af640d..68caf94 100644 --- a/trace.go +++ b/trace.go @@ -9,29 +9,29 @@ type TraceLoadManyFinishFunc[V any] func(ThunkMany[V]) type TraceBatchFinishFunc[V any] func([]*Result[V]) // Tracer is an interface that may be used to implement tracing. -type Tracer[K comparable, V any] interface { +type Tracer[K any, V any] interface { // TraceLoad will trace the calls to Load. 
- TraceLoad(ctx context.Context, key K) (context.Context, TraceLoadFinishFunc[V]) + TraceLoad(ctx context.Context, key Key[K]) (context.Context, TraceLoadFinishFunc[V]) // TraceLoadMany will trace the calls to LoadMany. - TraceLoadMany(ctx context.Context, keys []K) (context.Context, TraceLoadManyFinishFunc[V]) + TraceLoadMany(ctx context.Context, keys Keys[K]) (context.Context, TraceLoadManyFinishFunc[V]) // TraceBatch will trace data loader batches. - TraceBatch(ctx context.Context, keys []K) (context.Context, TraceBatchFinishFunc[V]) + TraceBatch(ctx context.Context, keys Keys[K]) (context.Context, TraceBatchFinishFunc[V]) } // NoopTracer is the default (noop) tracer -type NoopTracer[K comparable, V any] struct{} +type NoopTracer[K any, V any] struct{} // TraceLoad is a noop function -func (NoopTracer[K, V]) TraceLoad(ctx context.Context, key K) (context.Context, TraceLoadFinishFunc[V]) { +func (NoopTracer[K, V]) TraceLoad(ctx context.Context, _ Key[K]) (context.Context, TraceLoadFinishFunc[V]) { return ctx, func(Thunk[V]) {} } // TraceLoadMany is a noop function -func (NoopTracer[K, V]) TraceLoadMany(ctx context.Context, keys []K) (context.Context, TraceLoadManyFinishFunc[V]) { +func (NoopTracer[K, V]) TraceLoadMany(ctx context.Context, _ Keys[K]) (context.Context, TraceLoadManyFinishFunc[V]) { return ctx, func(ThunkMany[V]) {} } // TraceBatch is a noop function -func (NoopTracer[K, V]) TraceBatch(ctx context.Context, keys []K) (context.Context, TraceBatchFinishFunc[V]) { +func (NoopTracer[K, V]) TraceBatch(ctx context.Context, _ Keys[K]) (context.Context, TraceBatchFinishFunc[V]) { return ctx, func(result []*Result[V]) {} } diff --git a/trace/opentracing/trace.go b/trace/opentracing/trace.go index 6ad9fba..4f7f878 100644 --- a/trace/opentracing/trace.go +++ b/trace/opentracing/trace.go @@ -2,21 +2,20 @@ package opentracing import ( "context" - "fmt" - "github.com/graph-gophers/dataloader/v7" + "github.com/graph-gophers/dataloader/v8" 
"github.com/opentracing/opentracing-go" ) // Tracer implements a tracer that can be used with the Open Tracing standard. -type Tracer[K comparable, V any] struct{} +type Tracer[K any, V any] struct{} // TraceLoad will trace a call to dataloader.LoadMany with Open Tracing. -func (Tracer[K, V]) TraceLoad(ctx context.Context, key K) (context.Context, dataloader.TraceLoadFinishFunc[V]) { +func (Tracer[K, V]) TraceLoad(ctx context.Context, key dataloader.Key[K]) (context.Context, dataloader.TraceLoadFinishFunc[V]) { span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: load") - span.SetTag("dataloader.key", fmt.Sprintf("%v", key)) + span.SetTag("dataloader.key", key.String()) return spanCtx, func(thunk dataloader.Thunk[V]) { span.Finish() @@ -24,10 +23,10 @@ func (Tracer[K, V]) TraceLoad(ctx context.Context, key K) (context.Context, data } // TraceLoadMany will trace a call to dataloader.LoadMany with Open Tracing. -func (Tracer[K, V]) TraceLoadMany(ctx context.Context, keys []K) (context.Context, dataloader.TraceLoadManyFinishFunc[V]) { +func (Tracer[K, V]) TraceLoadMany(ctx context.Context, keys dataloader.Keys[K]) (context.Context, dataloader.TraceLoadManyFinishFunc[V]) { span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: loadmany") - span.SetTag("dataloader.keys", fmt.Sprintf("%v", keys)) + span.SetTag("dataloader.keys", keys.Keys()) return spanCtx, func(thunk dataloader.ThunkMany[V]) { span.Finish() @@ -35,10 +34,10 @@ func (Tracer[K, V]) TraceLoadMany(ctx context.Context, keys []K) (context.Contex } // TraceBatch will trace a call to dataloader.LoadMany with Open Tracing. 
-func (Tracer[K, V]) TraceBatch(ctx context.Context, keys []K) (context.Context, dataloader.TraceBatchFinishFunc[V]) { +func (Tracer[K, V]) TraceBatch(ctx context.Context, keys dataloader.Keys[K]) (context.Context, dataloader.TraceBatchFinishFunc[V]) { span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: batch") - span.SetTag("dataloader.keys", fmt.Sprintf("%v", keys)) + span.SetTag("dataloader.keys", keys.Keys()) return spanCtx, func(results []*dataloader.Result[V]) { span.Finish() diff --git a/trace/opentracing/trace_test.go b/trace/opentracing/trace_test.go index d267d9b..3eb7d53 100644 --- a/trace/opentracing/trace_test.go +++ b/trace/opentracing/trace_test.go @@ -3,8 +3,8 @@ package opentracing_test import ( "testing" - "github.com/graph-gophers/dataloader/v7" - "github.com/graph-gophers/dataloader/v7/trace/opentracing" + "github.com/graph-gophers/dataloader/v8" + "github.com/graph-gophers/dataloader/v8/trace/opentracing" ) func TestInterfaceImplementation(t *testing.T) { diff --git a/trace/otel/trace.go b/trace/otel/trace.go index a3828eb..910e745 100644 --- a/trace/otel/trace.go +++ b/trace/otel/trace.go @@ -2,9 +2,8 @@ package otel import ( "context" - "fmt" - "github.com/graph-gophers/dataloader/v7" + "github.com/graph-gophers/dataloader/v8" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -12,11 +11,11 @@ import ( ) // Tracer implements a tracer that can be used with the Open Tracing standard. -type Tracer[K comparable, V any] struct { +type Tracer[K any, V any] struct { tr trace.Tracer } -func NewTracer[K comparable, V any](tr trace.Tracer) *Tracer[K, V] { +func NewTracer[K any, V any](tr trace.Tracer) *Tracer[K, V] { return &Tracer[K, V]{tr: tr} } @@ -28,10 +27,10 @@ func (t *Tracer[K, V]) Tracer() trace.Tracer { } // TraceLoad will trace a call to dataloader.LoadMany with Open Tracing. 
-func (t Tracer[K, V]) TraceLoad(ctx context.Context, key K) (context.Context, dataloader.TraceLoadFinishFunc[V]) { +func (t Tracer[K, V]) TraceLoad(ctx context.Context, key dataloader.Key[K]) (context.Context, dataloader.TraceLoadFinishFunc[V]) { spanCtx, span := t.Tracer().Start(ctx, "Dataloader: load") - span.SetAttributes(attribute.String("dataloader.key", fmt.Sprintf("%v", key))) + span.SetAttributes(attribute.String("dataloader.key", key.String())) return spanCtx, func(thunk dataloader.Thunk[V]) { span.End() @@ -39,10 +38,10 @@ func (t Tracer[K, V]) TraceLoad(ctx context.Context, key K) (context.Context, da } // TraceLoadMany will trace a call to dataloader.LoadMany with Open Tracing. -func (t Tracer[K, V]) TraceLoadMany(ctx context.Context, keys []K) (context.Context, dataloader.TraceLoadManyFinishFunc[V]) { +func (t Tracer[K, V]) TraceLoadMany(ctx context.Context, keys dataloader.Keys[K]) (context.Context, dataloader.TraceLoadManyFinishFunc[V]) { spanCtx, span := t.Tracer().Start(ctx, "Dataloader: loadmany") - span.SetAttributes(attribute.String("dataloader.keys", fmt.Sprintf("%v", keys))) + span.SetAttributes(attribute.StringSlice("dataloader.keys", keys.Keys())) return spanCtx, func(thunk dataloader.ThunkMany[V]) { span.End() @@ -50,10 +49,10 @@ func (t Tracer[K, V]) TraceLoadMany(ctx context.Context, keys []K) (context.Cont } // TraceBatch will trace a call to dataloader.LoadMany with Open Tracing. 
-func (t Tracer[K, V]) TraceBatch(ctx context.Context, keys []K) (context.Context, dataloader.TraceBatchFinishFunc[V]) { +func (t Tracer[K, V]) TraceBatch(ctx context.Context, keys dataloader.Keys[K]) (context.Context, dataloader.TraceBatchFinishFunc[V]) { spanCtx, span := t.Tracer().Start(ctx, "Dataloader: batch") - span.SetAttributes(attribute.String("dataloader.keys", fmt.Sprintf("%v", keys))) + span.SetAttributes(attribute.StringSlice("dataloader.keys", keys.Keys())) return spanCtx, func(results []*dataloader.Result[V]) { span.End() diff --git a/trace/otel/trace_test.go b/trace/otel/trace_test.go index 6ec146c..9358142 100644 --- a/trace/otel/trace_test.go +++ b/trace/otel/trace_test.go @@ -3,8 +3,8 @@ package otel_test import ( "testing" - "github.com/graph-gophers/dataloader/v7" - "github.com/graph-gophers/dataloader/v7/trace/otel" + "github.com/graph-gophers/dataloader/v8" + "github.com/graph-gophers/dataloader/v8/trace/otel" ) func TestInterfaceImplementation(t *testing.T) {