* cache: add sharded cache implementation

  Add the Cache implementation and a few tests. The cache is 256-way sharded, mainly so that each shard has its own lock. The main cache structure is a read-only jump plane into the right shard. This should remove contention on a single main lock and give more concurrent throughput, although this has not been benchmarked or measured yet. The key into the cache was made a uint32 (hash/fnv), and the hashing no longer uses strings.ToLower, which removes GC pressure from that code path.

* here too
* Minimum shard size
* typos
* blurp
* small cleanups, no defer
* typo
* Add freq based on John's idea
* resolve cherry-pick conflict
* typo
* update from early code review from John
* add prefetch to the cache
* mw/cache: add prefetch
* remove println
* remove comment
* Fix tests
* Test prefetch in setup
* Add start of cache
* try adding different cache options
* Add hacky test case
* not needed
* allow the use of a percentage for prefetch

  If the TTL falls below xx% of the original TTL, do a prefetch, but only if the record was popular. Some other fixes, and correctly prefetch only popular records.
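As a rough illustration of the sharding described above — not the actual implementation from this change — a 256-way sharded map keyed by an FNV-32a hash might look like the sketch below. The names (Cache, shard, shardCount, Add, Get), the interface{} value type, and the omission of DNS case-folding during hashing are all assumptions made for brevity.

package cache

import (
	"hash/fnv"
	"sync"
)

// shardCount is the number of shards; 256 keeps shard selection to one byte of the hash.
const shardCount = 256

// shard is a small map guarded by its own lock.
type shard struct {
	sync.RWMutex
	items map[uint32]interface{}
}

// Cache is a read-only "jump plane" of shards; the array is never written
// after New, so selecting a shard needs no lock.
type Cache struct {
	shards [shardCount]*shard
}

// New returns a cache with all shards initialized.
func New() *Cache {
	c := &Cache{}
	for i := range c.shards {
		c.shards[i] = &shard{items: make(map[uint32]interface{})}
	}
	return c
}

// key hashes a name to a uint32 with FNV-1a, avoiding strings.ToLower
// (and its allocation) on the hot path.
func key(name string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(name))
	return h.Sum32()
}

// Add stores an item in the shard selected by the low byte of the key,
// taking only that shard's lock.
func (c *Cache) Add(name string, el interface{}) {
	k := key(name)
	s := c.shards[k&(shardCount-1)]
	s.Lock()
	s.items[k] = el
	s.Unlock()
}

// Get retrieves an item under the per-shard read lock.
func (c *Cache) Get(name string) (interface{}, bool) {
	k := key(name)
	s := c.shards[k&(shardCount-1)]
	s.RLock()
	el, ok := s.items[k]
	s.RUnlock()
	return el, ok
}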
package freq

import (
	"testing"
	"time"
)

func TestFreqUpdate(t *testing.T) {
	now := time.Now().UTC()
	f := New(now)
	window := 1 * time.Minute

	// Three updates inside the window count as three hits.
	f.Update(window, time.Now().UTC())
	f.Update(window, time.Now().UTC())
	f.Update(window, time.Now().UTC())
	hitsCheck(t, f, 3)

	// After a reset, a single update counts as one hit again.
	f.Reset(now, 0)
	history := time.Now().UTC().Add(-3 * time.Minute)
	f.Update(window, history)
	hitsCheck(t, f, 1)
}

func TestReset(t *testing.T) {
	f := New(time.Now().UTC())
	f.Update(1*time.Minute, time.Now().UTC())
	hitsCheck(t, f, 1)
	f.Reset(time.Now().UTC(), 0)
	hitsCheck(t, f, 0)
}

func hitsCheck(t *testing.T, f *Freq, expected int) {
	if x := f.Hits(); x != expected {
		t.Fatalf("Expected hits to be %d, got %d", expected, x)
	}
}
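The tests above exercise New, Update, Reset, and Hits. For orientation, here is a minimal Freq that would satisfy them; it is a sketch inferred from the test behavior only, not the implementation shipped in this change, and the field names are assumptions.

package freq

import (
	"sync"
	"time"
)

// Freq counts hits inside a sliding window. Shape inferred from the tests.
type Freq struct {
	sync.Mutex
	updated time.Time
	hits    int
}

// New returns a Freq with zero hits, last updated at now.
func New(now time.Time) *Freq {
	return &Freq{updated: now}
}

// Update records a hit: if now is within d of the previous update the count
// is incremented, otherwise it restarts at 1.
func (f *Freq) Update(d time.Duration, now time.Time) int {
	f.Lock()
	defer f.Unlock()
	if now.Sub(f.updated) < d {
		f.hits++
		f.updated = now
		return f.hits
	}
	f.hits = 1
	f.updated = now
	return f.hits
}

// Reset sets the hit count to hits and the update time to now.
func (f *Freq) Reset(now time.Time, hits int) {
	f.Lock()
	defer f.Unlock()
	f.updated = now
	f.hits = hits
}

// Hits returns the current hit count.
func (f *Freq) Hits() int {
	f.Lock()
	defer f.Unlock()
	return f.hits
}

In the prefetch path mentioned in the commit message, such a counter could gate prefetching roughly as below; shouldPrefetch, percentage, and threshold are illustrative names, not the real configuration options.

// shouldPrefetch reports whether an item is worth refreshing: its remaining
// TTL has dropped below `percentage` percent of the original TTL and it has
// been hit at least `threshold` times inside the window. Illustrative only.
func shouldPrefetch(f *Freq, origTTL, remaining time.Duration, percentage, threshold int) bool {
	if remaining > origTTL*time.Duration(percentage)/100 {
		return false
	}
	return f.Hits() >= threshold
}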