Commit

Update main.go
Narasimha1997 authored and Prasanna19971124 committed Oct 18, 2022
1 parent f203855 commit bac3a0a
Showing 2 changed files with 150 additions and 1 deletion.
3 changes: 2 additions & 1 deletion README.md
@@ -1,2 +1,3 @@
# clfu
Implementation of constant time LFU cache in go with concurrency in mind
Implementation of a constant-time LFU (Least Frequently Used) cache in Go with concurrency safety. This implementation is based on the paper [An O(1) algorithm for implementing the LFU cache eviction scheme](http://dhruvbird.com/lfu.pdf) by Prof. Ketan Shah, Anirban Mitra and Dhruv Matani. As opposed to a priority-heap based LFU cache, this mechanism provides almost O(1) insertion, retrieval and eviction operations.
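
For context, the core idea of the cited paper is a doubly linked list of frequency buckets: each bucket holds every key that has been accessed the same number of times, each cached entry keeps a pointer to its bucket, promoting a key on access only touches the neighbouring bucket, and eviction always takes a key from the head (lowest-frequency) bucket. The sketch below illustrates that layout only; the names (`bucket`, `lfu`, `touch`) are illustrative and are not the `clfu` API.

// A minimal, single-threaded sketch of the frequency-list layout from the paper
// (illustrative only; not the clfu implementation or its API).
package main

import (
    "container/list"
    "fmt"
)

// bucket groups every key that currently shares the same access frequency.
type bucket struct {
    freq uint64
    keys map[interface{}]struct{}
}

// entry stores one cached value plus a pointer back to its frequency bucket.
type entry struct {
    value    interface{}
    freqNode *list.Element // element whose Value is *bucket
}

type lfu struct {
    maxSize int
    items   map[interface{}]*entry
    freqs   *list.List // *bucket nodes in ascending frequency order
}

func newLFU(maxSize int) *lfu {
    return &lfu{maxSize: maxSize, items: make(map[interface{}]*entry), freqs: list.New()}
}

// touch moves a key from its current bucket to the freq+1 bucket, creating that
// bucket right after the current one if needed; constant work per access.
func (c *lfu) touch(key interface{}, e *entry) {
    cur := e.freqNode
    b := cur.Value.(*bucket)
    next := cur.Next()
    if next == nil || next.Value.(*bucket).freq != b.freq+1 {
        next = c.freqs.InsertAfter(&bucket{freq: b.freq + 1, keys: make(map[interface{}]struct{})}, cur)
    }
    delete(b.keys, key)
    if len(b.keys) == 0 {
        c.freqs.Remove(cur)
    }
    next.Value.(*bucket).keys[key] = struct{}{}
    e.freqNode = next
}

// Get returns the value and promotes the key to the next-higher frequency bucket.
func (c *lfu) Get(key interface{}) (interface{}, bool) {
    e, ok := c.items[key]
    if !ok {
        return nil, false
    }
    c.touch(key, e)
    return e.value, true
}

// Put inserts the key into the frequency-1 bucket, evicting a key from the
// lowest-frequency bucket (the list head) when the cache is full.
func (c *lfu) Put(key, value interface{}) {
    if e, ok := c.items[key]; ok {
        e.value = value
        c.touch(key, e)
        return
    }
    if len(c.items) >= c.maxSize {
        head := c.freqs.Front()
        b := head.Value.(*bucket)
        for victim := range b.keys { // any key of the least-used bucket; tie-break is arbitrary here
            delete(b.keys, victim)
            delete(c.items, victim)
            break
        }
        if len(b.keys) == 0 {
            c.freqs.Remove(head)
        }
    }
    head := c.freqs.Front()
    if head == nil || head.Value.(*bucket).freq != 1 {
        head = c.freqs.PushFront(&bucket{freq: 1, keys: make(map[interface{}]struct{})})
    }
    head.Value.(*bucket).keys[key] = struct{}{}
    c.items[key] = &entry{value: value, freqNode: head}
}

func main() {
    c := newLFU(2)
    c.Put("a", 1)
    c.Put("b", 2)
    c.Get("a")              // "a" now has frequency 2
    c.Put("c", 3)           // cache full: evicts "b", the least frequently used key
    fmt.Println(c.Get("b")) // <nil> false
}

A binary-heap LFU instead pays O(log n) to restore heap order on every access and eviction, which is the cost this bucket layout avoids.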
148 changes: 148 additions & 0 deletions examples/main.go
@@ -3,8 +3,152 @@ package main
import (
    "clfu"
    "fmt"
    "runtime"
    "sync"
    "time"
)

func normal() {
    lfuCache := clfu.NewLFUCache(1000)
    for i := 0; i < 1000; i++ {
        lfuCache.Put(i, i, false)
    }

    // each goroutine reads the cache 4M times; the total wall-clock time is measured below
    routine := func(wg *sync.WaitGroup) {
        defer wg.Done()
        for i := 0; i < 4*1000000; i++ {
            lfuCache.Get(i % 1000)
        }
    }

    wg := sync.WaitGroup{}
    st := time.Now()
    // run a goroutine for each CPU
    for i := 0; i < runtime.NumCPU(); i++ {
        wg.Add(1)
        go routine(&wg)
    }

    wg.Wait()

    et := time.Since(st)
    fmt.Printf("Normal (lazy not enabled): n_goroutines=%d, n_access=%d, time_taken=%v\n", runtime.NumCPU(), runtime.NumCPU()*4*1000000, et)
}

func lazy() {
    lfuCache := clfu.NewLazyLFUCache(1000, 1000)
    for i := 0; i < 1000; i++ {
        lfuCache.Put(i, i, false)
    }

    // each goroutine reads the cache 4M times; the total wall-clock time is measured below
    routine := func(wg *sync.WaitGroup) {
        defer wg.Done()
        for i := 0; i < 4*1000000; i++ {
            lfuCache.Get(i % 1000)
        }
    }

    wg := sync.WaitGroup{}
    st := time.Now()
    // run a goroutine for each CPU
    for i := 0; i < runtime.NumCPU(); i++ {
        wg.Add(1)
        go routine(&wg)
    }

    wg.Wait()

    et := time.Since(st)
    fmt.Printf("Lazy (with cache size 1000): n_goroutines=%d, n_access=%d, time_taken=%v\n", runtime.NumCPU(), runtime.NumCPU()*4*1000000, et)
}

func timeTaken() {
    // this function is very compute intensive
    fmt.Println("Checking lazy vs normal execution speeds")

    normal()
    lazy()
}

func averageAccessTimeNormal() {
    lfuCache := clfu.NewLFUCache(1000)
    for i := 0; i < 1000; i++ {
        lfuCache.Put(i, i, false)
    }

    // each goroutine reads the cache 4M times and records the per-access latency
    totalTime := 0
    // guards totalTime, which is updated from multiple goroutines
    var mu sync.Mutex

    routine := func(wg *sync.WaitGroup) {
        defer wg.Done()
        // accumulate locally and fold into totalTime once, keeping the hot loop lock-free
        localTime := 0
        for i := 0; i < 4*1000000; i++ {
            st := time.Now()
            lfuCache.Get(i % 1000)
            localTime += int(time.Since(st).Nanoseconds())
        }
        mu.Lock()
        totalTime += localTime
        mu.Unlock()
    }

    wg := sync.WaitGroup{}
    // run a goroutine for each CPU
    for i := 0; i < runtime.NumCPU(); i++ {
        wg.Add(1)
        go routine(&wg)
    }

    wg.Wait()

    avgAccessTimeNs := totalTime / (4 * 1000000 * runtime.NumCPU())
    fmt.Printf("Normal: n_goroutines=%d, n_access=%d, average_time_per_access=%dns\n", runtime.NumCPU(), runtime.NumCPU()*4*1000000, avgAccessTimeNs)
}

func averageAccessTimeLazy() {
    lfuCache := clfu.NewLazyLFUCache(1000, 1000)
    for i := 0; i < 1000; i++ {
        lfuCache.Put(i, i, false)
    }

    // each goroutine reads the cache 4M times and records the per-access latency
    totalTime := 0
    // guards totalTime, which is updated from multiple goroutines
    var mu sync.Mutex

    routine := func(wg *sync.WaitGroup) {
        defer wg.Done()
        // accumulate locally and fold into totalTime once, keeping the hot loop lock-free
        localTime := 0
        for i := 0; i < 4*1000000; i++ {
            st := time.Now()
            lfuCache.Get(i % 1000)
            localTime += int(time.Since(st).Nanoseconds())
        }
        mu.Lock()
        totalTime += localTime
        mu.Unlock()
    }

    wg := sync.WaitGroup{}
    // run a goroutine for each CPU
    for i := 0; i < runtime.NumCPU(); i++ {
        wg.Add(1)
        go routine(&wg)
    }

    wg.Wait()

    avgAccessTimeNs := totalTime / (4 * 1000000 * runtime.NumCPU())
    fmt.Printf("Lazy (with cache size 1000): n_goroutines=%d, n_access=%d, average_time_per_access=%dns\n", runtime.NumCPU(), runtime.NumCPU()*4*1000000, avgAccessTimeNs)
}

func averageAccessTime() {
    fmt.Println("Checking average access time - lazy vs normal")

    averageAccessTimeNormal()
    averageAccessTimeLazy()
}

func main() {

    // create a new instance of LFU cache with a max size
@@ -46,4 +190,8 @@ func main() {
    if err != nil {
        fmt.Printf("failed to delete, no key 'u939804'")
    }

    // these functions are provided for benchmark purposes
    timeTaken()
    averageAccessTime()
}
