diff --git a/.github/workflows/go-cross.yml b/.github/workflows/go-cross.yml
index f579d66..91eb0c6 100644
--- a/.github/workflows/go-cross.yml
+++ b/.github/workflows/go-cross.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
matrix:
- go-version: [ 1.15, 1.x ]
+ go-version: [ 1.16, 1.x ]
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 630b69a..52c7ad2 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -14,8 +14,8 @@ jobs:
name: Main Process
runs-on: ubuntu-latest
env:
- GO_VERSION: 1.15
- GOLANGCI_LINT_VERSION: v1.33.0
+ GO_VERSION: 1.16
+ GOLANGCI_LINT_VERSION: v1.39.0
YAEGI_VERSION: v0.9.8
CGO_ENABLED: 0
defaults:
diff --git a/.gitignore b/.gitignore
index f32e31a..ffd2e01 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
.idea/
.DS_Store
+cover.html
+profile.cov
\ No newline at end of file
diff --git a/.golangci.toml b/.golangci.toml
index c9010b3..7ffdeb4 100644
--- a/.golangci.toml
+++ b/.golangci.toml
@@ -36,6 +36,13 @@
"testpackage",
"paralleltest",
"tparallel",
+ "scopelint",
+ "godox",
+ "funlen", # Re-enable it
+ "gocognit", # Re-enable it
+ "cyclop", # Re-enable it
+ "exhaustivestruct",
+ "nilerr",
]
[issues]
diff --git a/.traefik.yml b/.traefik.yml
index 2fcbfe8..b2eb797 100644
--- a/.traefik.yml
+++ b/.traefik.yml
@@ -1,11 +1,12 @@
-displayName: Demo Plugin
+displayName: Image optimizer
type: middleware
-import: github.com/traefik/plugindemo
+import: github.com/agravelot/imageopti
-summary: '[Demo] Add Request Header'
+summary: 'Image optimizer middleware is a middleware designed to optimize image responses on the fly.'
testData:
- Headers:
- X-Demo: test
- X-URL: '{{URL}}'
+ config:
+ processor: none
+ cache: none
+
diff --git a/Makefile b/Makefile
index 04f6f05..137a5bb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: lint test vendor clean
+.PHONY: lint test vendor clean coverage demo
export GO111MODULE=on
@@ -8,6 +8,8 @@ lint:
golangci-lint run
test:
+ rm -fr .cache | true
+ mkdir .cache
go test -v -cover ./...
yaegi_test:
@@ -17,4 +19,19 @@ vendor:
go mod vendor
clean:
- rm -rf ./vendor
\ No newline at end of file
+ rm -rf ./vendor
+
+coverage:
+ rm profile.cov cover.html && go test -v -coverpkg=./... -coverprofile=profile.cov ./... && go tool cover -html=profile.cov -o cover.html
+
+demo-init:
+ docker network create traefik-net | true
+
+demo-up: demo-init
+ docker-compose --env-file=demo/.env -f demo/gateway/docker-compose.yml -f demo/frontend/docker-compose.yml -f demo/imaginary/docker-compose.yml up -d
+
+demo-restart: demo-init
+ docker-compose --env-file=demo/.env -f demo/gateway/docker-compose.yml -f demo/frontend/docker-compose.yml -f demo/imaginary/docker-compose.yml restart
+
+demo-logs: demo-up
+ docker-compose --env-file=demo/.env -f demo/gateway/docker-compose.yml -f demo/frontend/docker-compose.yml -f demo/imaginary/docker-compose.yml logs -f gateway imaginary
\ No newline at end of file
diff --git a/cache/factory.go b/cache/factory.go
new file mode 100644
index 0000000..05afc4a
--- /dev/null
+++ b/cache/factory.go
@@ -0,0 +1,51 @@
+// Package cache provide caching systems for images.
+package cache
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/agravelot/imageopti/config"
+)
+
+// Cache Define cache system interface.
+type Cache interface {
+ Get(key string) ([]byte, error)
+ Set(key string, val []byte, expiry time.Duration) error
+}
+
+const defaultCacheExpiry = 100 * time.Second
+
+// New is the cache factory to instantiate a new instance of cache.
+func New(conf config.Config) (Cache, error) {
+ // if conf.Processor == "redis" {
+ // opt, err := redis.ParseURL(conf.Redis.URL)
+ // if err != nil {
+ // return nil, err
+ // }
+
+ // client := redis.NewClient(opt)
+
+ // return &RedisCache{
+ // client: client,
+ // }, nil
+ // }
+
+ if conf.Cache == "file" {
+ return newFileCache(conf.File.Path, defaultCacheExpiry)
+ }
+
+ if conf.Cache == "memory" {
+ return &MemoryCache{
+ m: map[string][]byte{},
+ mtx: sync.RWMutex{},
+ }, nil
+ }
+
+ if conf.Cache == "none" || conf.Cache == "" {
+ return &NoneCache{}, nil
+ }
+
+ return nil, fmt.Errorf("unable to resolve given cache %s", conf.Cache)
+}
diff --git a/cache/factory_test.go b/cache/factory_test.go
new file mode 100644
index 0000000..310e3f2
--- /dev/null
+++ b/cache/factory_test.go
@@ -0,0 +1,62 @@
+package cache_test
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/agravelot/imageopti/cache"
+ "github.com/agravelot/imageopti/config"
+)
+
+func TestNew(t *testing.T) {
+ type args struct {
+ conf config.Config
+ }
+
+ tests := []struct {
+ name string
+ args args
+ want cache.Cache
+ wantErr bool
+ }{
+ {
+ name: "should be able to memory cache",
+ args: args{config.Config{Processor: "none", Cache: "memory"}},
+ want: &cache.MemoryCache{},
+ wantErr: false,
+ },
+ {
+ name: "should be able to memory cache",
+ args: args{config.Config{Processor: "none", Cache: "none"}},
+ want: &cache.NoneCache{},
+ wantErr: false,
+ },
+ {
+ name: "should not be able to init cache without valid driver",
+ args: args{
+ config.Config{
+ Processor: "imaginary",
+ Imaginary: config.ImaginaryProcessorConfig{URL: "http://localhost"},
+ Cache: "unsupported",
+ },
+ },
+ want: nil,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := cache.New(tt.args.conf)
+ if (err != nil) != tt.wantErr {
+ t.Fatalf("New() error = %v, wantErr %v", err, tt.wantErr)
+ }
+
+ typeGot := reflect.TypeOf(got)
+ typeWanted := reflect.TypeOf(tt.want)
+
+ if typeGot != typeWanted {
+ t.Errorf("New() = %v, want %v", typeGot, typeWanted)
+ }
+ })
+ }
+}
diff --git a/cache/file.go b/cache/file.go
new file mode 100644
index 0000000..8634668
--- /dev/null
+++ b/cache/file.go
@@ -0,0 +1,223 @@
+package cache
+
+// Original source : https://github.com/traefik/plugin-simplecache
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+var errCacheMiss = errors.New("cache miss")
+
+type fileCache struct {
+ path string
+ pm *pathMutex
+}
+
+func newFileCache(path string, vacuum time.Duration) (*fileCache, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return nil, fmt.Errorf("invalid cache path: %w", err)
+ }
+
+ if !info.IsDir() {
+ return nil, errors.New("path must be a directory")
+ }
+
+ fc := &fileCache{
+ path: path,
+ pm: &pathMutex{lock: map[string]*fileLock{}},
+ }
+
+ go fc.vacuum(vacuum)
+
+ return fc, nil
+}
+
+func (c *fileCache) vacuum(interval time.Duration) {
+ timer := time.NewTicker(interval)
+ defer timer.Stop()
+
+ for range timer.C {
+ _ = filepath.Walk(c.path, func(path string, info os.FileInfo, err error) error {
+ switch {
+ case err != nil:
+ return err
+ case info.IsDir():
+ return nil
+ }
+
+ mu := c.pm.MutexAt(filepath.Base(path))
+ mu.Lock()
+ defer mu.Unlock()
+
+ // Get the expiry.
+ var t [8]byte
+ f, err := os.Open(filepath.Clean(path))
+ if err != nil {
+ // Just skip the file in this case.
+ return nil
+ }
+ if n, err := f.Read(t[:]); err != nil && n != 8 {
+ return nil
+ }
+ _ = f.Close()
+
+ expires := time.Unix(int64(binary.LittleEndian.Uint64(t[:])), 0)
+ if !expires.Before(time.Now()) {
+ return nil
+ }
+
+ // Delete the file.
+ _ = os.Remove(path)
+ return nil
+ })
+ }
+}
+
+func (c *fileCache) Get(key string) ([]byte, error) {
+ mu := c.pm.MutexAt(key)
+ mu.RLock()
+ defer mu.RUnlock()
+
+ p := keyPath(c.path, key)
+ if info, err := os.Stat(p); err != nil || info.IsDir() {
+ return nil, errCacheMiss
+ }
+
+ b, err := ioutil.ReadFile(filepath.Clean(p))
+ if err != nil {
+ return nil, fmt.Errorf("error reading file %q: %w", p, err)
+ }
+
+ expires := time.Unix(int64(binary.LittleEndian.Uint64(b[:8])), 0)
+ if expires.Before(time.Now()) {
+ _ = os.Remove(p)
+ return nil, errCacheMiss
+ }
+
+ return b[8:], nil
+}
+
+func (c *fileCache) Set(key string, val []byte, expiry time.Duration) error {
+ mu := c.pm.MutexAt(key)
+ mu.Lock()
+ defer mu.Unlock()
+
+ p := keyPath(c.path, key)
+ if err := os.MkdirAll(filepath.Dir(p), 0700); err != nil {
+ return fmt.Errorf("error creating file path: %w", err)
+ }
+
+ f, err := os.OpenFile(filepath.Clean(p), os.O_RDWR|os.O_CREATE, 0600)
+ if err != nil {
+ return fmt.Errorf("error creating file: %w", err)
+ }
+
+ defer func() {
+ _ = f.Close()
+ }()
+
+ timestamp := uint64(time.Now().Add(expiry).Unix())
+
+ var t [8]byte
+
+ binary.LittleEndian.PutUint64(t[:], timestamp)
+
+ if _, err = f.Write(t[:]); err != nil {
+ return fmt.Errorf("error writing file: %w", err)
+ }
+
+ if _, err = f.Write(val); err != nil {
+ return fmt.Errorf("error writing file: %w", err)
+ }
+
+ return nil
+}
+
+func keyHash(key string) [4]byte {
+ h := crc32.Checksum([]byte(key), crc32.IEEETable)
+
+ var b [4]byte
+
+ binary.LittleEndian.PutUint32(b[:], h)
+
+ return b
+}
+
+func keyPath(path, key string) string {
+ h := keyHash(key)
+ key = strings.NewReplacer("/", "-", ":", "_").Replace(key)
+
+ return filepath.Join(
+ path,
+ hex.EncodeToString(h[0:1]),
+ hex.EncodeToString(h[1:2]),
+ hex.EncodeToString(h[2:3]),
+ hex.EncodeToString(h[3:4]),
+ key,
+ )
+}
+
+type pathMutex struct {
+ mu sync.Mutex
+ lock map[string]*fileLock
+}
+
+func (m *pathMutex) MutexAt(path string) *fileLock {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if fl, ok := m.lock[path]; ok {
+ fl.ref++
+ return fl
+ }
+
+ fl := &fileLock{ref: 1}
+ fl.cleanup = func() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ fl.ref--
+ if fl.ref == 0 {
+ delete(m.lock, path)
+ }
+ }
+ m.lock[path] = fl
+
+ return fl
+}
+
+type fileLock struct {
+ ref int
+ cleanup func()
+
+ mu sync.RWMutex
+}
+
+func (l *fileLock) RLock() {
+ l.mu.RLock()
+}
+
+func (l *fileLock) RUnlock() {
+ l.mu.RUnlock()
+ l.cleanup()
+}
+
+func (l *fileLock) Lock() {
+ l.mu.Lock()
+}
+
+func (l *fileLock) Unlock() {
+ l.mu.Unlock()
+ l.cleanup()
+}
diff --git a/cache/file_test.go b/cache/file_test.go
new file mode 100644
index 0000000..e2c0f0b
--- /dev/null
+++ b/cache/file_test.go
@@ -0,0 +1,175 @@
+package cache
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+const testCacheKey = "GETlocalhost:8080/test/path"
+
+func TestFileCache(t *testing.T) {
+ dir := createTempDir(t)
+
+ fc, err := newFileCache(dir, time.Second)
+ if err != nil {
+ t.Errorf("unexpected newFileCache error: %v", err)
+ }
+
+ _, err = fc.Get(testCacheKey)
+ if err == nil {
+ t.Error("unexpected cache content")
+ }
+
+ cacheContent := []byte("some random cache content that should be exact")
+
+ err = fc.Set(testCacheKey, cacheContent, time.Second)
+ if err != nil {
+ t.Errorf("unexpected cache set error: %v", err)
+ }
+
+ got, err := fc.Get(testCacheKey)
+ if err != nil {
+ t.Errorf("unexpected cache get error: %v", err)
+ }
+
+ if !bytes.Equal(got, cacheContent) {
+ t.Errorf("unexpected cache content: want %s, got %s", cacheContent, got)
+ }
+}
+
+func TestFileCache_ConcurrentAccess(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatal(r)
+ }
+ }()
+
+ dir := createTempDir(t)
+
+ fc, err := newFileCache(dir, time.Second)
+ if err != nil {
+ t.Errorf("unexpected newFileCache error: %v", err)
+ }
+
+ cacheContent := []byte("some random cache content that should be exact")
+
+ var wg sync.WaitGroup
+
+ wg.Add(2)
+
+ go func() {
+ defer wg.Done()
+
+ for {
+ got, _ := fc.Get(testCacheKey)
+ if got != nil && !bytes.Equal(got, cacheContent) {
+ panic(fmt.Errorf("unexpected cache content: want %s, got %s", cacheContent, got))
+ }
+
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ for {
+ err = fc.Set(testCacheKey, cacheContent, time.Second)
+ if err != nil {
+ panic(fmt.Errorf("unexpected cache set error: %w", err))
+ }
+
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ }
+ }()
+
+ wg.Wait()
+}
+
+func TestPathMutex(t *testing.T) {
+ pm := &pathMutex{lock: map[string]*fileLock{}}
+
+ mu := pm.MutexAt("sometestpath")
+ mu.Lock()
+
+ var (
+ wg sync.WaitGroup
+ locked uint32
+ )
+
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+
+ mu := pm.MutexAt("sometestpath")
+ mu.Lock()
+ defer mu.Unlock()
+
+ atomic.AddUint32(&locked, 1)
+ }()
+
+ // locked should be 0 as we already have a lock on the path.
+ if atomic.LoadUint32(&locked) != 0 {
+ t.Error("unexpected second lock")
+ }
+
+ mu.Unlock()
+
+ wg.Wait()
+
+ if l := len(pm.lock); l > 0 {
+ t.Errorf("unexpected lock length: want 0, got %d", l)
+ }
+}
+
+func BenchmarkFileCache_Get(b *testing.B) {
+ dir := createTempDir(b)
+
+ fc, err := newFileCache(dir, time.Minute)
+ if err != nil {
+ b.Errorf("unexpected newFileCache error: %v", err)
+ }
+
+ _ = fc.Set(testCacheKey, []byte("some random cache content that should be exact"), time.Minute)
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, _ = fc.Get(testCacheKey)
+ }
+}
+
+func createTempDir(tb testing.TB) string {
+ dir, err := ioutil.TempDir("./", "example")
+ if err != nil {
+ tb.Fatal(err)
+ }
+
+ tb.Cleanup(func() {
+ if err = os.RemoveAll(dir); err != nil {
+ tb.Fatal(err)
+ }
+ })
+
+ return dir
+}
diff --git a/cache/memory.go b/cache/memory.go
new file mode 100644
index 0000000..139f53f
--- /dev/null
+++ b/cache/memory.go
@@ -0,0 +1,47 @@
+package cache
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// MemoryCache in-memory cache system struct.
+type MemoryCache struct {
+ mtx sync.RWMutex
+ m map[string][]byte
+}
+
+// Get return cached image with given key.
+func (c *MemoryCache) Get(key string) ([]byte, error) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ v, ok := c.m[key]
+ if !ok {
+ return nil, fmt.Errorf("no result found with key = %s", key)
+ }
+
+ return v, nil
+}
+
+// Set add a value into in-memory with custom expiry.
+func (c *MemoryCache) Set(key string, v []byte, expiry time.Duration) error {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ c.m[key] = v
+
+ time.AfterFunc(expiry, func() {
+ c.delete(key)
+ })
+
+ return nil
+}
+
+func (c *MemoryCache) delete(key string) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ delete(c.m, key)
+}
diff --git a/cache/memory_test.go b/cache/memory_test.go
new file mode 100644
index 0000000..43830f0
--- /dev/null
+++ b/cache/memory_test.go
@@ -0,0 +1,223 @@
+package cache
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestMemoryCache_Get(t *testing.T) {
+ type fields struct {
+ m map[string][]byte
+ }
+
+ type args struct {
+ key string
+ }
+
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ want []byte
+ wantErr bool
+ }{
+ {
+ name: "should be able to get cached media",
+ fields: fields{m: map[string][]byte{
+ "/test.jpeg": []byte("result"),
+ }},
+ args: args{key: "/test.jpeg"},
+ want: []byte("result"),
+ wantErr: false,
+ },
+ {
+ name: "should return error if not defined in cache",
+ fields: fields{m: map[string][]byte{}},
+ args: args{key: "/test.jpeg"},
+ want: nil,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &MemoryCache{
+ mtx: sync.RWMutex{},
+ m: tt.fields.m,
+ }
+ got, err := c.Get(tt.args.key)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("MemoryCache.Get() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("MemoryCache.Get() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestMemoryCache_Set(t *testing.T) {
+ type fields struct {
+ m map[string][]byte
+ }
+
+ type args struct {
+ key string
+ v []byte
+ }
+
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ wantErr bool
+ }{
+ {
+ name: "should be able to set cached media",
+ fields: fields{m: map[string][]byte{}},
+ args: args{key: "/test.jpeg", v: []byte("result")},
+ wantErr: false,
+ },
+ {
+ name: "should be able to replace cached media",
+ fields: fields{m: map[string][]byte{
+ "/test.jpeg": []byte("result"),
+ }},
+ args: args{key: "/test.jpeg", v: []byte("result2")},
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &MemoryCache{
+ mtx: sync.RWMutex{},
+ m: tt.fields.m,
+ }
+ if err := c.Set(tt.args.key, tt.args.v, 100*time.Millisecond); (err != nil) != tt.wantErr {
+ t.Errorf("MemoryCache.Set() error = %v, wantErr %v", err, tt.wantErr)
+ }
+
+ v, err := c.Get(tt.args.key)
+ if err != nil {
+ panic(err)
+ }
+
+ if !bytes.Equal(v, tt.args.v) {
+ t.Errorf("result differ")
+ }
+
+ time.Sleep(1 * time.Second)
+
+ _, err = c.Get(tt.args.key)
+ if err == nil {
+ t.Errorf("value must be deleted after expiry")
+ }
+ })
+ }
+}
+
+func BenchmarkMemoryCache_Get(b *testing.B) {
+ testCacheKey := "test-key"
+
+ c := &MemoryCache{
+ mtx: sync.RWMutex{},
+ m: map[string][]byte{},
+ }
+
+ _ = c.Set(testCacheKey, []byte("a good media value"), 0)
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, _ = c.Get(testCacheKey)
+ }
+}
+
+func BenchmarkMemoryCache_SetSameKey(b *testing.B) {
+ testCacheKey := "test-key"
+
+ c := &MemoryCache{
+ mtx: sync.RWMutex{},
+ m: map[string][]byte{},
+ }
+
+ _ = c.Set(testCacheKey, []byte("a good media value"), 0)
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _ = c.Set(testCacheKey, []byte("a good media value"), 0)
+ }
+}
+
+func BenchmarkMemoryCache_SetNewKey(b *testing.B) {
+ testCacheKey := "test-key"
+
+ c := &MemoryCache{
+ mtx: sync.RWMutex{},
+ m: map[string][]byte{},
+ }
+
+ _ = c.Set(testCacheKey, []byte("a good media value"), 0)
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _ = c.Set(fmt.Sprintf("%s-%d", testCacheKey, i), []byte("a good media value"), 0)
+ }
+}
+
+func TestMemoryCache_delete(t *testing.T) {
+ type fields struct {
+ m map[string][]byte
+ }
+
+ type args struct {
+ key string
+ }
+
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ wantErr bool
+ }{
+ {
+ name: "should return not error when delete non existent cache",
+ fields: fields{m: map[string][]byte{}},
+ args: args{key: "/test.jpeg"},
+ wantErr: false,
+ },
+ {
+ name: "should be able to delete cache",
+ fields: fields{m: map[string][]byte{
+ "/test.jpeg": []byte("result"),
+ }},
+ args: args{key: "/test.jpeg"},
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &MemoryCache{
+ mtx: sync.RWMutex{},
+ m: tt.fields.m,
+ }
+
+ c.delete(tt.args.key)
+
+ _, ok := c.m[tt.args.key]
+
+ if ok {
+ t.Errorf("MemoryCache.delete() key must be deleted : %v", tt.args.key)
+ }
+ })
+ }
+}
diff --git a/cache/none.go b/cache/none.go
new file mode 100644
index 0000000..01636d1
--- /dev/null
+++ b/cache/none.go
@@ -0,0 +1,19 @@
+package cache
+
+import (
+ "fmt"
+ "time"
+)
+
+// NoneCache dummy cache system.
+type NoneCache struct{}
+
+// Get always return nil with not found error.
+func (c *NoneCache) Get(key string) ([]byte, error) {
+ return nil, fmt.Errorf("no result found with key = %s", key)
+}
+
+// Set always return nil.
+func (c *NoneCache) Set(_ string, _ []byte, _ time.Duration) error {
+ return nil
+}
diff --git a/cache/redis.go b/cache/redis.go
new file mode 100644
index 0000000..f762590
--- /dev/null
+++ b/cache/redis.go
@@ -0,0 +1,27 @@
+package cache
+
+import "time"
+
+// RedisCache hold redis client.
+type RedisCache struct{} // client *redis.Client
+
+// Get return cached media with given key from redis.
+func (c *RedisCache) Get(key string) ([]byte, error) {
+ // v, err := c.client.Get(ctx, key).Bytes()
+
+ // if err == redis.Nil {
+ // return nil, fmt.Errorf("no result found with key = %s", key)
+ // } else if err != nil {
+ // return nil, err
+ // }
+
+ // return v, nil
+
+ return []byte("unsafe not supported by yaegi"), nil
+}
+
+// Set add a new image in cache with custom expiry.
+func (c *RedisCache) Set(key string, v []byte, expiry time.Duration) error {
+ // return c.client.Set(ctx, key, v, expiry).Err()
+ return nil
+}
diff --git a/cache/tokenizer.go b/cache/tokenizer.go
new file mode 100644
index 0000000..b0ab5fb
--- /dev/null
+++ b/cache/tokenizer.go
@@ -0,0 +1,17 @@
+package cache
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// Tokenize generate unique key for request caching strategy.
+func Tokenize(req *http.Request) (string, error) {
+ width := req.URL.Query().Get("w")
+
+ if len(width) == 0 {
+ width = "original"
+ }
+
+ return fmt.Sprintf("%s:%s:%s:%s:%s", req.Method, req.URL.Scheme, req.Host, req.URL.Path, width), nil
+}
diff --git a/cache/tokenizer_test.go b/cache/tokenizer_test.go
new file mode 100644
index 0000000..1155f79
--- /dev/null
+++ b/cache/tokenizer_test.go
@@ -0,0 +1,62 @@
+package cache
+
+import (
+ "context"
+ "net/http"
+ "testing"
+)
+
+func TestTokenize(t *testing.T) {
+ ctx := context.Background()
+
+ newRequest := func(method, url string) *http.Request {
+ req, err := http.NewRequestWithContext(ctx, method, url, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return req
+ }
+
+ type args struct {
+ req *http.Request
+ }
+
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "should return correct token",
+ args: args{req: newRequest(http.MethodGet, "http://localhost/img.jpeg")},
+ want: "GET:http:localhost:/img.jpeg:original",
+ wantErr: false,
+ },
+ {
+ name: "should return correct token with width query param",
+ args: args{req: newRequest(http.MethodGet, "http://localhost/img.jpeg?w=1024")},
+ want: "GET:http:localhost:/img.jpeg:1024",
+ wantErr: false,
+ },
+ {
+ name: "should return correct token with width query param",
+ args: args{req: newRequest(http.MethodDelete, "http://localhost/img.jpeg?w=1024")},
+ want: "DELETE:http:localhost:/img.jpeg:1024",
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := Tokenize(tt.args.req)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("Tokenize() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("Tokenize() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 0000000..8ee9e34
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,27 @@
+// Package config provide configurations structs for imageopti middleware.
+package config
+
+// ImaginaryProcessorConfig define imaginary image processor configurations.
+type ImaginaryProcessorConfig struct {
+ URL string `json:"url" yaml:"url" toml:"url"`
+}
+
+// RedisCacheConfig define redis cache system configurations.
+type RedisCacheConfig struct {
+ URL string `json:"url" yaml:"url" toml:"url"`
+}
+
+// FileCacheConfig define file cache system configurations.
+type FileCacheConfig struct {
+ Path string `json:"path" yaml:"path" toml:"path"`
+}
+
+// Config the plugin configuration.
+type Config struct {
+ Processor string `json:"processor" yaml:"processor" toml:"processor"`
+ Imaginary ImaginaryProcessorConfig `json:"imaginary,omitempty" yaml:"imaginary,omitempty" toml:"imaginary,omitempty"`
+ // Cache
+ Cache string `json:"cache" yaml:"cache" toml:"cache"`
+ Redis RedisCacheConfig `json:"redis,omitempty" yaml:"redis,omitempty" toml:"redis,omitempty"`
+ File FileCacheConfig `json:"file,omitempty" yaml:"file,omitempty" toml:"file,omitempty"`
+}
diff --git a/demo.go b/demo.go
deleted file mode 100644
index 88d8c18..0000000
--- a/demo.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Package plugindemo a demo plugin.
-package plugindemo
-
-import (
- "bytes"
- "context"
- "fmt"
- "net/http"
- "text/template"
-)
-
-// Config the plugin configuration.
-type Config struct {
- Headers map[string]string `json:"headers,omitempty"`
-}
-
-// CreateConfig creates the default plugin configuration.
-func CreateConfig() *Config {
- return &Config{
- Headers: make(map[string]string),
- }
-}
-
-// Demo a Demo plugin.
-type Demo struct {
- next http.Handler
- headers map[string]string
- name string
- template *template.Template
-}
-
-// New created a new Demo plugin.
-func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) {
- if len(config.Headers) == 0 {
- return nil, fmt.Errorf("headers cannot be empty")
- }
-
- return &Demo{
- headers: config.Headers,
- next: next,
- name: name,
- template: template.New("demo").Delims("[[", "]]"),
- }, nil
-}
-
-func (a *Demo) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- for key, value := range a.headers {
- tmpl, err := a.template.Parse(value)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- writer := &bytes.Buffer{}
-
- err = tmpl.Execute(writer, req)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- req.Header.Set(key, writer.String())
- }
-
- a.next.ServeHTTP(rw, req)
-}
diff --git a/demo/.env.example b/demo/.env.example
new file mode 100644
index 0000000..c7679d3
--- /dev/null
+++ b/demo/.env.example
@@ -0,0 +1 @@
+PILOT_TOKEN=
\ No newline at end of file
diff --git a/demo/.gitignore b/demo/.gitignore
new file mode 100644
index 0000000..2eea525
--- /dev/null
+++ b/demo/.gitignore
@@ -0,0 +1 @@
+.env
\ No newline at end of file
diff --git a/demo/frontend/docker-compose.yml b/demo/frontend/docker-compose.yml
new file mode 100644
index 0000000..ed7706b
--- /dev/null
+++ b/demo/frontend/docker-compose.yml
@@ -0,0 +1,24 @@
+version: "3.8"
+
+services:
+ frontend:
+ image: nginx:alpine
+ networks:
+ - traefik-net
+ expose:
+ - 80
+ volumes:
+ - "../frontend/html:/usr/share/nginx/html"
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.demo.rule=Host(`demo.localhost`)"
+ - "traefik.http.middlewares.imageopti.plugin.dev.config.processor=imaginary"
+ - "traefik.http.middlewares.imageopti.plugin.dev.config.imaginary.url=http://imaginary:9000"
+ - "traefik.http.middlewares.imageopti.plugin.dev.config.cache=file"
+ - "traefik.http.middlewares.imageopti.plugin.dev.config.file.path=/root"
+ - "traefik.http.routers.demo.middlewares=imageopti"
+
+networks:
+ traefik-net:
+ external:
+ name: traefik-net
diff --git a/demo/frontend/html/compressed.html b/demo/frontend/html/compressed.html
new file mode 100644
index 0000000..0b67889
--- /dev/null
+++ b/demo/frontend/html/compressed.html
@@ -0,0 +1,7 @@
+
+
+
diff --git a/demo/frontend/html/index.html b/demo/frontend/html/index.html
new file mode 100644
index 0000000..8d74437
--- /dev/null
+++ b/demo/frontend/html/index.html
@@ -0,0 +1,9 @@
+
+Welcome to image optimizer demo
+
+
+
+
diff --git a/demo/frontend/html/original.html b/demo/frontend/html/original.html
new file mode 100644
index 0000000..a4be2a1
--- /dev/null
+++ b/demo/frontend/html/original.html
@@ -0,0 +1,6 @@
+
+
diff --git a/demo/frontend/html/reponsive.html b/demo/frontend/html/reponsive.html
new file mode 100644
index 0000000..9b66afc
--- /dev/null
+++ b/demo/frontend/html/reponsive.html
@@ -0,0 +1,11 @@
+
+
+
diff --git a/demo/frontend/html/very_big.jpg b/demo/frontend/html/very_big.jpg
new file mode 100644
index 0000000..27b79f1
Binary files /dev/null and b/demo/frontend/html/very_big.jpg differ
diff --git a/demo/gateway/Dockerfile b/demo/gateway/Dockerfile
new file mode 100644
index 0000000..4b634b0
--- /dev/null
+++ b/demo/gateway/Dockerfile
@@ -0,0 +1 @@
+FROM traefik:2.4
diff --git a/demo/gateway/docker-compose.yml b/demo/gateway/docker-compose.yml
new file mode 100644
index 0000000..4f942da
--- /dev/null
+++ b/demo/gateway/docker-compose.yml
@@ -0,0 +1,90 @@
+version: "3.3"
+
+services:
+ gateway:
+ build: ../gateway
+ command:
+ # - "--log.level=DEBUG"
+ - "--api.insecure=true"
+ - "--providers.docker=true"
+ - "--providers.docker.network=traefik-net"
+ - "--providers.docker.exposedbydefault=false"
+ - "--providers.docker.endpoint=unix:///var/run/docker.sock"
+ - "--entrypoints.web.address=:80"
+ - "--pilot.token=${PILOT_TOKEN}"
+ - "--metrics=true"
+ - "--metrics.prometheus=true"
+ - "--metrics.prometheus.buckets=0.100000, 0.300000, 1.200000, 5.000000"
+ - "--entryPoints.metrics.address=:8082"
+ - "--metrics.prometheus.entryPoint=metrics"
+ - "--experimental.devPlugin.goPath=/plugins"
+ - "--experimental.devPlugin.moduleName=github.com/agravelot/imageopti"
+ ports:
+ - "80:80"
+ - "443:443"
+ - "8080:8080"
+ volumes:
+ - "/home/agravelot/Lab/traefik-image-optimization:/plugins/src/github.com/agravelot/imageopti:rw,delegated"
+ # - "./gateway/traefik.yml:/etc/traefik/traefik.yml:ro,delegated"
+ - "letsencrypt:/letsencrypt:rw,delegated"
+ - "/var/run/docker.sock:/var/run/docker.sock:ro"
+ networks:
+ - metrics
+ - traefik-net
+
+ prometheus:
+ image: prom/prometheus
+ volumes:
+ - ./prometheus/:/etc/prometheus/
+ - prometheus_data:/prometheus
+ command:
+ - '--config.file=/etc/prometheus/prometheus.yml'
+ - '--storage.tsdb.path=/prometheus'
+ - '--web.console.libraries=/usr/share/prometheus/console_libraries'
+ - '--web.console.templates=/usr/share/prometheus/consoles'
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.prometheus.rule=Host(`prometheus.localhost`)"
+ # - "traefik.http.routers.prometheus.service=prometheus"
+ # - "traefik.http.routers.prometheus.entrypoints=web"
+ # - "traefik.http.routers.prometheus.service=prometheus"
+ # - "traefik.http.services.prometheus.loadbalancer.server.port=9090"
+ # - "traefik.docker.network=inbound"
+ networks:
+ - metrics
+ - traefik-net
+
+ grafana:
+ image: grafana/grafana
+ depends_on:
+ - prometheus
+ volumes:
+ - grafana_data:/var/lib/grafana
+ - ./grafana/provisioning/:/etc/grafana/provisioning/
+ environment:
+ GF_LOG_LEVEL: 'debug'
+ env_file:
+ - ./grafana/config.monitoring
+ user: "104"
+ networks:
+ - metrics
+ - traefik-net
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.grafana.rule=Host(`grafana.localhost`)"
+ # - "traefik.http.routers.grafana.service=grafana"
+ # - "traefik.http.routers.grafana.entrypoints=web"
+ # - "traefik.http.routers.grafana.service=grafana"
+ # - "traefik.http.services.grafana.loadbalancer.server.port=3000"
+ # - "traefik.docker.network=inbound"
+
+volumes:
+ letsencrypt:
+ prometheus_data:
+ grafana_data:
+
+networks:
+ metrics:
+ traefik-net:
+ external:
+ name: traefik-net
diff --git a/demo/gateway/grafana/config.monitoring b/demo/gateway/grafana/config.monitoring
new file mode 100644
index 0000000..c458967
--- /dev/null
+++ b/demo/gateway/grafana/config.monitoring
@@ -0,0 +1,3 @@
+GF_SECURITY_ADMIN_PASSWORD=admin
+GF_USERS_ALLOW_SIGN_UP=false
+GF_INSTALL_PLUGINS=grafana-piechart-panel
diff --git a/demo/gateway/grafana/provisioning/dashboards/dashboard.yml b/demo/gateway/grafana/provisioning/dashboards/dashboard.yml
new file mode 100644
index 0000000..bc14030
--- /dev/null
+++ b/demo/gateway/grafana/provisioning/dashboards/dashboard.yml
@@ -0,0 +1,21 @@
+apiVersion: 1
+
+providers:
+ # provider name
+- name: 'default'
+ # org id. will default to orgId 1 if not specified
+ orgId: 1
+ # name of the dashboard folder. Required
+ folder: ''
+ # folder UID. will be automatically generated if not specified
+ folderUid: ''
+ # provider type. Required
+ type: file
+ # disable dashboard deletion
+ disableDeletion: false
+ # enable dashboard editing
+ editable: true
+ # how often Grafana will scan for changed dashboards
+ updateIntervalSeconds: 10
+ options:
+ path: /etc/grafana/provisioning/dashboards
\ No newline at end of file
diff --git a/demo/gateway/grafana/provisioning/dashboards/traefik_rev4.json b/demo/gateway/grafana/provisioning/dashboards/traefik_rev4.json
new file mode 100644
index 0000000..05e4787
--- /dev/null
+++ b/demo/gateway/grafana/provisioning/dashboards/traefik_rev4.json
@@ -0,0 +1,693 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Traefik dashboard prometheus",
+ "editable": true,
+ "gnetId": 4475,
+ "graphTooltip": 0,
+ "id": 1,
+ "iteration": 1574689270305,
+ "links": [],
+ "panels": [
+ {
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 10,
+ "title": "$service stats",
+ "type": "row"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#d44a3a",
+ "rgba(237, 129, 40, 0.89)",
+ "#299c46"
+ ],
+ "datasource": "Prometheus",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 1
+ },
+ "id": 1,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "count(traefik_service_open_connections)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "0,1",
+      "title": "Number of Traefik Services",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "OK",
+ "value": "1"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "datasource": "Prometheus",
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 1
+ },
+ "id": 2,
+ "links": [],
+ "options": {
+ "fieldOptions": {
+ "calcs": [
+ "mean"
+ ],
+ "defaults": {
+ "mappings": [
+ {
+ "id": 0,
+ "op": "=",
+ "text": "N/A",
+ "type": 1,
+ "value": "null"
+ }
+ ],
+ "max": 5,
+ "min": 0,
+ "nullValueMode": "connected",
+ "thresholds": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 3
+ }
+ ],
+ "unit": "none"
+ },
+ "override": {},
+ "values": false
+ },
+ "orientation": "horizontal",
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "6.5.0",
+ "targets": [
+ {
+ "expr": "traefik_service_request_duration_seconds_sum{service=\"$service\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{$service}} | {{method}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Average response time",
+ "type": "gauge"
+ },
+ {
+ "aliasColors": {},
+ "breakPoint": "50%",
+ "cacheTimeout": null,
+ "combine": {
+ "label": "Others",
+ "threshold": 0
+ },
+ "datasource": "Prometheus",
+ "fontSize": "80%",
+ "format": "short",
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 16,
+ "y": 1
+ },
+ "id": 4,
+ "interval": null,
+ "legend": {
+ "show": true,
+ "values": true
+ },
+ "legendType": "Under graph",
+ "links": [],
+ "maxDataPoints": 3,
+ "nullPointMode": "connected",
+ "options": {},
+ "pieType": "pie",
+ "pluginVersion": "6.5.0",
+ "strokeWidth": 1,
+ "targets": [
+ {
+ "expr": "avg_over_time(traefik_service_request_duration_seconds_sum{service=\"$service\"}[5m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{service}}",
+ "refId": "A"
+ }
+ ],
+      "title": "Average Service response time",
+ "type": "grafana-piechart-panel",
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "datasource": "Prometheus",
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 3,
+ "links": [],
+ "options": {
+ "displayMode": "basic",
+ "fieldOptions": {
+ "calcs": [
+ "range"
+ ],
+ "defaults": {
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ],
+ "unit": "short"
+ },
+ "override": {},
+ "values": false
+ },
+ "orientation": "horizontal"
+ },
+ "pluginVersion": "6.5.0",
+ "targets": [
+ {
+ "expr": "avg(traefik_service_requests_total{service=\"$service\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{$service}} | {{method}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total requests over 5min $service",
+ "type": "bargauge"
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "id": 12,
+ "panels": [],
+ "title": "Global stats",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code=\"200\"}[5m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{method}} : {{code}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Status code 200 over 5min",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "datasource": "Prometheus",
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 16
+ },
+ "id": 6,
+ "links": [],
+ "options": {
+ "displayMode": "basic",
+ "fieldOptions": {
+ "calcs": [
+ "mean"
+ ],
+ "defaults": {
+ "decimals": 0,
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ],
+ "unit": "short"
+ },
+ "limit": 10,
+ "override": {},
+ "values": false
+ },
+ "orientation": "horizontal"
+ },
+ "pluginVersion": "6.5.0",
+ "targets": [
+ {
+ "expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code!=\"200\"}[5m]) * 100",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{ method }} : {{code}}",
+ "refId": "A"
+ },
+ {
+ "expr": "avg_over_time",
+ "refId": "B"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Entrypoint Error Codes",
+ "type": "bargauge"
+ },
+ {
+ "aliasColors": {},
+ "breakPoint": "50%",
+ "cacheTimeout": null,
+ "combine": {
+ "label": "Others",
+ "threshold": 0
+ },
+ "datasource": "Prometheus",
+ "fontSize": "80%",
+ "format": "short",
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 23
+ },
+ "id": 7,
+ "interval": null,
+ "legend": {
+ "show": true,
+ "values": true
+ },
+ "legendType": "Right side",
+ "links": [],
+ "maxDataPoints": 3,
+ "nullPointMode": "connected",
+ "options": {},
+ "pieType": "pie",
+ "strokeWidth": 1,
+ "targets": [
+ {
+ "expr": "sum(rate(traefik_service_requests_total[5m])) by (service) ",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{ service }}",
+ "refId": "A"
+ }
+ ],
+ "title": "Requests by service",
+ "type": "grafana-piechart-panel",
+ "valueName": "total"
+ },
+ {
+ "aliasColors": {},
+ "breakPoint": "50%",
+ "cacheTimeout": null,
+ "combine": {
+ "label": "Others",
+ "threshold": 0
+ },
+ "datasource": "Prometheus",
+ "fontSize": "80%",
+ "format": "short",
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 23
+ },
+ "id": 8,
+ "interval": null,
+ "legend": {
+ "show": true,
+ "values": true
+ },
+ "legendType": "Right side",
+ "links": [],
+ "maxDataPoints": 3,
+ "nullPointMode": "connected",
+ "options": {},
+ "pieType": "pie",
+ "strokeWidth": 1,
+ "targets": [
+ {
+ "expr": "sum(rate(traefik_entrypoint_requests_total{entrypoint =~ \"$entrypoint\"}[5m])) by (entrypoint) ",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{ entrypoint }}",
+ "refId": "A"
+ }
+ ],
+ "title": "Requests by protocol",
+ "type": "grafana-piechart-panel",
+ "valueName": "total"
+ }
+ ],
+ "schemaVersion": 21,
+ "style": "dark",
+ "tags": [
+ "traefik",
+ "prometheus"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {
+ "tags": [],
+ "text": "prometheus@docker",
+ "value": "prometheus@docker"
+ },
+ "datasource": "Prometheus",
+ "definition": "label_values(service)",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "service",
+ "options": [],
+ "query": "label_values(service)",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "tags": [],
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": "Prometheus",
+ "definition": "label_values(entrypoint)",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": true,
+ "name": "entrypoint",
+ "options": [],
+ "query": "label_values(entrypoint)",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "200",
+ "value": "200"
+ },
+ "datasource": "Prometheus",
+ "definition": "label_values(code)",
+ "hide": 2,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "code",
+ "options": [
+ {
+ "selected": true,
+ "text": "200",
+ "value": "200"
+ },
+ {
+ "selected": false,
+ "text": "302",
+ "value": "302"
+ },
+ {
+ "selected": false,
+ "text": "304",
+ "value": "304"
+ },
+ {
+ "selected": false,
+ "text": "400",
+ "value": "400"
+ },
+ {
+ "selected": false,
+ "text": "404",
+ "value": "404"
+ },
+ {
+ "selected": false,
+ "text": "429",
+ "value": "429"
+ },
+ {
+ "selected": false,
+ "text": "499",
+ "value": "499"
+ },
+ {
+ "selected": false,
+ "text": "500",
+ "value": "500"
+ },
+ {
+ "selected": false,
+ "text": "503",
+ "value": "503"
+ }
+ ],
+ "query": "label_values(code)",
+ "refresh": 0,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Traefik2",
+ "uid": "qPdAviJmz1",
+ "version": 1
+}
\ No newline at end of file
diff --git a/demo/gateway/grafana/provisioning/datasources/datasource.yml b/demo/gateway/grafana/provisioning/datasources/datasource.yml
new file mode 100644
index 0000000..7524447
--- /dev/null
+++ b/demo/gateway/grafana/provisioning/datasources/datasource.yml
@@ -0,0 +1,50 @@
+# config file version
+apiVersion: 1
+
+# list of datasources that should be deleted from the database
+deleteDatasources:
+ - name: Prometheus
+ orgId: 1
+
+# list of datasources to insert/update depending
+# whats available in the database
+datasources:
+ # name of the datasource. Required
+- name: Prometheus
+ # datasource type. Required
+ type: prometheus
+ # access mode. direct or proxy. Required
+ access: browser
+ # org id. will default to orgId 1 if not specified
+ orgId: 1
+ # url
+ url: http://prometheus:9090
+ # database password, if used
+ password:
+ # database user, if used
+ user:
+ # database name, if used
+ database:
+ # enable/disable basic auth
+ basicAuth: false
+ # basic auth username
+ basicAuthUser: admin
+ # basic auth password
+ basicAuthPassword: foobar
+ # enable/disable with credentials headers
+ withCredentials:
+ # mark as default datasource. Max one per org
+ isDefault: true
+ #