diff --git a/.github/workflows/go-cross.yml b/.github/workflows/go-cross.yml index f579d66..91eb0c6 100644 --- a/.github/workflows/go-cross.yml +++ b/.github/workflows/go-cross.yml @@ -11,7 +11,7 @@ jobs: strategy: matrix: - go-version: [ 1.15, 1.x ] + go-version: [ 1.16, 1.x ] os: [ubuntu-latest, macos-latest, windows-latest] steps: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 630b69a..52c7ad2 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -14,8 +14,8 @@ jobs: name: Main Process runs-on: ubuntu-latest env: - GO_VERSION: 1.15 - GOLANGCI_LINT_VERSION: v1.33.0 + GO_VERSION: 1.16 + GOLANGCI_LINT_VERSION: v1.39.0 YAEGI_VERSION: v0.9.8 CGO_ENABLED: 0 defaults: diff --git a/.gitignore b/.gitignore index f32e31a..ffd2e01 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ .idea/ .DS_Store +cover.html +profile.cov \ No newline at end of file diff --git a/.golangci.toml b/.golangci.toml index c9010b3..7ffdeb4 100644 --- a/.golangci.toml +++ b/.golangci.toml @@ -36,6 +36,13 @@ "testpackage", "paralleltest", "tparallel", + "scopelint", + "godox", + "funlen", # Re-enable it + "gocognit", # Re-enable it + "cyclop", # Re-enable it + "exhaustivestruct", + "nilerr", ] [issues] diff --git a/.traefik.yml b/.traefik.yml index 2fcbfe8..b2eb797 100644 --- a/.traefik.yml +++ b/.traefik.yml @@ -1,11 +1,12 @@ -displayName: Demo Plugin +displayName: Image optimizer type: middleware -import: github.com/traefik/plugindemo +import: github.com/agravelot/imageopti -summary: '[Demo] Add Request Header' +summary: 'Image optimizer middleware is a middleware designed to optimize image responses on the fly.' 
testData: - Headers: - X-Demo: test - X-URL: '{{URL}}' + config: + processor: none + cache: none + diff --git a/Makefile b/Makefile index 04f6f05..137a5bb 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: lint test vendor clean +.PHONY: lint test vendor clean coverage demo export GO111MODULE=on @@ -8,6 +8,8 @@ lint: golangci-lint run test: + rm -fr .cache | true + mkdir .cache go test -v -cover ./... yaegi_test: @@ -17,4 +19,19 @@ vendor: go mod vendor clean: - rm -rf ./vendor \ No newline at end of file + rm -rf ./vendor + +coverage: + rm profile.cov cover.html && go test -v -coverpkg=./... -coverprofile=profile.cov ./... && go tool cover -html=profile.cov -o cover.html + +demo-init: + docker network create traefik-net | true + +demo-up: demo-init + docker-compose --env-file=demo/.env -f demo/gateway/docker-compose.yml -f demo/frontend/docker-compose.yml -f demo/imaginary/docker-compose.yml up -d + +demo-restart: demo-init + docker-compose --env-file=demo/.env -f demo/gateway/docker-compose.yml -f demo/frontend/docker-compose.yml -f demo/imaginary/docker-compose.yml restart + +demo-logs: demo-up + docker-compose --env-file=demo/.env -f demo/gateway/docker-compose.yml -f demo/frontend/docker-compose.yml -f demo/imaginary/docker-compose.yml logs -f gateway imaginary \ No newline at end of file diff --git a/cache/factory.go b/cache/factory.go new file mode 100644 index 0000000..05afc4a --- /dev/null +++ b/cache/factory.go @@ -0,0 +1,51 @@ +// Package cache provide caching systems for images. +package cache + +import ( + "fmt" + "sync" + "time" + + "github.com/agravelot/imageopti/config" +) + +// Cache Define cache system interface. +type Cache interface { + Get(key string) ([]byte, error) + Set(key string, val []byte, expiry time.Duration) error +} + +const defaultCacheExpiry = 100 * time.Second + +// New is the cache factory to instantiate a new instance of cache. 
+func New(conf config.Config) (Cache, error) { + // if conf.Processor == "redis" { + // opt, err := redis.ParseURL(conf.Redis.URL) + // if err != nil { + // return nil, err + // } + + // client := redis.NewClient(opt) + + // return &RedisCache{ + // client: client, + // }, nil + // } + + if conf.Cache == "file" { + return newFileCache(conf.File.Path, defaultCacheExpiry) + } + + if conf.Cache == "memory" { + return &MemoryCache{ + m: map[string][]byte{}, + mtx: sync.RWMutex{}, + }, nil + } + + if conf.Cache == "none" || conf.Cache == "" { + return &NoneCache{}, nil + } + + return nil, fmt.Errorf("unable to resolve given cache %s", conf.Cache) +} diff --git a/cache/factory_test.go b/cache/factory_test.go new file mode 100644 index 0000000..310e3f2 --- /dev/null +++ b/cache/factory_test.go @@ -0,0 +1,62 @@ +package cache_test + +import ( + "reflect" + "testing" + + "github.com/agravelot/imageopti/cache" + "github.com/agravelot/imageopti/config" +) + +func TestNew(t *testing.T) { + type args struct { + conf config.Config + } + + tests := []struct { + name string + args args + want cache.Cache + wantErr bool + }{ + { + name: "should be able to memory cache", + args: args{config.Config{Processor: "none", Cache: "memory"}}, + want: &cache.MemoryCache{}, + wantErr: false, + }, + { + name: "should be able to memory cache", + args: args{config.Config{Processor: "none", Cache: "none"}}, + want: &cache.NoneCache{}, + wantErr: false, + }, + { + name: "should not be able to init cache without valid driver", + args: args{ + config.Config{ + Processor: "imaginary", + Imaginary: config.ImaginaryProcessorConfig{URL: "http://localhost"}, + Cache: "unsupported", + }, + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := cache.New(tt.args.conf) + if (err != nil) != tt.wantErr { + t.Fatalf("New() error = %v, wantErr %v", err, tt.wantErr) + } + + typeGot := reflect.TypeOf(got) + typeWanted := reflect.TypeOf(tt.want) + 
+ if typeGot != typeWanted { + t.Errorf("New() = %v, want %v", typeGot, typeWanted) + } + }) + } +} diff --git a/cache/file.go b/cache/file.go new file mode 100644 index 0000000..8634668 --- /dev/null +++ b/cache/file.go @@ -0,0 +1,223 @@ +package cache + +// Original source : https://github.com/traefik/plugin-simplecache + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +var errCacheMiss = errors.New("cache miss") + +type fileCache struct { + path string + pm *pathMutex +} + +func newFileCache(path string, vacuum time.Duration) (*fileCache, error) { + info, err := os.Stat(path) + if err != nil { + return nil, fmt.Errorf("invalid cache path: %w", err) + } + + if !info.IsDir() { + return nil, errors.New("path must be a directory") + } + + fc := &fileCache{ + path: path, + pm: &pathMutex{lock: map[string]*fileLock{}}, + } + + go fc.vacuum(vacuum) + + return fc, nil +} + +func (c *fileCache) vacuum(interval time.Duration) { + timer := time.NewTicker(interval) + defer timer.Stop() + + for range timer.C { + _ = filepath.Walk(c.path, func(path string, info os.FileInfo, err error) error { + switch { + case err != nil: + return err + case info.IsDir(): + return nil + } + + mu := c.pm.MutexAt(filepath.Base(path)) + mu.Lock() + defer mu.Unlock() + + // Get the expiry. + var t [8]byte + f, err := os.Open(filepath.Clean(path)) + if err != nil { + // Just skip the file in this case. + return nil + } + if n, err := f.Read(t[:]); err != nil && n != 8 { + return nil + } + _ = f.Close() + + expires := time.Unix(int64(binary.LittleEndian.Uint64(t[:])), 0) + if !expires.Before(time.Now()) { + return nil + } + + // Delete the file. 
+ _ = os.Remove(path) + return nil + }) + } +} + +func (c *fileCache) Get(key string) ([]byte, error) { + mu := c.pm.MutexAt(key) + mu.RLock() + defer mu.RUnlock() + + p := keyPath(c.path, key) + if info, err := os.Stat(p); err != nil || info.IsDir() { + return nil, errCacheMiss + } + + b, err := ioutil.ReadFile(filepath.Clean(p)) + if err != nil { + return nil, fmt.Errorf("error reading file %q: %w", p, err) + } + + expires := time.Unix(int64(binary.LittleEndian.Uint64(b[:8])), 0) + if expires.Before(time.Now()) { + _ = os.Remove(p) + return nil, errCacheMiss + } + + return b[8:], nil +} + +func (c *fileCache) Set(key string, val []byte, expiry time.Duration) error { + mu := c.pm.MutexAt(key) + mu.Lock() + defer mu.Unlock() + + p := keyPath(c.path, key) + if err := os.MkdirAll(filepath.Dir(p), 0700); err != nil { + return fmt.Errorf("error creating file path: %w", err) + } + + f, err := os.OpenFile(filepath.Clean(p), os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return fmt.Errorf("error creating file: %w", err) + } + + defer func() { + _ = f.Close() + }() + + timestamp := uint64(time.Now().Add(expiry).Unix()) + + var t [8]byte + + binary.LittleEndian.PutUint64(t[:], timestamp) + + if _, err = f.Write(t[:]); err != nil { + return fmt.Errorf("error writing file: %w", err) + } + + if _, err = f.Write(val); err != nil { + return fmt.Errorf("error writing file: %w", err) + } + + return nil +} + +func keyHash(key string) [4]byte { + h := crc32.Checksum([]byte(key), crc32.IEEETable) + + var b [4]byte + + binary.LittleEndian.PutUint32(b[:], h) + + return b +} + +func keyPath(path, key string) string { + h := keyHash(key) + key = strings.NewReplacer("/", "-", ":", "_").Replace(key) + + return filepath.Join( + path, + hex.EncodeToString(h[0:1]), + hex.EncodeToString(h[1:2]), + hex.EncodeToString(h[2:3]), + hex.EncodeToString(h[3:4]), + key, + ) +} + +type pathMutex struct { + mu sync.Mutex + lock map[string]*fileLock +} + +func (m *pathMutex) MutexAt(path string) 
*fileLock { + m.mu.Lock() + defer m.mu.Unlock() + + if fl, ok := m.lock[path]; ok { + fl.ref++ + return fl + } + + fl := &fileLock{ref: 1} + fl.cleanup = func() { + m.mu.Lock() + defer m.mu.Unlock() + + fl.ref-- + if fl.ref == 0 { + delete(m.lock, path) + } + } + m.lock[path] = fl + + return fl +} + +type fileLock struct { + ref int + cleanup func() + + mu sync.RWMutex +} + +func (l *fileLock) RLock() { + l.mu.RLock() +} + +func (l *fileLock) RUnlock() { + l.mu.RUnlock() + l.cleanup() +} + +func (l *fileLock) Lock() { + l.mu.Lock() +} + +func (l *fileLock) Unlock() { + l.mu.Unlock() + l.cleanup() +} diff --git a/cache/file_test.go b/cache/file_test.go new file mode 100644 index 0000000..e2c0f0b --- /dev/null +++ b/cache/file_test.go @@ -0,0 +1,175 @@ +package cache + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "sync" + "sync/atomic" + "testing" + "time" +) + +const testCacheKey = "GETlocalhost:8080/test/path" + +func TestFileCache(t *testing.T) { + dir := createTempDir(t) + + fc, err := newFileCache(dir, time.Second) + if err != nil { + t.Errorf("unexpected newFileCache error: %v", err) + } + + _, err = fc.Get(testCacheKey) + if err == nil { + t.Error("unexpected cache content") + } + + cacheContent := []byte("some random cache content that should be exact") + + err = fc.Set(testCacheKey, cacheContent, time.Second) + if err != nil { + t.Errorf("unexpected cache set error: %v", err) + } + + got, err := fc.Get(testCacheKey) + if err != nil { + t.Errorf("unexpected cache get error: %v", err) + } + + if !bytes.Equal(got, cacheContent) { + t.Errorf("unexpected cache content: want %s, got %s", cacheContent, got) + } +} + +func TestFileCache_ConcurrentAccess(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + defer func() { + if r := recover(); r != nil { + t.Fatal(r) + } + }() + + dir := createTempDir(t) + + fc, err := newFileCache(dir, time.Second) + if err != nil { + t.Errorf("unexpected 
newFileCache error: %v", err) + } + + cacheContent := []byte("some random cache content that should be exact") + + var wg sync.WaitGroup + + wg.Add(2) + + go func() { + defer wg.Done() + + for { + got, _ := fc.Get(testCacheKey) + if got != nil && !bytes.Equal(got, cacheContent) { + panic(fmt.Errorf("unexpected cache content: want %s, got %s", cacheContent, got)) + } + + select { + case <-ctx.Done(): + return + default: + } + } + }() + + go func() { + defer wg.Done() + + for { + err = fc.Set(testCacheKey, cacheContent, time.Second) + if err != nil { + panic(fmt.Errorf("unexpected cache set error: %w", err)) + } + + select { + case <-ctx.Done(): + return + default: + } + } + }() + + wg.Wait() +} + +func TestPathMutex(t *testing.T) { + pm := &pathMutex{lock: map[string]*fileLock{}} + + mu := pm.MutexAt("sometestpath") + mu.Lock() + + var ( + wg sync.WaitGroup + locked uint32 + ) + + wg.Add(1) + + go func() { + defer wg.Done() + + mu := pm.MutexAt("sometestpath") + mu.Lock() + defer mu.Unlock() + + atomic.AddUint32(&locked, 1) + }() + + // locked should be 0 as we already have a lock on the path. 
+ if atomic.LoadUint32(&locked) != 0 { + t.Error("unexpected second lock") + } + + mu.Unlock() + + wg.Wait() + + if l := len(pm.lock); l > 0 { + t.Errorf("unexpected lock length: want 0, got %d", l) + } +} + +func BenchmarkFileCache_Get(b *testing.B) { + dir := createTempDir(b) + + fc, err := newFileCache(dir, time.Minute) + if err != nil { + b.Errorf("unexpected newFileCache error: %v", err) + } + + _ = fc.Set(testCacheKey, []byte("some random cache content that should be exact"), time.Minute) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, _ = fc.Get(testCacheKey) + } +} + +func createTempDir(tb testing.TB) string { + dir, err := ioutil.TempDir("./", "example") + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err = os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + }) + + return dir +} diff --git a/cache/memory.go b/cache/memory.go new file mode 100644 index 0000000..139f53f --- /dev/null +++ b/cache/memory.go @@ -0,0 +1,47 @@ +package cache + +import ( + "fmt" + "sync" + "time" +) + +// MemoryCache in-memory cache system struct. +type MemoryCache struct { + mtx sync.RWMutex + m map[string][]byte +} + +// Get return cached image with given key. +func (c *MemoryCache) Get(key string) ([]byte, error) { + c.mtx.Lock() + defer c.mtx.Unlock() + + v, ok := c.m[key] + if !ok { + return nil, fmt.Errorf("no result found with key = %s", key) + } + + return v, nil +} + +// Set add a value into in-memory with custom expiry. 
+func (c *MemoryCache) Set(key string, v []byte, expiry time.Duration) error { + c.mtx.Lock() + defer c.mtx.Unlock() + + c.m[key] = v + + time.AfterFunc(expiry, func() { + c.delete(key) + }) + + return nil +} + +func (c *MemoryCache) delete(key string) { + c.mtx.Lock() + defer c.mtx.Unlock() + + delete(c.m, key) +} diff --git a/cache/memory_test.go b/cache/memory_test.go new file mode 100644 index 0000000..43830f0 --- /dev/null +++ b/cache/memory_test.go @@ -0,0 +1,223 @@ +package cache + +import ( + "bytes" + "fmt" + "reflect" + "sync" + "testing" + "time" +) + +func TestMemoryCache_Get(t *testing.T) { + type fields struct { + m map[string][]byte + } + + type args struct { + key string + } + + tests := []struct { + name string + fields fields + args args + want []byte + wantErr bool + }{ + { + name: "should be able to get cached media", + fields: fields{m: map[string][]byte{ + "/test.jpeg": []byte("result"), + }}, + args: args{key: "/test.jpeg"}, + want: []byte("result"), + wantErr: false, + }, + { + name: "should return error if not defined in cache", + fields: fields{m: map[string][]byte{}}, + args: args{key: "/test.jpeg"}, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &MemoryCache{ + mtx: sync.RWMutex{}, + m: tt.fields.m, + } + got, err := c.Get(tt.args.key) + if (err != nil) != tt.wantErr { + t.Errorf("MemoryCache.Get() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("MemoryCache.Get() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMemoryCache_Set(t *testing.T) { + type fields struct { + m map[string][]byte + } + + type args struct { + key string + v []byte + } + + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "should be able to set cached media", + fields: fields{m: map[string][]byte{}}, + args: args{key: "/test.jpeg", v: []byte("result")}, + wantErr: false, + }, + { + name: 
"should be able to replace cached media", + fields: fields{m: map[string][]byte{ + "/test.jpeg": []byte("result"), + }}, + args: args{key: "/test.jpeg", v: []byte("result2")}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &MemoryCache{ + mtx: sync.RWMutex{}, + m: tt.fields.m, + } + if err := c.Set(tt.args.key, tt.args.v, 100*time.Millisecond); (err != nil) != tt.wantErr { + t.Errorf("MemoryCache.Set() error = %v, wantErr %v", err, tt.wantErr) + } + + v, err := c.Get(tt.args.key) + if err != nil { + panic(err) + } + + if !bytes.Equal(v, tt.args.v) { + t.Errorf("result differ") + } + + time.Sleep(1 * time.Second) + + _, err = c.Get(tt.args.key) + if err == nil { + t.Errorf("value must be deleted after expiry") + } + }) + } +} + +func BenchmarkMemoryCache_Get(b *testing.B) { + testCacheKey := "test-key" + + c := &MemoryCache{ + mtx: sync.RWMutex{}, + m: map[string][]byte{}, + } + + _ = c.Set(testCacheKey, []byte("a good media value"), 0) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, _ = c.Get(testCacheKey) + } +} + +func BenchmarkMemoryCache_SetSameKey(b *testing.B) { + testCacheKey := "test-key" + + c := &MemoryCache{ + mtx: sync.RWMutex{}, + m: map[string][]byte{}, + } + + _ = c.Set(testCacheKey, []byte("a good media value"), 0) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _ = c.Set(testCacheKey, []byte("a good media value"), 0) + } +} + +func BenchmarkMemoryCache_SetNewKey(b *testing.B) { + testCacheKey := "test-key" + + c := &MemoryCache{ + mtx: sync.RWMutex{}, + m: map[string][]byte{}, + } + + _ = c.Set(testCacheKey, []byte("a good media value"), 0) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _ = c.Set(fmt.Sprintf("%s-%d", testCacheKey, i), []byte("a good media value"), 0) + } +} + +func TestMemoryCache_delete(t *testing.T) { + type fields struct { + m map[string][]byte + } + + type args struct { + key string + } + + tests := 
[]struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "should return not error when delete non existent cache", + fields: fields{m: map[string][]byte{}}, + args: args{key: "/test.jpeg"}, + wantErr: false, + }, + { + name: "should be able to delete cache", + fields: fields{m: map[string][]byte{ + "/test.jpeg": []byte("result"), + }}, + args: args{key: "/test.jpeg"}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &MemoryCache{ + mtx: sync.RWMutex{}, + m: tt.fields.m, + } + + c.delete(tt.args.key) + + _, ok := c.m[tt.args.key] + + if ok { + t.Errorf("MemoryCache.delete() key must be deleted : %v", tt.args.key) + } + }) + } +} diff --git a/cache/none.go b/cache/none.go new file mode 100644 index 0000000..01636d1 --- /dev/null +++ b/cache/none.go @@ -0,0 +1,19 @@ +package cache + +import ( + "fmt" + "time" +) + +// NoneCache dummy cache system. +type NoneCache struct{} + +// Get always return nil with not found error. +func (c *NoneCache) Get(key string) ([]byte, error) { + return nil, fmt.Errorf("no result found with key = %s", key) +} + +// Set always return nil. +func (c *NoneCache) Set(_ string, _ []byte, _ time.Duration) error { + return nil +} diff --git a/cache/redis.go b/cache/redis.go new file mode 100644 index 0000000..f762590 --- /dev/null +++ b/cache/redis.go @@ -0,0 +1,27 @@ +package cache + +import "time" + +// RedisCache hold redis client. +type RedisCache struct{} // client *redis.Client + +// Get return cached media with given key from redis. +func (c *RedisCache) Get(key string) ([]byte, error) { + // v, err := c.client.Get(ctx, key).Bytes() + + // if err == redis.Nil { + // return nil, fmt.Errorf("no result found with key = %s", key) + // } else if err != nil { + // return nil, err + // } + + // return v, nil + + return []byte("unsafe not supported by yaegi"), nil +} + +// Set add a new image in cache with custom expiry. 
+func (c *RedisCache) Set(key string, v []byte, expiry time.Duration) error { + // return c.client.Set(ctx, key, v, expiry).Err() + return nil +} diff --git a/cache/tokenizer.go b/cache/tokenizer.go new file mode 100644 index 0000000..b0ab5fb --- /dev/null +++ b/cache/tokenizer.go @@ -0,0 +1,17 @@ +package cache + +import ( + "fmt" + "net/http" +) + +// Tokenize generate unique key for request caching strategy. +func Tokenize(req *http.Request) (string, error) { + width := req.URL.Query().Get("w") + + if len(width) == 0 { + width = "original" + } + + return fmt.Sprintf("%s:%s:%s:%s:%s", req.Method, req.URL.Scheme, req.Host, req.URL.Path, width), nil +} diff --git a/cache/tokenizer_test.go b/cache/tokenizer_test.go new file mode 100644 index 0000000..1155f79 --- /dev/null +++ b/cache/tokenizer_test.go @@ -0,0 +1,62 @@ +package cache + +import ( + "context" + "net/http" + "testing" +) + +func TestTokenize(t *testing.T) { + ctx := context.Background() + + newRequest := func(method, url string) *http.Request { + req, err := http.NewRequestWithContext(ctx, method, url, nil) + if err != nil { + t.Fatal(err) + } + + return req + } + + type args struct { + req *http.Request + } + + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "should return correct token", + args: args{req: newRequest(http.MethodGet, "http://localhost/img.jpeg")}, + want: "GET:http:localhost:/img.jpeg:original", + wantErr: false, + }, + { + name: "should return correct token with width query param", + args: args{req: newRequest(http.MethodGet, "http://localhost/img.jpeg?w=1024")}, + want: "GET:http:localhost:/img.jpeg:1024", + wantErr: false, + }, + { + name: "should return correct token with width query param", + args: args{req: newRequest(http.MethodDelete, "http://localhost/img.jpeg?w=1024")}, + want: "DELETE:http:localhost:/img.jpeg:1024", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := 
Tokenize(tt.args.req) + if (err != nil) != tt.wantErr { + t.Errorf("Tokenize() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("Tokenize() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..8ee9e34 --- /dev/null +++ b/config/config.go @@ -0,0 +1,27 @@ +// Package config provide configurations structs for imageopti middleware. +package config + +// ImaginaryProcessorConfig define imaginary image processor configurations. +type ImaginaryProcessorConfig struct { + URL string `json:"url" yaml:"url" toml:"url"` +} + +// RedisCacheConfig define redis cache system configurations. +type RedisCacheConfig struct { + URL string `json:"url" yaml:"url" toml:"url"` +} + +// FileCacheConfig define file cache system configurations. +type FileCacheConfig struct { + Path string `json:"path" yaml:"path" toml:"path"` +} + +// Config the plugin configuration. +type Config struct { + Processor string `json:"processor" yaml:"processor" toml:"processor"` + Imaginary ImaginaryProcessorConfig `json:"imaginary,omitempty" yaml:"imaginary,omitempty" toml:"imaginary,omitempty"` + // Cache + Cache string `json:"cache" yaml:"cache" toml:"cache"` + Redis RedisCacheConfig `json:"redis,omitempty" yaml:"redis,omitempty" toml:"redis,omitempty"` + File FileCacheConfig `json:"file,omitempty" yaml:"file,omitempty" toml:"file,omitempty"` +} diff --git a/demo.go b/demo.go deleted file mode 100644 index 88d8c18..0000000 --- a/demo.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package plugindemo a demo plugin. -package plugindemo - -import ( - "bytes" - "context" - "fmt" - "net/http" - "text/template" -) - -// Config the plugin configuration. -type Config struct { - Headers map[string]string `json:"headers,omitempty"` -} - -// CreateConfig creates the default plugin configuration. -func CreateConfig() *Config { - return &Config{ - Headers: make(map[string]string), - } -} - -// Demo a Demo plugin. 
-type Demo struct { - next http.Handler - headers map[string]string - name string - template *template.Template -} - -// New created a new Demo plugin. -func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) { - if len(config.Headers) == 0 { - return nil, fmt.Errorf("headers cannot be empty") - } - - return &Demo{ - headers: config.Headers, - next: next, - name: name, - template: template.New("demo").Delims("[[", "]]"), - }, nil -} - -func (a *Demo) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - for key, value := range a.headers { - tmpl, err := a.template.Parse(value) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - writer := &bytes.Buffer{} - - err = tmpl.Execute(writer, req) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - req.Header.Set(key, writer.String()) - } - - a.next.ServeHTTP(rw, req) -} diff --git a/demo/.env.example b/demo/.env.example new file mode 100644 index 0000000..c7679d3 --- /dev/null +++ b/demo/.env.example @@ -0,0 +1 @@ +PILOT_TOKEN= \ No newline at end of file diff --git a/demo/.gitignore b/demo/.gitignore new file mode 100644 index 0000000..2eea525 --- /dev/null +++ b/demo/.gitignore @@ -0,0 +1 @@ +.env \ No newline at end of file diff --git a/demo/frontend/docker-compose.yml b/demo/frontend/docker-compose.yml new file mode 100644 index 0000000..ed7706b --- /dev/null +++ b/demo/frontend/docker-compose.yml @@ -0,0 +1,24 @@ +version: "3.8" + +services: + frontend: + image: nginx:alpine + networks: + - traefik-net + expose: + - 80 + volumes: + - "../frontend/html:/usr/share/nginx/html" + labels: + - "traefik.enable=true" + - "traefik.http.routers.demo.rule=Host(`demo.localhost`)" + - "traefik.http.middlewares.imageopti.plugin.dev.config.processor=imaginary" + - "traefik.http.middlewares.imageopti.plugin.dev.config.imaginary.url=http://imaginary:9000" + - 
"traefik.http.middlewares.imageopti.plugin.dev.config.cache=file" + - "traefik.http.middlewares.imageopti.plugin.dev.config.file.path=/root" + - "traefik.http.routers.demo.middlewares=imageopti" + +networks: + traefik-net: + external: + name: traefik-net diff --git a/demo/frontend/html/compressed.html b/demo/frontend/html/compressed.html new file mode 100644 index 0000000..0b67889 --- /dev/null +++ b/demo/frontend/html/compressed.html @@ -0,0 +1,7 @@ + + + diff --git a/demo/frontend/html/index.html b/demo/frontend/html/index.html new file mode 100644 index 0000000..8d74437 --- /dev/null +++ b/demo/frontend/html/index.html @@ -0,0 +1,9 @@ +

Welcome to image optimizer demo

+ + + + diff --git a/demo/frontend/html/original.html b/demo/frontend/html/original.html new file mode 100644 index 0000000..a4be2a1 --- /dev/null +++ b/demo/frontend/html/original.html @@ -0,0 +1,6 @@ + + diff --git a/demo/frontend/html/reponsive.html b/demo/frontend/html/reponsive.html new file mode 100644 index 0000000..9b66afc --- /dev/null +++ b/demo/frontend/html/reponsive.html @@ -0,0 +1,11 @@ + + + diff --git a/demo/frontend/html/very_big.jpg b/demo/frontend/html/very_big.jpg new file mode 100644 index 0000000..27b79f1 Binary files /dev/null and b/demo/frontend/html/very_big.jpg differ diff --git a/demo/gateway/Dockerfile b/demo/gateway/Dockerfile new file mode 100644 index 0000000..4b634b0 --- /dev/null +++ b/demo/gateway/Dockerfile @@ -0,0 +1 @@ +FROM traefik:2.4 diff --git a/demo/gateway/docker-compose.yml b/demo/gateway/docker-compose.yml new file mode 100644 index 0000000..4f942da --- /dev/null +++ b/demo/gateway/docker-compose.yml @@ -0,0 +1,90 @@ +version: "3.3" + +services: + gateway: + build: ../gateway + command: + # - "--log.level=DEBUG" + - "--api.insecure=true" + - "--providers.docker=true" + - "--providers.docker.network=traefik-net" + - "--providers.docker.exposedbydefault=false" + - "--providers.docker.endpoint=unix:///var/run/docker.sock" + - "--entrypoints.web.address=:80" + - "--pilot.token=${PILOT_TOKEN}" + - "--metrics=true" + - "--metrics.prometheus=true" + - "--metrics.prometheus.buckets=0.100000, 0.300000, 1.200000, 5.000000" + - "--entryPoints.metrics.address=:8082" + - "--metrics.prometheus.entryPoint=metrics" + - "--experimental.devPlugin.goPath=/plugins" + - "--experimental.devPlugin.moduleName=github.com/agravelot/imageopti" + ports: + - "80:80" + - "443:443" + - "8080:8080" + volumes: + - "/home/agravelot/Lab/traefik-image-optimization:/plugins/src/github.com/agravelot/imageopti:rw,delegated" + # - "./gateway/traefik.yml:/etc/traefik/traefik.yml:ro,delegated" + - "letsencrypt:/letsencrypt:rw,delegated" + - 
"/var/run/docker.sock:/var/run/docker.sock:ro" + networks: + - metrics + - traefik-net + + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + - prometheus_data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + - '--web.console.templates=/usr/share/prometheus/consoles' + labels: + - "traefik.enable=true" + - "traefik.http.routers.prometheus.rule=Host(`prometheus.localhost`)" + # - "traefik.http.routers.prometheus.service=prometheus" + # - "traefik.http.routers.prometheus.entrypoints=web" + # - "traefik.http.routers.prometheus.service=prometheus" + # - "traefik.http.services.prometheus.loadbalancer.server.port=9090" + # - "traefik.docker.network=inbound" + networks: + - metrics + - traefik-net + + grafana: + image: grafana/grafana + depends_on: + - prometheus + volumes: + - grafana_data:/var/lib/grafana + - ./grafana/provisioning/:/etc/grafana/provisioning/ + environment: + GF_LOG_LEVEL: 'debug' + env_file: + - ./grafana/config.monitoring + user: "104" + networks: + - metrics + - traefik-net + labels: + - "traefik.enable=true" + - "traefik.http.routers.grafana.rule=Host(`grafana.localhost`)" + # - "traefik.http.routers.grafana.service=grafana" + # - "traefik.http.routers.grafana.entrypoints=web" + # - "traefik.http.routers.grafana.service=grafana" + # - "traefik.http.services.grafana.loadbalancer.server.port=3000" + # - "traefik.docker.network=inbound" + +volumes: + letsencrypt: + prometheus_data: + grafana_data: + +networks: + metrics: + traefik-net: + external: + name: traefik-net diff --git a/demo/gateway/grafana/config.monitoring b/demo/gateway/grafana/config.monitoring new file mode 100644 index 0000000..c458967 --- /dev/null +++ b/demo/gateway/grafana/config.monitoring @@ -0,0 +1,3 @@ +GF_SECURITY_ADMIN_PASSWORD=admin +GF_USERS_ALLOW_SIGN_UP=false +GF_INSTALL_PLUGINS=grafana-piechart-panel diff 
--git a/demo/gateway/grafana/provisioning/dashboards/dashboard.yml b/demo/gateway/grafana/provisioning/dashboards/dashboard.yml new file mode 100644 index 0000000..bc14030 --- /dev/null +++ b/demo/gateway/grafana/provisioning/dashboards/dashboard.yml @@ -0,0 +1,21 @@ +apiVersion: 1 + +providers: + # provider name +- name: 'default' + # org id. will default to orgId 1 if not specified + orgId: 1 + # name of the dashboard folder. Required + folder: '' + # folder UID. will be automatically generated if not specified + folderUid: '' + # provider type. Required + type: file + # disable dashboard deletion + disableDeletion: false + # enable dashboard editing + editable: true + # how often Grafana will scan for changed dashboards + updateIntervalSeconds: 10 + options: + path: /etc/grafana/provisioning/dashboards \ No newline at end of file diff --git a/demo/gateway/grafana/provisioning/dashboards/traefik_rev4.json b/demo/gateway/grafana/provisioning/dashboards/traefik_rev4.json new file mode 100644 index 0000000..05e4787 --- /dev/null +++ b/demo/gateway/grafana/provisioning/dashboards/traefik_rev4.json @@ -0,0 +1,693 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Traefik dashboard prometheus", + "editable": true, + "gnetId": 4475, + "graphTooltip": 0, + "id": 1, + "iteration": 1574689270305, + "links": [], + "panels": [ + { + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "title": "$service stats", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#d44a3a", + "rgba(237, 129, 40, 0.89)", + "#299c46" + ], + "datasource": "Prometheus", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + 
"thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 1, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(traefik_service_open_connections)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "0,1", + "title": "Number of Traefik Services ", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "OK", + "value": "1" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "datasource": "Prometheus", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 1 + }, + "id": 2, + "links": [], + "options": { + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "max": 5, + "min": 0, + "nullValueMode": "connected", + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 3 + } + ], + "unit": "none" + }, + "override": {}, + "values": false + }, + "orientation": "horizontal", + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "6.5.0", + "targets": [ + { + "expr": "traefik_service_request_duration_seconds_sum{service=\"$service\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{$service}} | {{method}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + 
"title": "Average response time", + "type": "gauge" + }, + { + "aliasColors": {}, + "breakPoint": "50%", + "cacheTimeout": null, + "combine": { + "label": "Others", + "threshold": 0 + }, + "datasource": "Prometheus", + "fontSize": "80%", + "format": "short", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 1 + }, + "id": 4, + "interval": null, + "legend": { + "show": true, + "values": true + }, + "legendType": "Under graph", + "links": [], + "maxDataPoints": 3, + "nullPointMode": "connected", + "options": {}, + "pieType": "pie", + "pluginVersion": "6.5.0", + "strokeWidth": 1, + "targets": [ + { + "expr": "avg_over_time(traefik_service_request_duration_seconds_sum{service=\"$service\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{service}}", + "refId": "A" + } + ], + "title": "Avergae Service response time", + "type": "grafana-piechart-panel", + "valueName": "current" + }, + { + "cacheTimeout": null, + "datasource": "Prometheus", + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 3, + "links": [], + "options": { + "displayMode": "basic", + "fieldOptions": { + "calcs": [ + "range" + ], + "defaults": { + "mappings": [], + "max": 100, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + } + ], + "unit": "short" + }, + "override": {}, + "values": false + }, + "orientation": "horizontal" + }, + "pluginVersion": "6.5.0", + "targets": [ + { + "expr": "avg(traefik_service_requests_total{service=\"$service\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{$service}} | {{method}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total requests over 5min $service", + "type": "bargauge" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 12, + "panels": [], + "title": "Global stats", + "type": "row" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, 
+ "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code=\"200\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{method}} : {{code}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Status code 200 over 5min", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 6, + "links": [], + "options": { + "displayMode": "basic", + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "decimals": 0, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + } + ], + "unit": "short" + }, + "limit": 10, + "override": {}, + "values": false + }, + "orientation": "horizontal" + }, + 
"pluginVersion": "6.5.0", + "targets": [ + { + "expr": "rate(traefik_entrypoint_requests_total{entrypoint=~\"$entrypoint\",code!=\"200\"}[5m]) * 100", + "format": "time_series", + "instant": false, + "intervalFactor": 2, + "legendFormat": "{{ method }} : {{code}}", + "refId": "A" + }, + { + "expr": "avg_over_time", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Entrypoint Error Codes", + "type": "bargauge" + }, + { + "aliasColors": {}, + "breakPoint": "50%", + "cacheTimeout": null, + "combine": { + "label": "Others", + "threshold": 0 + }, + "datasource": "Prometheus", + "fontSize": "80%", + "format": "short", + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 23 + }, + "id": 7, + "interval": null, + "legend": { + "show": true, + "values": true + }, + "legendType": "Right side", + "links": [], + "maxDataPoints": 3, + "nullPointMode": "connected", + "options": {}, + "pieType": "pie", + "strokeWidth": 1, + "targets": [ + { + "expr": "sum(rate(traefik_service_requests_total[5m])) by (service) ", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{ service }}", + "refId": "A" + } + ], + "title": "Requests by service", + "type": "grafana-piechart-panel", + "valueName": "total" + }, + { + "aliasColors": {}, + "breakPoint": "50%", + "cacheTimeout": null, + "combine": { + "label": "Others", + "threshold": 0 + }, + "datasource": "Prometheus", + "fontSize": "80%", + "format": "short", + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 23 + }, + "id": 8, + "interval": null, + "legend": { + "show": true, + "values": true + }, + "legendType": "Right side", + "links": [], + "maxDataPoints": 3, + "nullPointMode": "connected", + "options": {}, + "pieType": "pie", + "strokeWidth": 1, + "targets": [ + { + "expr": "sum(rate(traefik_entrypoint_requests_total{entrypoint =~ \"$entrypoint\"}[5m])) by (entrypoint) ", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{ entrypoint 
}}", + "refId": "A" + } + ], + "title": "Requests by protocol", + "type": "grafana-piechart-panel", + "valueName": "total" + } + ], + "schemaVersion": 21, + "style": "dark", + "tags": [ + "traefik", + "prometheus" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "tags": [], + "text": "prometheus@docker", + "value": "prometheus@docker" + }, + "datasource": "Prometheus", + "definition": "label_values(service)", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "service", + "options": [], + "query": "label_values(service)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "tags": [], + "text": "All", + "value": [ + "$__all" + ] + }, + "datasource": "Prometheus", + "definition": "label_values(entrypoint)", + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "entrypoint", + "options": [], + "query": "label_values(entrypoint)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "200", + "value": "200" + }, + "datasource": "Prometheus", + "definition": "label_values(code)", + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "code", + "options": [ + { + "selected": true, + "text": "200", + "value": "200" + }, + { + "selected": false, + "text": "302", + "value": "302" + }, + { + "selected": false, + "text": "304", + "value": "304" + }, + { + "selected": false, + "text": "400", + "value": "400" + }, + { + "selected": false, + "text": "404", + "value": "404" + }, + { + "selected": false, + "text": "429", + "value": "429" + }, + { + "selected": false, + "text": "499", + "value": "499" + }, + { + "selected": false, + "text": 
"500", + "value": "500" + }, + { + "selected": false, + "text": "503", + "value": "503" + } + ], + "query": "label_values(code)", + "refresh": 0, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Traefik2", + "uid": "qPdAviJmz1", + "version": 1 +} \ No newline at end of file diff --git a/demo/gateway/grafana/provisioning/datasources/datasource.yml b/demo/gateway/grafana/provisioning/datasources/datasource.yml new file mode 100644 index 0000000..7524447 --- /dev/null +++ b/demo/gateway/grafana/provisioning/datasources/datasource.yml @@ -0,0 +1,50 @@ +# config file version +apiVersion: 1 + +# list of datasources that should be deleted from the database +deleteDatasources: + - name: Prometheus + orgId: 1 + +# list of datasources to insert/update depending +# whats available in the database +datasources: + # name of the datasource. Required +- name: Prometheus + # datasource type. Required + type: prometheus + # access mode. direct or proxy. Required + access: browser + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://prometheus:9090 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: false + # basic auth username + basicAuthUser: admin + # basic auth password + basicAuthPassword: foobar + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. 
Max one per org + isDefault: true + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: false + tlsAuthWithCACert: false + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: true diff --git a/demo/gateway/prometheus/alert.rules b/demo/gateway/prometheus/alert.rules new file mode 100644 index 0000000..543bf91 --- /dev/null +++ b/demo/gateway/prometheus/alert.rules @@ -0,0 +1,22 @@ +groups: +- name: example + rules: + + # Alert for any instance that is unreachable for >2 minutes. + - alert: service_down + expr: up == 0 + for: 2m + labels: + severity: page + annotations: + summary: "Instance {{ $labels.instance }} down" + description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 2 minutes." + + - alert: high_load + expr: node_load1 > 0.5 + for: 2m + labels: + severity: page + annotations: + summary: "Instance {{ $labels.instance }} under high load" + description: "{{ $labels.instance }} of job {{ $labels.job }} is under high load." diff --git a/demo/gateway/prometheus/prometheus.yml b/demo/gateway/prometheus/prometheus.yml new file mode 100644 index 0000000..900e2b2 --- /dev/null +++ b/demo/gateway/prometheus/prometheus.yml @@ -0,0 +1,49 @@ +# my global config +global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + evaluation_interval: 15s # By default, scrape targets every 15 seconds. + # scrape_timeout is set to the global default (10s). + + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'my-project' + +# Load and evaluate rules in this file every 'evaluation_interval' seconds. 
+rule_files: + - 'alert.rules' + # - "first.rules" + # - "second.rules" + +# alert +# alerting: +# alertmanagers: +# - scheme: http +# static_configs: +# - targets: +# - "alertmanager:9093" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + static_configs: + - targets: ['gateway:8080'] + + + - job_name: 'traefik' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + dns_sd_configs: + - names: + - 'gateway' + type: 'A' + port: 8080 diff --git a/demo/gateway/traefik.yml b/demo/gateway/traefik.yml new file mode 100644 index 0000000..9a72e1e --- /dev/null +++ b/demo/gateway/traefik.yml @@ -0,0 +1,58 @@ +global: + checkNewVersion: false + +experimental: + devPlugin: + goPath: /plugins + moduleName: imageopti + +entryPoints: + web: + address: ":80" + # http: + # redirections: + # entrypoint: + # to: websecure + # scheme: https + websecure: + address: ":443" + +# providers: + # docker: + # endpoint: "unix:///var/run/docker.sock" + # exposedByDefault: false + # network: traefik-net + +metrics: + prometheus: {} + +log: + level: DEBUG + +api: + insecure: true + +# certificatesResolvers: +# myhttpchallenge: +# acme: +# email: antoine.gravelot@hotmail.fr +# storage: /letsencrypt/acme.json +# httpChallenge: +# # used during the challenge +# entryPoint: web + +# tls: +# options: +# default: +# sniStrict: true +# minVersion: VersionTLS12 +# cipherSuites: +# # https://ssl-config.mozilla.org/#server=traefik&version=2.1.2&config=intermediate&guideline=5.4 +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - 
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# mintls13: +# minVersion: VersionTLS13 \ No newline at end of file diff --git a/demo/imaginary/docker-compose.yml b/demo/imaginary/docker-compose.yml new file mode 100644 index 0000000..55beced --- /dev/null +++ b/demo/imaginary/docker-compose.yml @@ -0,0 +1,17 @@ +version: "3" +services: + imaginary: + image: h2non/imaginary:latest + expose: + - 9000 + networks: + - traefik-net + environment: + PORT: 9000 + DEBUG: "*" + command: -enable-url-source -http-cache-ttl 0 + +networks: + traefik-net: + external: + name: traefik-net \ No newline at end of file diff --git a/demo_test.go b/demo_test.go deleted file mode 100644 index dd0edcf..0000000 --- a/demo_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package plugindemo_test - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - - "github.com/traefik/plugindemo" -) - -func TestDemo(t *testing.T) { - cfg := plugindemo.CreateConfig() - cfg.Headers["X-Host"] = "[[.Host]]" - cfg.Headers["X-Method"] = "[[.Method]]" - cfg.Headers["X-URL"] = "[[.URL]]" - cfg.Headers["X-URL"] = "[[.URL]]" - cfg.Headers["X-Demo"] = "test" - - ctx := context.Background() - next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}) - - handler, err := plugindemo.New(ctx, next, cfg, "demo-plugin") - if err != nil { - t.Fatal(err) - } - - recorder := httptest.NewRecorder() - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost", nil) - if err != nil { - t.Fatal(err) - } - - handler.ServeHTTP(recorder, req) - - assertHeader(t, req, "X-Host", "localhost") - assertHeader(t, req, "X-URL", "http://localhost") - assertHeader(t, req, "X-Method", "GET") - assertHeader(t, req, "X-Demo", "test") -} - -func assertHeader(t *testing.T, req *http.Request, key, expected string) { - t.Helper() - - if req.Header.Get(key) != expected { - t.Errorf("invalid header value: %s", req.Header.Get(key)) - } -} diff --git a/go.mod b/go.mod 
index 45d5478..529ec15 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,3 @@ -module github.com/traefik/plugindemo +module github.com/agravelot/imageopti -go 1.15 +go 1.16 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..e69de29 diff --git a/image_optimizer.go b/image_optimizer.go new file mode 100644 index 0000000..389042a --- /dev/null +++ b/image_optimizer.go @@ -0,0 +1,172 @@ +// Package imageopti Image optimizer plugin +package imageopti + +import ( + "bytes" + "context" + "errors" + "fmt" + "log" + "net/http" + "strconv" + "strings" + "time" + + "github.com/agravelot/imageopti/cache" + "github.com/agravelot/imageopti/config" + "github.com/agravelot/imageopti/processor" +) + +// Config the plugin configuration. +type Config struct { + config.Config +} + +// CreateConfig creates the default plugin configuration. +func CreateConfig() *Config { + return &Config{ + config.Config{ + Processor: "", + Cache: "", + Imaginary: config.ImaginaryProcessorConfig{URL: ""}, + Redis: config.RedisCacheConfig{URL: ""}, + File: config.FileCacheConfig{Path: ""}, + }, + } +} + +// ImageOptimizer middleware plugin struct. +type ImageOptimizer struct { + next http.Handler + name string + p processor.Processor + c cache.Cache +} + +// New created a new ImageOptimizer plugin. 
+func New(ctx context.Context, next http.Handler, conf *Config, name string) (http.Handler, error) { + log.Println("Loading image optimization plugin...") + + if conf.Processor == "" { + return nil, fmt.Errorf("processor must be defined") + } + + c, err := cache.New(conf.Config) + if err != nil { + panic(err) + } + + p, err := processor.New(conf.Config) + if err != nil { + panic(err) + } + + return &ImageOptimizer{ + p: p, + c: c, + next: next, + name: name, + }, nil +} + +const ( + contentLength = "Content-Length" + contentType = "Content-Type" + cacheStatus = "Cache-Status" + cacheHitStatus = "hit" + cacheMissStatus = "miss" + cacheExpiry = 100 * time.Second + targetFormat = "image/webp" +) + +func (a *ImageOptimizer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // TODO Check if cacheable + key, err := cache.Tokenize(req) + if err != nil { + panic(err) + } + // Return cached result here. + if v, err := a.c.Get(key); err == nil { + rw.Header().Set(contentLength, fmt.Sprint(len(v))) + rw.Header().Set(contentType, targetFormat) + rw.Header().Set(cacheStatus, cacheHitStatus) + _, err = rw.Write(v) + + if err != nil { + panic(err) + } + + return + } + + wrappedWriter := &responseWriter{ + ResponseWriter: rw, + bypassHeader: true, + wroteHeader: false, + buffer: bytes.Buffer{}, + } + + a.next.ServeHTTP(wrappedWriter, req) + + wrappedWriter.bypassHeader = false + bodyBytes := wrappedWriter.buffer.Bytes() + + // If not image response, forward original and leave it here. 
+ if !isImageResponse(rw) { + _, err = rw.Write(bodyBytes) + if err != nil { + panic(err) + } + + return + } + + width, err := imageWidthRequest(req) + if err != nil { + panic(err) + } + + optimized, ct, err := a.p.Optimize(bodyBytes, rw.Header().Get(contentType), targetFormat, 75, width) + if err != nil { + panic(err) + } + + rw.Header().Set(contentLength, fmt.Sprint(len(optimized))) + rw.Header().Set(contentType, ct) + rw.Header().Set(cacheStatus, cacheMissStatus) + + _, err = rw.Write(optimized) + if err != nil { + panic(err) + } + + err = a.c.Set(key, optimized, cacheExpiry) + if err != nil { + panic(err) + } +} + +func imageWidthRequest(req *http.Request) (int, error) { + w := req.URL.Query().Get("w") + + // if no query param + if len(w) == 0 { + return 0, nil + } + + v, err := strconv.Atoi(w) + if err != nil { + return 0, fmt.Errorf("unable to convert w query param to int: %w", err) + } + + if v < 0 { + return 0, errors.New("width cannot be negative value") + } + + return v, nil +} + +// isImageResponse Determine with Content-Type header if the response is an image. 
+func isImageResponse(rw http.ResponseWriter) bool { + return strings.HasPrefix(rw.Header().Get(contentType), "image/") +} diff --git a/image_optimizer_test.go b/image_optimizer_test.go new file mode 100644 index 0000000..74b848e --- /dev/null +++ b/image_optimizer_test.go @@ -0,0 +1,301 @@ +package imageopti + +import ( + "bytes" + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/agravelot/imageopti/config" +) + +func TestImageOptimizer_ServeHTTP(t *testing.T) { + type args struct { + config config.Config + } + + tests := []struct { + name string + args args + wantedContentType string + wantedCacheStatus string + wantedSecondCacheStatus string + remoteResponseContentType string + remoteResponseContent []byte + want bool + wantErr bool + }{ + { + name: "should pass with processor", + args: args{ + config: config.Config{ + Processor: "imaginary", + Imaginary: config.ImaginaryProcessorConfig{URL: "http://localhost"}, + Cache: "none", + Redis: config.RedisCacheConfig{URL: ""}, + File: config.FileCacheConfig{Path: ""}, + }, + }, + want: false, + wantErr: false, + wantedCacheStatus: "", + wantedSecondCacheStatus: "", + wantedContentType: "text/html", + remoteResponseContentType: "text/html", + remoteResponseContent: []byte("dummy response"), + }, + { + name: "should not pass without processor and cache and return no cache status header", + args: args{ + config: config.Config{ + Processor: "", + Cache: "", + Redis: config.RedisCacheConfig{URL: ""}, + File: config.FileCacheConfig{Path: ""}, + Imaginary: config.ImaginaryProcessorConfig{URL: ""}, + }, + }, + want: false, + wantedCacheStatus: "", + wantedSecondCacheStatus: "", + wantErr: true, + wantedContentType: "text/html", + remoteResponseContentType: "text/html", + remoteResponseContent: []byte("dummy response"), + }, + // TODO Require to save response headers in cache + // { + // name: "should not modify image with none driver and cache file driver with cache status", + // args: args{config: 
config.Config{Processor: "none", Cache: "memory"}}, + // want: false, + // wantErr: false, + // wantedCacheStatus: "miss", + // wantedSecondCacheStatus: "hit", + // wantedContentType: "image/jpeg", + // remoteResponseContentType: "image/jpeg", + // remoteResponseContent: []byte("dummy image"), + // }, + { + name: "should not modify image with none driver and cache file driver with cache status", + args: args{ + config: config.Config{ + Processor: "local", + Cache: "memory", + Redis: config.RedisCacheConfig{URL: ""}, + File: config.FileCacheConfig{Path: ""}, + Imaginary: config.ImaginaryProcessorConfig{URL: ""}, + }, + }, + want: false, + wantErr: false, + wantedCacheStatus: "miss", + wantedSecondCacheStatus: "hit", + wantedContentType: "image/webp", + remoteResponseContentType: "image/jpeg", + remoteResponseContent: []byte("dummy image"), + }, + { + name: "should return original response if not image request and return no cache status header", + args: args{ + config: config.Config{ + Processor: "none", + Cache: "", + Redis: config.RedisCacheConfig{URL: ""}, + File: config.FileCacheConfig{Path: ""}, + Imaginary: config.ImaginaryProcessorConfig{URL: ""}, + }, + }, + want: false, + wantErr: false, + wantedCacheStatus: "", + wantedSecondCacheStatus: "", + wantedContentType: "text/html", + remoteResponseContentType: "text/html", + remoteResponseContent: []byte("dummy response"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := CreateConfig() + cfg.Processor = tt.args.config.Processor + cfg.Imaginary = tt.args.config.Imaginary + cfg.Cache = tt.args.config.Cache + cfg.File = tt.args.config.File + + ctx := context.Background() + next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Add("content-type", tt.remoteResponseContentType) + _, err := rw.Write(tt.remoteResponseContent) + if err != nil { + t.Fatal(err) + } + }) + + handler, err := New(ctx, next, cfg, "demo-plugin") + + if (err != nil) != 
tt.wantErr { + t.Fatalf("New() error = %v, wantErr %v", err, tt.wantErr) + } + + if err != nil { + return + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost", nil) + if err != nil { + t.Fatal(err) + } + recorder := httptest.NewRecorder() + + handler.ServeHTTP(recorder, req) + + if !bytes.Equal(recorder.Body.Bytes(), tt.remoteResponseContent) { + t.Fatal("response are not equals") + } + + if recorder.Header().Get("content-type") != tt.wantedContentType { + t.Fatalf("response content-type expected: %v got: %v", tt.wantedContentType, recorder.Header().Get("content-type")) + } + + if recorder.Header().Get("cache-status") != tt.wantedCacheStatus { + t.Fatalf("response cache-status expected: %v got: %v", tt.wantedCacheStatus, recorder.Header().Get("cache-status")) + } + + recorder = httptest.NewRecorder() + + handler.ServeHTTP(recorder, req) + + if !bytes.Equal(recorder.Body.Bytes(), tt.remoteResponseContent) { + t.Fatal("response are not equals") + } + + if recorder.Header().Get("content-type") != tt.wantedContentType { + t.Fatalf("response content-type expected: %v got: %v", tt.wantedContentType, recorder.Header().Get("content-type")) + } + + if recorder.Header().Get("cache-status") != tt.wantedSecondCacheStatus { + t.Fatalf( + "response cache-status expected: %v got: %v", + tt.wantedSecondCacheStatus, + recorder.Header().Get("cache-status"), + ) + } + }) + } +} + +func TestIsImageResponse(t *testing.T) { + type args struct { + contentType string + } + + tests := []struct { + name string + args args + want bool + wantErr bool + }{ + { + name: "should return false with empty string", + args: args{contentType: ""}, + want: false, + wantErr: false, + }, + { + name: "should return true with jpeg content type", + args: args{contentType: "image/jpeg"}, + want: true, + wantErr: false, + }, + { + name: "should return true with webp content type", + args: args{contentType: "image/webp"}, + want: true, + wantErr: false, + }, + { + name: "should 
return false with json content type", + args: args{contentType: "application/json"}, + want: false, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + recorder.Header().Add("Content-Type", tt.args.contentType) + + got := isImageResponse(recorder) + + if got != tt.want { + t.Errorf("IsImageResponse() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestImageWidthRequest(t *testing.T) { + type args struct{ url string } + + tests := []struct { + name string + args args + want int + wantErr bool + }{ + { + name: "should return error with positive width", + args: args{url: "http://localhost?w=124"}, + want: 124, + wantErr: false, + }, + { + name: "should return error with negative width", + args: args{url: "http://localhost?w=-124"}, + want: 0, + wantErr: true, + }, + { + name: "should return error with text as width", + args: args{url: "http://localhost?w=azeaze"}, + want: 0, + wantErr: true, + }, + { + name: "should return error with empty width", + args: args{url: "http://localhost?w="}, + want: 0, + wantErr: false, + }, + { + name: "should return error with no width", + args: args{url: "http://localhost"}, + want: 0, + wantErr: false, + }, + } + + for _, tt := range tests { + ctx := context.Background() + + t.Run(tt.name, func(t *testing.T) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, tt.args.url, nil) + if err != nil { + t.Fatal(err) + } + + got, err := imageWidthRequest(req) + + if (err != nil) != tt.wantErr { + t.Fatalf("imageWidthRequest() error = %v, wantErr %v", err, tt.wantErr) + } + + if got != tt.want { + t.Errorf("ImageWidthRequest() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/processor/factory.go b/processor/factory.go new file mode 100644 index 0000000..0354fa1 --- /dev/null +++ b/processor/factory.go @@ -0,0 +1,35 @@ +// Package processor Handle image processing, including resizing, converting and stripping unwanted metadata. 
+package processor + +import ( + "fmt" + + "github.com/agravelot/imageopti/config" +) + +// Processor Define processor interface. +type Processor interface { + Optimize(media []byte, originalFormat string, targetFormat string, quality, width int) ([]byte, string, error) +} + +// New Processor factory from dynamic configurations. +func New(conf config.Config) (Processor, error) { + if conf.Processor == "imaginary" { + p, err := NewImaginary(conf) + if err != nil { + return nil, err + } + + return p, nil + } + + if conf.Processor == "local" { + return &LocalProcessor{}, nil + } + + if conf.Processor == "none" { + return &NoneProcessor{}, nil + } + + return nil, fmt.Errorf("unable to resolver given optimizer %s", conf.Processor) +} diff --git a/processor/factory_test.go b/processor/factory_test.go new file mode 100644 index 0000000..dc4c41c --- /dev/null +++ b/processor/factory_test.go @@ -0,0 +1,97 @@ +package processor + +import ( + "net/http" + "reflect" + "testing" + "time" + + "github.com/agravelot/imageopti/config" +) + +const defaultTimeout = 1 * time.Second + +func TestNew(t *testing.T) { + type args struct { + conf config.Config + } + + tests := []struct { + name string + args args + want Processor + wantErr bool + }{ + { + name: "should be able to return imaginary optimizer", + args: args{ + config.Config{ + Processor: "imaginary", + Imaginary: config.ImaginaryProcessorConfig{URL: "http://localhost"}, + Cache: "none", + Redis: config.RedisCacheConfig{URL: ""}, + File: config.FileCacheConfig{Path: ""}, + }, + }, + want: &ImaginaryProcessor{"", http.Client{Timeout: defaultTimeout}}, + wantErr: false, + }, + { + name: "should not be able to init imaginary without valid url", + args: args{config.Config{Processor: "imaginary", Imaginary: config.ImaginaryProcessorConfig{URL: "localhost"}}}, + want: nil, + wantErr: true, + }, + { + name: "should not be able to init imaginary without valid url 2 ", + args: args{ + config.Config{ + Processor: "imaginary", + 
Imaginary: config.ImaginaryProcessorConfig{URL: "htt://localhost"}, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "should not be able to init imaginary without url", + args: args{config.Config{Processor: "imaginary"}}, + want: nil, + wantErr: true, + }, + { + name: "should be able to return local optimizer", + args: args{config.Config{Processor: "local"}}, + want: &LocalProcessor{}, + wantErr: false, + }, + { + name: "should return error with unsupported processor", + args: args{config.Config{Processor: "unsupported"}}, + want: nil, + wantErr: true, + }, + { + name: "should return error with empty processor", + args: args{config.Config{Processor: "unsupported"}}, + want: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := New(tt.args.conf) + if (err != nil) != tt.wantErr { + t.Fatalf("New() error = %v, wantErr %v", err, tt.wantErr) + } + + typeGot := reflect.TypeOf(got) + typeWanted := reflect.TypeOf(tt.want) + + if typeGot != typeWanted { + t.Errorf("New() = %v, want %v", typeGot, typeWanted) + } + }) + } +} diff --git a/processor/imaginary.go b/processor/imaginary.go new file mode 100644 index 0000000..89345ad --- /dev/null +++ b/processor/imaginary.go @@ -0,0 +1,131 @@ +// Package processor Add support for imaginary imge processing service. 
+package processor + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "time" + + "github.com/agravelot/imageopti/config" +) + +const httpTimeout = 5 * time.Second + +type pipelineOperationParams struct { + Font string `json:"font,omitempty"` + Height int `json:"height,omitempty"` + Opacity float64 `json:"opacity,omitempty"` + Rotate int `json:"rotate,omitempty"` + Text string `json:"text,omitempty"` + Textwidth int `json:"textwidth,omitempty"` + Type string `json:"type,omitempty"` + Width int `json:"width,omitempty"` + StripMeta bool `json:"stripmeta,omitempty"` +} + +type pipelineOperation struct { + Operation string `json:"operation"` + Params pipelineOperationParams `json:"params"` +} + +// ImaginaryProcessor define imaginary processor settings. +type ImaginaryProcessor struct { + URL string + client http.Client +} + +func isValidURL(s string) error { + if s == "" { + return fmt.Errorf("url cannot be empty") + } + + u, err := url.ParseRequestURI(s) + if err != nil { + return fmt.Errorf("unable to parse imaginary url: %w", err) + } + + if u.Scheme != "http" && u.Scheme != "https" { + return fmt.Errorf("unvalid imaginary scheme") + } + + return nil +} + +// NewImaginary instantiate a new imaginary instance with given config. +func NewImaginary(conf config.Config) (*ImaginaryProcessor, error) { + err := isValidURL(conf.Imaginary.URL) + if err != nil { + return nil, err + } + + return &ImaginaryProcessor{ + client: http.Client{ + Timeout: httpTimeout, + }, + URL: conf.Imaginary.URL, + }, nil +} + +// Optimize method to process image with imaginary with given parameters. 
+func (ip *ImaginaryProcessor) Optimize(media []byte, of string, tf string, q, width int) ([]byte, string, error) { + ope := []pipelineOperation{ + {Operation: "convert", Params: pipelineOperationParams{Type: "webp", StripMeta: true}}, + } + + if width > 0 { + ope = append(ope, pipelineOperation{Operation: "resize", Params: pipelineOperationParams{Width: width}}) + } + + opString, err := json.Marshal(ope) + if err != nil { + return nil, "", fmt.Errorf("unable generate imaginary operations: %w", err) + } + + u := fmt.Sprintf("%s/pipeline?operations=%s", ip.URL, url.QueryEscape(string(opString))) + method := "POST" + + payload := &bytes.Buffer{} + writer := multipart.NewWriter(payload) + fileWriter, err := writer.CreateFormFile("file", "tmp.jpg") + if err != nil { + return nil, "", fmt.Errorf("unable to create file to imaginary file writer: %w", err) + } + + _, err = fileWriter.Write(media) + if err != nil { + return nil, "", fmt.Errorf("unable to write file to imaginary file writer: %w", err) + } + + err = writer.Close() + if err != nil { + return nil, "", fmt.Errorf("unable to close imaginary file writer: %w", err) + } + + req, err := http.NewRequest(method, u, payload) + if err != nil { + return nil, "", fmt.Errorf("unable to create imaginary request: %w", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + res, err := ip.client.Do(req) + if err != nil { + return nil, "", fmt.Errorf("unable to send imaginary request: %w", err) + } + + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, "", fmt.Errorf("unable to read imaginary response body: %w", err) + } + + err = res.Body.Close() + if err != nil { + return nil, "", fmt.Errorf("unable to close imaginary body response: %w", err) + } + + return body, "image/webp", nil +} diff --git a/processor/local.go b/processor/local.go new file mode 100644 index 0000000..1e2a97a --- /dev/null +++ b/processor/local.go @@ -0,0 +1,13 @@ +package processor + +// LocalProcessor process images 
directly in Traefik itself; unsupported due to interpreter limitations.
+type LocalProcessor struct{}
+
+// Optimize optimizes the image with the given params.
+func (lp *LocalProcessor) Optimize(media []byte, of string, tf string, q, w int) ([]byte, string, error) {
+ // newImage, err := bimg.NewImage(media).Convert(bimg.WEBP)
+ // if err != nil {
+ // return nil, err
+ // }
+ return media, tf, nil
+}
diff --git a/processor/none.go b/processor/none.go
new file mode 100644
index 0000000..eacdfcb
--- /dev/null
+++ b/processor/none.go
@@ -0,0 +1,9 @@
+package processor
+
+// NoneProcessor dummy processor, using the null object pattern.
+type NoneProcessor struct{}
+
+// Optimize returns the same data from media, unmodified.
+func (lp *NoneProcessor) Optimize(media []byte, of string, tf string, q, w int) ([]byte, string, error) {
+ return media, of, nil
+}
diff --git a/readme.md b/readme.md
index bb2379d..db1a67c 100644
--- a/readme.md
+++ b/readme.md
@@ -1,25 +1,19 @@
-# Developing a Traefik plugin
+# Image optimizer Traefik middleware plugin

-[Traefik](https://traefik.io) plugins are developed using the [Go language](https://golang.org).
+Image optimizer middleware is a [Traefik](https://traefik.io) plugin designed to optimize image responses on the fly.
+It can remove unwanted metadata, convert to the desired format (like webp) and resize images.

-A [Traefik](https://traefik.io) middleware plugin is just a [Go package](https://golang.org/ref/spec#Packages) that provides an `http.Handler` to perform specific processing of requests and responses.
-
-Rather than being pre-compiled and linked, however, plugins are executed on the fly by [Yaegi](https://github.com/traefik/yaegi), an embedded Go interpreter.
+Easy to extend, you can implement your own image processing logic and caching systems, please refer to the [documentation](https://doc.traefik.io/traefik/plugins/).

## Usage

-For a plugin to be active for a given Traefik instance, it must be declared in the static configuration. 
- -Plugins are parsed and loaded exclusively during startup, which allows Traefik to check the integrity of the code and catch errors early on. -If an error occurs during loading, the plugin is disabled. - -For security reasons, it is not possible to start a new plugin or modify an existing one while Traefik is running. +To be active for a given Traefik instance, it must be declared in the static configuration. -Once loaded, middleware plugins behave exactly like statically compiled middlewares. -Their instantiation and behavior are driven by the dynamic configuration. - -Plugin dependencies must be [vendored](https://golang.org/ref/mod#tmp_25) for each plugin. -Vendored packages should be included in the plugin's GitHub repository. ([Go modules](https://blog.golang.org/using-go-modules) are not supported.) +You can request your images as usual, and just add `w=` in query param. +```bash +curl "http://demo.localhost/very_big.jpg" # return converted to webp and without metadata +curl "http://demo.localhost/very_big.jpg?w=1725" # return resized with 1725px width, converted to webp and without metadata +``` ### Configuration @@ -34,9 +28,9 @@ pilot: experimental: plugins: - example: - moduleName: github.com/traefik/plugindemo - version: v0.2.1 + imageopti: + moduleName: github.com/agravelot/imageopti + version: v0.1.0 ``` Here is an example of a file provider dynamic configuration (given here in YAML), where the interesting part is the `http.middlewares` section: @@ -52,7 +46,7 @@ http: entryPoints: - web middlewares: - - my-plugin + - imageopti services: service-foo: @@ -61,194 +55,58 @@ http: - url: http://127.0.0.1:5000 middlewares: - my-plugin: + imageopti: plugin: - example: - headers: - Foo: Bar + config: + processor: + imaginary: + url: http://imaginary:9000 + cache: + file: + path: /tmp + redis: + url: redis://:@localhost:6379/ ``` -### Dev Mode +List of available processors: -Traefik also offers a developer mode that can be used for temporary testing of 
plugins not hosted on GitHub. -To use a plugin in dev mode, the Traefik static configuration must define the module name (as is usual for Go packages) and a path to a [Go workspace](https://golang.org/doc/gopath_code.html#Workspaces), which can be the local GOPATH or any directory. +| Name | Note | +| -------------|:---------------------------:| +| imaginary | Use [imaginary](https://github.com/h2non/imaginary) as processor to manipulate images, can be easily scaled. (recommended) | +| local | Process images in Traefik itself, ⚠️ currently **not implemented** cause of interpreter limitations. | +| none | Keep images untouched (default) | -```yaml -# Static configuration -pilot: - token: xxxxx - -experimental: - devPlugin: - goPath: /plugins/go - moduleName: github.com/traefik/plugindemo -``` +List of available caches: -(In the above example, the `plugindemo` plugin will be loaded from the path `/plugins/go/src/github.com/traefik/plugindemo`.) +| Name | Note | +| -------------|:---------------------------:| +| file | Save images in given directory. (recommended) | +| redis | Save images in redis, work best in HA environments. ⚠️ currently **not implemented** cause of interpreter limitations. | +| memory | Keep images directly in memory, only recommended in development. ⚠️ Cache invalidity not implemented yet. | +| none | Do not cache images (default) | -```yaml -# Dynamic configuration - -http: - routers: - my-router: - rule: host(`demo.localhost`) - service: service-foo - entryPoints: - - web - middlewares: - - my-plugin - - services: - service-foo: - loadBalancer: - servers: - - url: http://127.0.0.1:5000 - - middlewares: - my-plugin: - plugin: - dev: - headers: - Foo: Bar -``` - -#### Dev Mode Limitations - -Note that only one plugin can be tested in dev mode at a time, and when using dev mode, Traefik will shut down after 30 minutes. - -## Defining a Plugin - -A plugin package must define the following exported Go objects: - -- A type `type Config struct { ... 
}`. The struct fields are arbitrary. -- A function `func CreateConfig() *Config`. -- A function `func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error)`. - -```go -// Package example a example plugin. -package example - -import ( - "context" - "net/http" -) - -// Config the plugin configuration. -type Config struct { - // ... -} - -// CreateConfig creates the default plugin configuration. -func CreateConfig() *Config { - return &Config{ - // ... - } -} - -// Example a plugin. -type Example struct { - next http.Handler - name string - // ... -} - -// New created a new plugin. -func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) { - // ... - return &Example{ - // ... - }, nil -} - -func (e *Example) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - // ... - e.next.ServeHTTP(rw, req) -} -``` - -## Traefik Pilot - -Traefik plugins are stored and hosted as public GitHub repositories. - -Every 30 minutes, the Traefik Pilot online service polls Github to find plugins and add them to its catalog. - -### Prerequisites - -To be recognized by Traefik Pilot, your repository must meet the following criteria: - -- The `traefik-plugin` topic must be set. -- The `.traefik.yml` manifest must exist, and be filled with valid contents. - -If your repository fails to meet either of these prerequisites, Traefik Pilot will not see it. - -### Manifest - -A manifest is also mandatory, and it should be named `.traefik.yml` and stored at the root of your project. - -This YAML file provides Traefik Pilot with information about your plugin, such as a description, a full name, and so on. - -Here is an example of a typical `.traefik.yml`file: - -```yaml -# The name of your plugin as displayed in the Traefik Pilot web UI. -displayName: Name of your plugin - -# For now, `middleware` is the only type available. -type: middleware - -# The import path of your plugin. 
-import: github.com/username/my-plugin +### Dev Mode -# A brief description of what your plugin is doing. -summary: Description of what my plugin is doing +An easy to bootstrap development environment using docker is available in the `demo/` folder. -# Medias associated to the plugin (optional) -iconPath: foo/icon.png -bannerPath: foo/banner.png +Make sure to copy `demo/.env.example` to `demo/.env` and provide the required Traefik pilot token. +To quickly start the demo, please run at the root of this repository: -# Configuration data for your plugin. -# This is mandatory, -# and Traefik Pilot will try to execute the plugin with the data you provide as part of its startup validity tests. -testData: - Headers: - Foo: Bar +```bash +make demo ``` -Properties include: - -- `displayName` (required): The name of your plugin as displayed in the Traefik Pilot web UI. -- `type` (required): For now, `middleware` is the only type available. -- `import` (required): The import path of your plugin. -- `summary` (required): A brief description of what your plugin is doing. -- `testData` (required): Configuration data for your plugin. This is mandatory, and Traefik Pilot will try to execute the plugin with the data you provide as part of its startup validity tests. -- `iconPath` (optional): A local path in the repository to the icon of the project. -- `bannerPath` (optional): A local path in the repository to the image that will be used when you will share your plugin page in social medias. - -There should also be a `go.mod` file at the root of your project. Traefik Pilot will use this file to validate the name of the project. +Services are now accessible with these endpoints. 
-### Tags and Dependencies
+| Service | URL |
+| ----------------- |:---------------------------:|
+| Demo frontend | http://demo.localhost |
+| Traefik dashboard | http://localhost:8080 |
+| Grafana | http://grafana.localhost |
+| Prometheus | http://prometheus.localhost |

-Traefik Pilot gets your sources from a Go module proxy, so your plugins need to be versioned with a git tag.
+You can now implement your own image processing or caching systems by implementing `processor.Processor` and `cache.Cache` interfaces.
+After that, make sure to add your new configuration and add it to the corresponding factory.

-Last but not least, if your plugin middleware has Go package dependencies, you need to vendor them and add them to your GitHub repository.
-
-If something goes wrong with the integration of your plugin, Traefik Pilot will create an issue inside your Github repository and will stop trying to add your repo until you close the issue.
-
-## Troubleshooting
-
-If Traefik Pilot fails to recognize your plugin, you will need to make one or more changes to your GitHub repository.
-
-In order for your plugin to be successfully imported by Traefik Pilot, consult this checklist:
-
-- The `traefik-plugin` topic must be set on your repository.
-- There must be a `.traefik.yml` file at the root of your project describing your plugin, and it must have a valid `testData` property for testing purposes.
-- There must be a valid `go.mod` file at the root of your project.
-- Your plugin must be versioned with a git tag.
-- If you have package dependencies, they must be vendored and added to your GitHub repository.
-
-## Sample Code
-
-This repository includes an example plugin, `demo`, for you to use as a reference for developing your own plugins.
+Note that only one plugin can be tested in dev mode at a time, and when using dev mode, Traefik will shut down after 30 minutes. 
-[![Build Status](https://github.com/traefik/plugindemo/workflows/Main/badge.svg?branch=master)](https://github.com/traefik/plugindemo/actions) diff --git a/response-writer.go b/response-writer.go new file mode 100644 index 0000000..1babba6 --- /dev/null +++ b/response-writer.go @@ -0,0 +1,41 @@ +// Package imageopti Bypass default response writer to override headers and body +package imageopti + +import ( + "bytes" + "fmt" + "net/http" +) + +type responseWriter struct { + buffer bytes.Buffer + bypassHeader bool + wroteHeader bool // Control when to write header + + http.ResponseWriter +} + +func (r *responseWriter) WriteHeader(statusCode int) { + if !r.bypassHeader { + r.ResponseWriter.WriteHeader(statusCode) + } +} + +func (r *responseWriter) Write(p []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + + i, err := r.buffer.Write(p) + if err != nil { + return i, fmt.Errorf("unable to write response body: %w", err) + } + + return i, nil +} + +func (r *responseWriter) Flush() { + if flusher, ok := r.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +}