Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor: restructure process-version #255

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
82 changes: 82 additions & 0 deletions cmd/process-version/compress.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
package main

import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"runtime"
"sync"

"github.com/cdnjs/tools/compress"
"github.com/cdnjs/tools/packages"
"github.com/pkg/errors"
)

// compressPackage compresses every regular file in the flat OUTPUT
// directory using a pool of compressionWorker goroutines, one per CPU.
// Directories are skipped. It blocks until all queued files have been
// processed and returns an error only if OUTPUT cannot be listed.
// The config argument is currently unused but kept for interface parity
// with the other per-package processing steps.
func compressPackage(ctx context.Context, config *packages.Package) error {
	entries, err := ioutil.ReadDir(OUTPUT)
	if err != nil {
		return errors.Wrap(err, "failed to list output files")
	}

	workers := runtime.NumCPU()
	queue := make(chan compressionJob, workers)

	var pending sync.WaitGroup
	for i := 0; i < workers; i++ {
		go compressionWorker(&pending, queue)
	}

	// Queue one job per regular file; pending is incremented before the
	// send so Wait cannot return early.
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		pending.Add(1)
		queue <- compressionJob{Ctx: ctx, File: entry.Name()}
	}
	close(queue)

	pending.Wait()
	return nil
}

// compressionJob describes one file (named relative to OUTPUT) that a
// compressionWorker should compress.
type compressionJob struct {
	// Ctx carries cancellation for the external compression tools.
	// NOTE(review): storing a Context in a struct is normally discouraged,
	// but is conventional for jobs passed over a worker channel.
	Ctx context.Context
	// File is the base name of the file inside OUTPUT to compress.
	File string
}

// compressionWorker consumes jobs until the channel is closed. For each
// file whose extension is not in doNotCompress it writes a Brotli (.br)
// and a Gzip (.gz) variant next to the source in OUTPUT (skipping each
// variant that already exists), then removes the original. Files with
// extensions in doNotCompress are left untouched. wg.Done is called
// exactly once per job.
func compressionWorker(wg *sync.WaitGroup, jobs <-chan compressionJob) {
	for j := range jobs {
		src := path.Join(OUTPUT, j.File)
		ext := path.Ext(src)

		if _, ok := doNotCompress[ext]; !ok {
			outBr := fmt.Sprintf("%s.br", src)
			if _, err := os.Stat(outBr); err == nil {
				log.Printf("file %s already exists at the output\n", outBr)
			} else {
				compress.Brotli11CLI(j.Ctx, src, outBr)
				log.Printf("br %s -> %s\n", src, outBr)
			}

			outGz := fmt.Sprintf("%s.gz", src)
			if _, err := os.Stat(outGz); err == nil {
				log.Printf("file %s already exists at the output\n", outGz)
			} else {
				compress.Gzip9Native(j.Ctx, src, outGz)
				log.Printf("gz %s -> %s\n", src, outGz)
			}

			// The original file can be removed because we keep the
			// compressed versions. Don't swallow a failure silently:
			// a leftover original would be served uncompressed.
			if err := os.Remove(src); err != nil {
				log.Printf("could not remove %s: %s\n", src, err)
			}
		}

		wg.Done()
	}
}
160 changes: 30 additions & 130 deletions cmd/process-version/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,20 +5,15 @@ import (
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"

"github.com/cdnjs/tools/compress"
"github.com/cdnjs/tools/packages"
"github.com/cdnjs/tools/sri"

"github.com/pkg/errors"
)
Expand All @@ -28,6 +23,7 @@ var (
// but not compressed
doNotCompress = map[string]bool{
".woff2": true,
".sri": true, // internal SRI hash file
}
// we calculate SRIs for these file extensions
calculateSRI = map[string]bool{
Expand All @@ -37,9 +33,9 @@ var (
)

const (
INPUT = "/input"
OUTPUT = "/output"
WORKSPACE = "/tmp/work"
INPUT = "/input"
OUTPUT = "/output"
PACKAGE = "/tmp/pkg"
)

func main() {
Expand All @@ -50,76 +46,32 @@ func main() {
log.Fatalf("could not read config: %s", err)
}

if err := os.MkdirAll(WORKSPACE, 0700); err != nil {
log.Fatalf("could not create workspace: %s", err)
if err := os.MkdirAll(PACKAGE, 0700); err != nil {
log.Fatalf("could not create PACKAGE: %s", err)
}

if err := extractInput(*config.Autoupdate.Source); err != nil {
log.Fatalf("failed to extract input: %s", err)
}

if err := optimizePackage(ctx, config); err != nil {
// Step 1. copy all package files to their destination according to the
// fileMap configuration.
if err := copyPackage(ctx, config); err != nil {
log.Fatalf("failed to optimize files: %s", err)
}
log.Printf("processed %s\n", *config.Name)
}

type optimizeJob struct {
Ctx context.Context
Optimization *packages.Optimization
File string
Dest string
}

func (j optimizeJob) clone() optimizeJob {
return optimizeJob{
Ctx: j.Ctx,
Optimization: j.Optimization,
File: j.File,
Dest: j.Dest,
}
}

func (j optimizeJob) emitFromWorkspace(src string) {
dest := path.Join(OUTPUT, j.Dest)
if err := os.MkdirAll(path.Dir(dest), 0755); err != nil {
log.Fatalf("could not create dest dir: %s", err)
// Step 2. iterate over the last output and minify files
if err := optimizePackage(ctx, config); err != nil {
log.Fatalf("failed to optimize files: %s", err)
}

ext := path.Ext(src)
if _, ok := calculateSRI[ext]; ok {
outSRI := fmt.Sprintf("%s.sri", dest)
sri.CalculateFileSRI(src, outSRI)
log.Printf("sri %s -> %s\n", src, outSRI)
// Step 3. iterate over the last output and calculate an SRI for each file
if err := calcSriPackage(ctx, config); err != nil {
log.Fatalf("failed to optimize files: %s", err)
}

if _, ok := doNotCompress[ext]; !ok {
outBr := fmt.Sprintf("%s.br", dest)
if _, err := os.Stat(outBr); err == nil {
log.Printf("file %s already exists at the output\n", outBr)
} else {
compress.Brotli11CLI(j.Ctx, src, outBr)
log.Printf("br %s -> %s\n", src, outBr)
}

outGz := fmt.Sprintf("%s.gz", dest)
if _, err := os.Stat(outGz); err == nil {
log.Printf("file %s already exists at the output\n", outGz)
} else {
compress.Gzip9Native(j.Ctx, src, outGz)
log.Printf("gz %s -> %s\n", src, outGz)
}
} else {
if err := copyFile(src, dest); err != nil {
log.Fatalf("failed to copy file: %s", err)
}
log.Printf("copy %s -> %s\n", src, dest)
// Step 4. iterate over the last output and compress all files
if err := compressPackage(ctx, config); err != nil {
log.Fatalf("failed to optimize files: %s", err)
}
}

func (j optimizeJob) emit(name string) {
src := path.Join(WORKSPACE, name)
j.emitFromWorkspace(src)
log.Printf("processed %s\n", *config.Name)
}

func removePackageDir(path string) string {
Expand Down Expand Up @@ -186,10 +138,10 @@ func extractInput(source string) error {
case tar.TypeDir:
// ignore dirs
case tar.TypeReg:
if err := os.MkdirAll(path.Join(WORKSPACE, filepath.Dir(target)), 0755); err != nil {
if err := os.MkdirAll(path.Join(PACKAGE, filepath.Dir(target)), 0755); err != nil {
return errors.Wrap(err, "ExtractTarGz: Mkdir() failed")
}
outFile, err := os.Create(path.Join(WORKSPACE, target))
outFile, err := os.Create(path.Join(PACKAGE, target))
if err != nil {
return errors.Wrap(err, "ExtractTarGz: Create() failed")
}
Expand All @@ -207,72 +159,20 @@ func extractInput(source string) error {
return nil
}

func optimizeWorker(wg *sync.WaitGroup, jobs <-chan optimizeJob) {
for j := range jobs {
intputFile := path.Join(WORKSPACE, j.File)
ext := path.Ext(j.File)
switch ext {
case ".jpg", ".jpeg":
if j.Optimization.Jpg() {
compress.Jpeg(j.Ctx, intputFile)
}
case ".png":
if j.Optimization.Png() {
compress.Png(j.Ctx, intputFile)
}
case ".js":
if j.Optimization.Js() {
if out := compress.Js(j.Ctx, intputFile); out != nil {
j := j.clone()
j.Dest = strings.Replace(j.Dest, ".js", ".min.js", 1)
j.emitFromWorkspace(*out)
}
}
case ".css":
if j.Optimization.Css() {
if out := compress.CSS(j.Ctx, intputFile); out != nil {
j := j.clone()
j.Dest = strings.Replace(j.Dest, ".css", ".min.css", 1)
j.emitFromWorkspace(*out)
}
}
}

j.emit(j.File)
wg.Done()
}
}

// Optimizes/minifies package's files on disk for a particular package version.
func optimizePackage(ctx context.Context, config *packages.Package) error {
log.Printf("optimizing files (Js %t, Css %t, Png %t, Jpg %t)\n",
config.Optimization.Js(),
config.Optimization.Css(),
config.Optimization.Png(),
config.Optimization.Jpg())

files := config.NpmFilesFrom(WORKSPACE)
cpuCount := runtime.NumCPU()
jobs := make(chan optimizeJob, cpuCount)

var wg sync.WaitGroup
wg.Add(len(files))

for w := 1; w <= cpuCount; w++ {
go optimizeWorker(&wg, jobs)
}
// Copy the package files to their intended location
func copyPackage(ctx context.Context, config *packages.Package) error {
files := config.NpmFilesFrom(PACKAGE)

for _, file := range files {
jobs <- optimizeJob{
Ctx: ctx,
Optimization: config.Optimization,
File: file.From,
Dest: file.To,
src := path.Join(PACKAGE, file.From)
dest := path.Join(OUTPUT, file.To)

if err := copyFile(src, dest); err != nil {
log.Fatalf("failed to copy file: %s", err)
}
log.Printf("copy %s -> %s\n", src, dest)
}
close(jobs)

wg.Wait()
return nil
}

Expand Down
Loading