From e7de543fd4588ebcbe3ed5f3b5e4da2594a8bd4d Mon Sep 17 00:00:00 2001 From: pascal Date: Wed, 21 Feb 2018 12:04:29 +0100 Subject: [PATCH] used gofmt -d -s -w *.go --- base/GD.go | 25 +-- base/base.go | 15 +- linear_model/Base.go | 2 + linear_model/bayes.go | 376 ++++++++++++++++++------------------- linear_model/bayes_test.go | 2 - preprocessing/data.go | 18 +- preprocessing/data_test.go | 38 ++-- 7 files changed, 242 insertions(+), 234 deletions(-) diff --git a/base/GD.go b/base/GD.go index 7230f70..a06b789 100644 --- a/base/GD.go +++ b/base/GD.go @@ -1,32 +1,33 @@ package base import ( - _ "fmt" "math" ) +// GD contains data for Gradient Descent regressor type GD struct { RegressorMixin - Epochs int - LearningRate, Decay, Tol, Momentum, Alpha, L1_ratio float - Coefs_ []float + Epochs int + LearningRate, Decay, Tol, Momentum, Alpha, L1Ratio float + Coefs []float } +// NewGD create a GD with reasonable defaults func NewGD() *GD { - self := &GD{Epochs: 3000, LearningRate: 1e-3, Decay: .95, Tol: 1e-3, Momentum: .5, L1_ratio: .15} + self := &GD{Epochs: 3000, LearningRate: 1e-3, Decay: .95, Tol: 1e-3, Momentum: .5} self.Predicter = self return self } +// Fit learns GD Coefs // adapted from gdSolver from https://github.com/ohheydom/linearregression/blob/master/linear_regression.go // Gradient Descent algorithm. - func (gd *GD) Fit(x [][]float64, y []float64) *GD { n, nFeatures := len(x), len(x[0]) gamma := gd.LearningRate / float(n) w := make([]float64, nFeatures+1) dw := make([]float64, nFeatures+1) - gd.Coefs_ = w + gd.Coefs = w errors := make([]float64, n) for i := 0; i < gd.Epochs; i++ { Shuffle(x, y) @@ -53,8 +54,8 @@ func (gd *GD) Fit(x [][]float64, y []float64) *GD { w[l] += dw[l] } } - // L1 : floats.sum(ewize(w,math.Abs));L2:=sum(ewise(w,func(w float)float{return w*w}));R=gd.L1_ratio*L1+(1-gd.L1_ratio*L2) - // TODO: use L1_ratio + // L1 : floats.sum(ewize(w,math.Abs));L2:=sum(ewise(w,func(w float)float{return w*w}));R=gd.L1Ratio*L1+(1-gd.L1Ratio*L2) + // TODO: use L1Ratio //decrease lr/n // TODO learning_rate=optimal eta(t)=1/(alpha*(t0+t)) @@ -65,13 +66,13 @@ func (gd *GD) Fit(x [][]float64, y []float64) *GD { break } } - gd.Coefs_ = w + gd.Coefs = w return gd } -// predY uses the given weights to calculate each sample's label. +// Predict uses the GD Coefs to calculate each sample's label. func (gd *GD) Predict(x [][]float64) []float64 { - w := gd.Coefs_ + w := gd.Coefs n, nFeatures := len(x), len(x[0]) predY := make([]float64, n) for i := 0; i < n; i++ { diff --git a/base/base.go b/base/base.go index 88e4242..6cac747 100644 --- a/base/base.go +++ b/base/base.go @@ -7,20 +7,25 @@ import ( type float = float64 +// Predicter is an interface for Predict method type Predicter interface { Predict([][]float) []float } + +// RegressorMixin is a base for predicters. 
provides a Score(X,w,weights) method type RegressorMixin struct{ Predicter } -func (self *RegressorMixin) Score(X [][]float, y, sample_weight []float) float { - y_pred := self.Predict(X) - return metrics.R2Score(y, y_pred, sample_weight, "variance_weighted") +// Score returns R2Score of predicter +func (predicter *RegressorMixin) Score(X [][]float, y, sampleWeight []float) float { + yPred := predicter.Predict(X) + return metrics.R2Score(y, yPred, sampleWeight, "variance_weighted") } +// Shuffle shuffles X,y samples func Shuffle(X [][]float, y []float) { - n_samples := len(X) + nSamples := len(X) for i := range X { - j := i + rand.Intn(n_samples-i) + j := i + rand.Intn(nSamples-i) X[i], X[j] = X[j], X[i] if y != nil { y[i], y[j] = y[j], y[i] diff --git a/linear_model/Base.go b/linear_model/Base.go index 1db58a5..db2a8fd 100644 --- a/linear_model/Base.go +++ b/linear_model/Base.go @@ -522,3 +522,5 @@ func preprocess_data(X [][]float, y []float, fit_intercept bool, normalize bool) return } + +func unused(...interface{}) {} diff --git a/linear_model/bayes.go b/linear_model/bayes.go index 0807574..0fa7129 100644 --- a/linear_model/bayes.go +++ b/linear_model/bayes.go @@ -1,31 +1,31 @@ package linear_model import ( - "fmt" - "github.com/gonum/floats" - "github.com/gonum/stat" - "github.com/pa-m/sklearn/base" - "gonum.org/v1/gonum/mat" - "math" + "fmt" + "github.com/gonum/floats" + "github.com/gonum/stat" + "github.com/pa-m/sklearn/base" + "gonum.org/v1/gonum/mat" + "math" ) type BayesianRidge struct { - LinearModel - base.RegressorMixin - N_iter int - Tol, Alpha_1, Alpha_2, Lambda_1, Lambda_2 float - ComputeScore, Copy_X, Verbose bool - Alpha_, Lambda_ float - Sigma_ []float - Scores_ []float + LinearModel + base.RegressorMixin + N_iter int + Tol, Alpha_1, Alpha_2, Lambda_1, Lambda_2 float + ComputeScore, Copy_X, Verbose bool + Alpha_, Lambda_ float + Sigma_ []float + Scores_ []float } func NewBayesianRidge() *BayesianRidge { - self := &BayesianRidge{LinearModel: LinearModel{FitIntercept: true}, RegressorMixin: base.RegressorMixin{}, N_iter: 300, Tol: 1e-3, Alpha_1: 1e-6, Alpha_2: 1e-6, - Lambda_1: 1e-6, Lambda_2: 1e-6, ComputeScore: false, Verbose: false, - } - self.RegressorMixin.Predicter = self - return self + self := &BayesianRidge{LinearModel: LinearModel{FitIntercept: true}, RegressorMixin: base.RegressorMixin{}, N_iter: 300, Tol: 1e-3, Alpha_1: 1e-6, Alpha_2: 1e-6, + Lambda_1: 1e-6, Lambda_2: 1e-6, ComputeScore: false, Verbose: false, + } + self.RegressorMixin.Predicter = self + return self } // Fit the model @@ -37,161 +37,161 @@ func NewBayesianRidge() *BayesianRidge { // Target values. Will be cast to X's dtype if necessary func (self *BayesianRidge) Fit(X0 [][]float, y0 []float) *BayesianRidge { - var n_samples, n_features = len(X0), len(X0[0]) - var X, y, X_offset_, y_offset_, X_scale_ = preprocess_data( - X0, y0, self.FitIntercept, self.Normalize) - self.X_offset_ = X_offset_ - self.X_scale_ = X_scale_ - alpha_ := 1. / stat.Variance(y, ones(n_samples)) - lambda_ := 1. - verbose := self.Verbose - lambda_1 := self.Lambda_1 - lambda_2 := self.Lambda_2 - alpha_1 := self.Alpha_1 - alpha_2 := self.Alpha_2 - self.Scores_ = make([]float, 0) - var coef_old_ *mat.Dense = mat.NewDense(n_features, 1, nil) - var logdet_sigma_ float - XT_y := mat.NewDense(n_features, 1, nil) - var coef_, sigma_ *mat.Dense - for j := 0; j < n_features; j++ { - var XT_yj = 0. 
- for i := range X { - XT_yj += X[i][j] * y[i] - } - XT_y.Set(j, 0, XT_yj) - } - Xm := mat.NewDense(n_samples, n_features, nil) - for i, Xi := range X { - Xm.SetRow(i, Xi) - } - var svd mat.SVD - if !svd.Factorize(Xm, mat.SVDThin) { - panic("svd failed") - } - U, S, VhT := svd.UTo(nil), svd.Values(nil), svd.VTo(nil) + var n_samples, n_features = len(X0), len(X0[0]) + var X, y, X_offset_, y_offset_, X_scale_ = preprocess_data( + X0, y0, self.FitIntercept, self.Normalize) + self.X_offset_ = X_offset_ + self.X_scale_ = X_scale_ + alpha_ := 1. / stat.Variance(y, ones(n_samples)) + lambda_ := 1. + verbose := self.Verbose + lambda_1 := self.Lambda_1 + lambda_2 := self.Lambda_2 + alpha_1 := self.Alpha_1 + alpha_2 := self.Alpha_2 + self.Scores_ = make([]float, 0) + var coef_old_ *mat.Dense = mat.NewDense(n_features, 1, nil) + var logdet_sigma_ float + XT_y := mat.NewDense(n_features, 1, nil) + var coef_, sigma_ *mat.Dense + for j := 0; j < n_features; j++ { + var XT_yj = 0. + for i := range X { + XT_yj += X[i][j] * y[i] + } + XT_y.Set(j, 0, XT_yj) + } + Xm := mat.NewDense(n_samples, n_features, nil) + for i, Xi := range X { + Xm.SetRow(i, Xi) + } + var svd mat.SVD + if !svd.Factorize(Xm, mat.SVDThin) { + panic("svd failed") + } + U, S, VhT := svd.UTo(nil), svd.Values(nil), svd.VTo(nil) + unused(U) - U = U - eigen_vals_ := make([]float, len(S), len(S)) - for j, Sj := range S { - eigen_vals_[j] = Sj * Sj - } + eigen_vals_ := make([]float, len(S), len(S)) + for j, Sj := range S { + eigen_vals_[j] = Sj * Sj + } - calcsigma := func(VhT *mat.Dense, eigen_vals_ []float, lambda_, alpha_ float) *mat.Dense { - // compute np.dot(Vh.T,Vh / (eigen_vals_ +lambda_ / alpha_)[:, np.newaxis]) - coef_ := mat.NewDense(n_features, n_features, nil) - right := mat.NewDense(n_features, n_features, nil) - for i := 0; i < n_features; i++ { - rightrow := make([]float, n_features, n_features) - mat.Col(rightrow, i, VhT) - floats.Scale(1./(eigen_vals_[i]+lambda_/alpha_), rightrow) - right.SetRow(i, rightrow) - } - coef_.Mul(VhT, right) - return coef_ - } - // dimstr := func(m *mat.Dense) string { r, c := m.Dims(); return fmt.Sprint("(%d x %d)", r, c) } - // dimstr = dimstr + calcsigma := func(VhT *mat.Dense, eigen_vals_ []float, lambda_, alpha_ float) *mat.Dense { + // compute np.dot(Vh.T,Vh / (eigen_vals_ +lambda_ / alpha_)[:, np.newaxis]) + coef_ := mat.NewDense(n_features, n_features, nil) + right := mat.NewDense(n_features, n_features, nil) + for i := 0; i < n_features; i++ { + rightrow := make([]float, n_features, n_features) + mat.Col(rightrow, i, VhT) + floats.Scale(1./(eigen_vals_[i]+lambda_/alpha_), rightrow) + right.SetRow(i, rightrow) + } + coef_.Mul(VhT, right) + return coef_ + } + // dimstr := func(m *mat.Dense) string { r, c := m.Dims(); return fmt.Sprint("(%d x %d)", r, c) } + // dimstr = dimstr - // # Convergence loop of the bayesian ridge regression - for iter_ := 0; iter_ < self.N_iter; iter_++ { - // # Compute mu and sigma - // # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X) - // # coef_ = sigma_^-1 * XT * y - if n_samples > n_features { - // coef_ = np.dot(Vh.T,Vh / (eigen_vals_ +lambda_ / alpha_)[:, np.newaxis]) - coeftmp_ := calcsigma(VhT, eigen_vals_, lambda_, alpha_) - //coef_ = np.dot(coef_, XT_y) - coef_ = mat.NewDense(n_features, 1, nil) - coef_.Mul(coeftmp_, XT_y) - if self.ComputeScore { - //logdet_sigma_ = - np.sum(np.log(lambda_ + alpha_ * eigen_vals_)) - logdet_sigma_ = 0. 
- for _, evi := range eigen_vals_ { - logdet_sigma_ += math.Log(lambda_ + alpha_*evi) - } - } - } else { // n_samples<=n_features - panic("unimplemented n_samples<=n_features") - } - // # Preserve the alpha and lambda values that were used to - // # calculate the final coefficients - self.Alpha_ = alpha_ - self.Lambda_ = lambda_ - // # Update alpha and lambda - // rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) - rmse_ := 0. - for i, Xi := range X { - sumxc := 0. - for j, Xij := range Xi { - sumxc += Xij * coef_.At(j, 0) - } - rmse_ += math.Pow(y[i]-sumxc, 2.) - } - // gamma_ = (np.sum((alpha_ * eigen_vals_) /(lambda_ + alpha_*eigen_vals_))) - gamma_ := 0. - { - var left, right []float - copy(left, eigen_vals_) + // # Convergence loop of the bayesian ridge regression + for iter_ := 0; iter_ < self.N_iter; iter_++ { + // # Compute mu and sigma + // # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X) + // # coef_ = sigma_^-1 * XT * y + if n_samples > n_features { + // coef_ = np.dot(Vh.T,Vh / (eigen_vals_ +lambda_ / alpha_)[:, np.newaxis]) + coeftmp_ := calcsigma(VhT, eigen_vals_, lambda_, alpha_) + //coef_ = np.dot(coef_, XT_y) + coef_ = mat.NewDense(n_features, 1, nil) + coef_.Mul(coeftmp_, XT_y) + if self.ComputeScore { + //logdet_sigma_ = - np.sum(np.log(lambda_ + alpha_ * eigen_vals_)) + logdet_sigma_ = 0. + for _, evi := range eigen_vals_ { + logdet_sigma_ += math.Log(lambda_ + alpha_*evi) + } + } + } else { // n_samples<=n_features + panic("unimplemented n_samples<=n_features") + } + // # Preserve the alpha and lambda values that were used to + // # calculate the final coefficients + self.Alpha_ = alpha_ + self.Lambda_ = lambda_ + // # Update alpha and lambda + // rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) + rmse_ := 0. + for i, Xi := range X { + sumxc := 0. + for j, Xij := range Xi { + sumxc += Xij * coef_.At(j, 0) + } + rmse_ += math.Pow(y[i]-sumxc, 2.) + } + // gamma_ = (np.sum((alpha_ * eigen_vals_) /(lambda_ + alpha_*eigen_vals_))) + gamma_ := 0. + { + var left, right []float + copy(left, eigen_vals_) - floats.Scale(alpha_, left) - copy(right, left) - floats.AddConst(lambda_, right) - floats.Div(left, right) - gamma_ = floats.Sum(left) - } - //lambda_ = ((gamma_ + 2*lambda_1) /(np.sum(coef_**2) + 2*lambda_2)) - lambda_ = 0. - sumcoef2 := 0. - { - var coef2 []float = make([]float, n_features, n_features) - mat.Col(coef2, 0, coef_) - floats.Mul(coef2, coef2) - sumcoef2 = floats.Sum(coef2) - lambda_ = (gamma_ + 2*lambda_1) / (sumcoef2 + 2*lambda_2) + floats.Scale(alpha_, left) + copy(right, left) + floats.AddConst(lambda_, right) + floats.Div(left, right) + gamma_ = floats.Sum(left) + } + //lambda_ = ((gamma_ + 2*lambda_1) /(np.sum(coef_**2) + 2*lambda_2)) + lambda_ = 0. + sumcoef2 := 0. + { + var coef2 []float = make([]float, n_features, n_features) + mat.Col(coef2, 0, coef_) + floats.Mul(coef2, coef2) + sumcoef2 = floats.Sum(coef2) + lambda_ = (gamma_ + 2*lambda_1) / (sumcoef2 + 2*lambda_2) - } - alpha_ = ((float(n_samples) - gamma_ + 2*alpha_1) / (rmse_ + 2*alpha_2)) - // # Compute the objective function - if self.ComputeScore { - s := lambda_1*log(lambda_) - lambda_2*lambda_ - s += alpha_1*log(alpha_) - alpha_2*alpha_ - s += 0.5 * (float(n_features)*log(lambda_) + - float(n_samples)*log(alpha_) - - alpha_*rmse_ - - (lambda_ * sumcoef2) - - logdet_sigma_ - - float(n_samples)*log(2*math.Pi)) - self.Scores_ = append(self.Scores_, s) - } - // # Check for convergence - if iter_ > 0 { - sumabsdiff := 0. 
- for j := 0; j < n_features; j++ { - sumabsdiff += math.Abs(coef_old_.At(j, 0) - coef_.At(j, 0)) - } - if sumabsdiff < self.Tol { - if verbose { - fmt.Println("Convergence after ", iter_, " iterations") - } - break - } - } + } + alpha_ = ((float(n_samples) - gamma_ + 2*alpha_1) / (rmse_ + 2*alpha_2)) + // # Compute the objective function + if self.ComputeScore { + s := lambda_1*log(lambda_) - lambda_2*lambda_ + s += alpha_1*log(alpha_) - alpha_2*alpha_ + s += 0.5 * (float(n_features)*log(lambda_) + + float(n_samples)*log(alpha_) - + alpha_*rmse_ - + (lambda_ * sumcoef2) - + logdet_sigma_ - + float(n_samples)*log(2*math.Pi)) + self.Scores_ = append(self.Scores_, s) + } + // # Check for convergence + if iter_ > 0 { + sumabsdiff := 0. + for j := 0; j < n_features; j++ { + sumabsdiff += math.Abs(coef_old_.At(j, 0) - coef_.At(j, 0)) + } + if sumabsdiff < self.Tol { + if verbose { + fmt.Println("Convergence after ", iter_, " iterations") + } + break + } + } - coef_old_.Copy(coef_) + coef_old_.Copy(coef_) - } - self.Coef_ = make([]float, n_features) - mat.Col(self.Coef_, 0, coef_) - //sigma_ = np.dot(Vh.T,Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis]) - //self.sigma_ = (1. / alpha_) * sigma_ - sigma_ = calcsigma(VhT, eigen_vals_, lambda_, alpha_) - mat.Col(self.Sigma_, 0, sigma_) - floats.Scale(1./alpha_, self.Sigma_) + } + self.Coef_ = make([]float, n_features) + mat.Col(self.Coef_, 0, coef_) + //sigma_ = np.dot(Vh.T,Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis]) + //self.sigma_ = (1. / alpha_) * sigma_ + sigma_ = calcsigma(VhT, eigen_vals_, lambda_, alpha_) + mat.Col(self.Sigma_, 0, sigma_) + floats.Scale(1./alpha_, self.Sigma_) - self._set_intercept(X_offset_, y_offset_, X_scale_) - return self - //https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/linear_model/bayes.py#L23 + self._set_intercept(X_offset_, y_offset_, X_scale_) + return self + //https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/linear_model/bayes.py#L23 } // """Predict using the linear model. @@ -207,31 +207,31 @@ func (self *BayesianRidge) Fit(X0 [][]float, y0 []float) *BayesianRidge { // Mean of predictive distribution of query points. // """ func (self *BayesianRidge) Predict(X [][]float) (y_mean []float) { - y_mean = self.DecisionFunction(X) - return + y_mean = self.DecisionFunction(X) + return } func (self *BayesianRidge) Predict2(X0 [][]float) (y_mean, y_std []float) { - y_mean = self.DecisionFunction(X0) - var X [][]float - copy(X, X0) - if self.Normalize { - for i := range X { - floats.Sub(X[i], self.X_offset_) - floats.Div(X[i], self.X_scale_) - } - } - //sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1) - sigmas_squared_data := 0. - y_std = make([]float, len(X0), len(X0)) - for i, Xi := range X { - for j, s := range self.Sigma_ { - sigmas_squared_data = Xi[j] * s * Xi[j] - } - y_std[i] = math.Sqrt(sigmas_squared_data + 1./self.Alpha_) - } - return + y_mean = self.DecisionFunction(X0) + var X [][]float + copy(X, X0) + if self.Normalize { + for i := range X { + floats.Sub(X[i], self.X_offset_) + floats.Div(X[i], self.X_scale_) + } + } + //sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1) + sigmas_squared_data := 0. 
+ y_std = make([]float, len(X0), len(X0)) + for i, Xi := range X { + for j, s := range self.Sigma_ { + sigmas_squared_data = Xi[j] * s * Xi[j] + } + y_std[i] = math.Sqrt(sigmas_squared_data + 1./self.Alpha_) + } + return } func (self *BayesianRidge) GetPredicter() base.Predicter { - return self + return self } diff --git a/linear_model/bayes_test.go b/linear_model/bayes_test.go index e402b13..62fdbe1 100644 --- a/linear_model/bayes_test.go +++ b/linear_model/bayes_test.go @@ -43,8 +43,6 @@ func TestBayesianRidge(t *testing.T) { } } -func unused(...interface{}) {} - func ExampleBayesianRidge() { var X [][]float = make([][]float, 10000) Y := make([]float, len(X)) diff --git a/preprocessing/data.go b/preprocessing/data.go index c391798..ad298f0 100644 --- a/preprocessing/data.go +++ b/preprocessing/data.go @@ -313,13 +313,15 @@ func (self *PolynomialFeatures) Fit(X [][]float, y []float) *PolynomialFeatures func (self *PolynomialFeatures) Transform(X [][]float) [][]float { Xout := make([][]float, len(X), len(X)) - for isample,Xi:=range X { - Xout[isample]=make([]float,len(self.Powers),len(self.Powers)) - for ioutput,p:=range self.Powers { - v:=1. - for j,pj:=range p {v*=math.Pow(Xi[j],float(pj))} - Xout[isample][ioutput] = v - } - } + for isample, Xi := range X { + Xout[isample] = make([]float, len(self.Powers), len(self.Powers)) + for ioutput, p := range self.Powers { + v := 1. + for j, pj := range p { + v *= math.Pow(Xi[j], float(pj)) + } + Xout[isample][ioutput] = v + } + } return Xout } diff --git a/preprocessing/data_test.go b/preprocessing/data_test.go index 310b2bf..3f855d7 100644 --- a/preprocessing/data_test.go +++ b/preprocessing/data_test.go @@ -11,26 +11,26 @@ import ( func TestMinMaxScaler(t *testing.T) { m := NewMinMaxScaler([]float{0, 1}) m.Fit([][]float{ - []float{1, 2, 3}, - []float{1, 4, 7}, - []float{1, 5, 9}, + {1, 2, 3}, + {1, 4, 7}, + {1, 5, 9}, }, nil) if !floats.EqualApprox(m.Scale, []float{1, 1. / 3, 1. 
/ 6}, 1e-6) { fmt.Println("bad scale") t.Fail() } m.Fit([][]float{ - []float{1, 2, 3}, - []float{1, 4, 7}, - []float{9, 5, 9}, + {1, 2, 3}, + {1, 4, 7}, + {9, 5, 9}, }, nil) - X := [][]float{[]float{1, 2, 3}} + X := [][]float{{1, 2, 3}} Y := m.Transform(X) if !floats.EqualApprox(Y[0], []float{0, 0, 0}, 1e-6) { fmt.Println("bad min") t.Fail() } - X = [][]float{[]float{9, 5, 9}} + X = [][]float{{9, 5, 9}} Y = m.Transform(X) if !floats.EqualApprox(Y[0], []float{1, 1, 1}, 1e-6) { fmt.Printf("bad Y=%v\n", Y) @@ -39,11 +39,11 @@ func TestMinMaxScaler(t *testing.T) { m = NewMinMaxScaler([]float{0, 10}) m.Fit([][]float{ - []float{1, 2, 3}, - []float{1, 4, 7}, - []float{9, 5, 9}, + {1, 2, 3}, + {1, 4, 7}, + {9, 5, 9}, }, nil) - X = [][]float{[]float{8, 8, 8}} + X = [][]float{{8, 8, 8}} Y = m.Transform(X) X2 := m.InverseTransform(Y) if !floats.EqualApprox(X[0], X2[0], 1e-6) { @@ -54,11 +54,11 @@ func TestMinMaxScaler(t *testing.T) { func TestStandardScaler(t *testing.T) { m := NewStandardScaler() m.Fit([][]float{ - []float{1, 2, 3}, - []float{1, 4, 7}, - []float{9, 5, 9}, + {1, 2, 3}, + {1, 4, 7}, + {9, 5, 9}, }, nil) - X := [][]float{[]float{8, 8, 8}} + X := [][]float{{8, 8, 8}} Y := m.Transform(X) //fmt.Printf("Y=%#v\n", Y) X2 := m.InverseTransform(Y) @@ -73,7 +73,7 @@ func TestPolynomialFeatures(t *testing.T) { fmt.Println("TestPolynomialFeatures") pf.IncludeBias = true pf.InteractionOnly = false - X := [][]float{[]float{1, 2, 3}} + X := [][]float{{1, 2, 3}} pf.Fit(X, nil) fmt.Printf("powers=%v\n", pf.Powers) if fmt.Sprintf("%v", pf.Powers) != "[[0 0 0] [0 0 1] [0 0 2] [0 0 3] [0 1 0] [0 1 1] [0 1 2] [0 2 0] [0 2 1] [0 3 0] [1 0 0] [1 0 1] [1 0 2] [1 1 0] [1 1 1] [1 2 0] [2 0 0] [2 0 1] [2 1 0] [3 0 0]]" { @@ -83,7 +83,7 @@ func TestPolynomialFeatures(t *testing.T) { pf.IncludeBias = true pf.InteractionOnly = true - pf.Fit([][]float{[]float{1, 2, 3}}, nil) + pf.Fit([][]float{{1, 2, 3}}, nil) fmt.Printf("powers interactiononly=%v\n", pf.Powers) if fmt.Sprintf("%v", pf.Powers) != "[[0 0 0] [0 0 1] [0 0 2] [0 0 3] [0 1 0] [0 2 0] [0 3 0] [1 0 0] [2 0 0] [3 0 0]]" { fmt.Println("failed interactiononly") @@ -92,7 +92,7 @@ func TestPolynomialFeatures(t *testing.T) { pf.IncludeBias = false pf.InteractionOnly = false - pf.Fit([][]float{[]float{1, 2, 3}}, nil) + pf.Fit([][]float{{1, 2, 3}}, nil) fmt.Printf("powers=%v\n", pf.Powers) if fmt.Sprintf("%v", pf.Powers) != "[[0 0 1] [0 0 2] [0 0 3] [0 1 0] [0 1 1] [0 1 2] [0 2 0] [0 2 1] [0 3 0] [1 0 0] [1 0 1] [1 0 2] [1 1 0] [1 1 1] [1 2 0] [2 0 0] [2 0 1] [2 1 0] [3 0 0]]" { t.Fail()
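
Reviewer note, not part of the patch: a minimal usage sketch exercising the fields renamed in base/GD.go by this commit (Coefs_ -> Coefs, L1_ratio -> L1Ratio). The package main wrapper, the toy data and the expectation that the weight slice has len(X[0])+1 entries (one bias weight, per the w := make([]float64, nFeatures+1) in Fit) are assumptions for illustration; only NewGD, Fit, Predict and the Coefs field shown in the diff above are relied on.

package main

import (
	"fmt"

	"github.com/pa-m/sklearn/base"
)

func main() {
	// toy regression data, roughly y = 2*x0 + 3*x1
	X := [][]float64{{1, 1}, {2, 1}, {3, 2}, {4, 3}, {5, 5}}
	y := []float64{5, 7, 12, 17, 25}

	// defaults after this commit: Epochs 3000, LearningRate 1e-3, Decay .95, Tol 1e-3, Momentum .5
	gd := base.NewGD()
	gd.Fit(X, y)

	// the learned weights now live in gd.Coefs (previously gd.Coefs_);
	// Fit allocates len(X[0])+1 weights, one of them acting as the bias term
	fmt.Println("coefs:", gd.Coefs)
	fmt.Println("prediction:", gd.Predict([][]float64{{6, 6}}))
}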
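
Likewise, a hedged sketch of the BayesianRidge API whose file is reindented above. The import path github.com/pa-m/sklearn/linear_model and the toy data are assumptions (the diff only confirms the sibling path github.com/pa-m/sklearn/base); NewBayesianRidge, Fit, Predict2 and the Coef_/Alpha_/Lambda_ fields all appear in the hunks above, and n_samples is kept larger than n_features because the other branch of Fit panics as unimplemented.

package main

import (
	"fmt"

	lm "github.com/pa-m/sklearn/linear_model"
)

func main() {
	// toy data: y = 1 + 2*x0 + 3*x1, with more samples than features
	X := [][]float64{{0, 0}, {1, 0}, {0, 1}, {1, 1}, {2, 1}, {1, 2}}
	y := make([]float64, len(X))
	for i, Xi := range X {
		y[i] = 1 + 2*Xi[0] + 3*Xi[1]
	}

	// defaults: N_iter 300, Tol 1e-3, alpha/lambda gamma priors 1e-6
	br := lm.NewBayesianRidge()
	br.Fit(X, y)

	fmt.Println("coef:", br.Coef_, "alpha:", br.Alpha_, "lambda:", br.Lambda_)

	// Predict returns the predictive mean; Predict2 also returns a per-sample std
	mean, std := br.Predict2([][]float64{{2, 2}})
	fmt.Println("mean:", mean, "std:", std)
}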