
Commit: Fix errors

suleyman-kaya committed Jul 29, 2024
1 parent 66d1c58 commit 5346992

Showing 2 changed files with 36 additions and 188 deletions.
162 changes: 32 additions & 130 deletions ml/svm.v
@@ -1,14 +1,12 @@
 module ml
 
-import math
-import rand
 
 pub struct SVMConfig {
 pub mut:
 	max_iterations int = 1000
 	learning_rate  f64 = 0.01
 	tolerance      f64 = 1e-6
 	c              f64 = 1.0 // Regularization parameter
 }
 
 pub struct DataPoint {
@@ -19,170 +17,74 @@ pub mut:
 
 pub struct SVMModel {
 pub mut:
-	support_vectors []DataPoint
-	alphas          []f64
-	b               f64
-	kernel          KernelFunction @[required]
-	config          SVMConfig
+	weights []f64
+	bias    f64
+	config  SVMConfig
 }
 
 pub struct SVM {
 pub mut:
 	model  &SVMModel = unsafe { nil }
-	kernel KernelFunction @[required]
 	config SVMConfig
 }
 
-type KernelFunction = fn ([]f64, []f64) f64
-
-fn vector_dot(x []f64, y []f64) f64 {
-	mut sum := 0.0
-	for i := 0; i < x.len; i++ {
-		sum += x[i] * y[i]
-	}
-	return sum
-}
-
-fn vector_subtract(x []f64, y []f64) []f64 {
-	mut result := []f64{len: x.len}
-	for i := 0; i < x.len; i++ {
-		result[i] = x[i] - y[i]
-	}
-	return result
-}
-
-pub fn linear_kernel(x []f64, y []f64) f64 {
-	return vector_dot(x, y)
-}
-
-pub fn polynomial_kernel(degree int) KernelFunction {
-	return fn [degree] (x []f64, y []f64) f64 {
-		return math.pow(vector_dot(x, y) + 1.0, f64(degree))
-	}
-}
-
-pub fn rbf_kernel(gamma f64) KernelFunction {
-	return fn [gamma] (x []f64, y []f64) f64 {
-		diff := vector_subtract(x, y)
-		return math.exp(-gamma * vector_dot(diff, diff))
-	}
-}
-
-pub fn SVM.new(kernel KernelFunction, config SVMConfig) &SVM {
+pub fn SVM.new(config SVMConfig) &SVM {
 	return &SVM{
-		kernel: kernel
 		config: config
 	}
 }
 
 pub fn (mut s SVM) train(data []DataPoint) {
-	s.model = train_svm(data, s.kernel, s.config)
+	s.model = train_svm(data, s.config)
 }
 
 pub fn (s &SVM) predict(x []f64) int {
 	return predict(s.model, x)
 }
 
-pub fn train_svm(data []DataPoint, kernel KernelFunction, config SVMConfig) &SVMModel {
+fn vector_dot(x []f64, y []f64) f64 {
+	mut sum := 0.0
+	for i := 0; i < x.len; i++ {
+		sum += x[i] * y[i]
+	}
+	return sum
+}
+
+pub fn train_svm(data []DataPoint, config SVMConfig) &SVMModel {
 	mut model := &SVMModel{
-		support_vectors: []DataPoint{}
-		alphas: []f64{len: data.len, init: 0.0}
-		b: 0.0
-		kernel: kernel
+		weights: []f64{len: data[0].x.len, init: 0.0}
+		bias: 0.0
 		config: config
 	}
 
-	mut passes := 0
-	for {
-		mut num_changed_alphas := 0
-		for i in 0 .. data.len {
-			ei := predict_raw(model, data[i].x) - f64(data[i].y)
-			if (data[i].y * ei < -model.config.tolerance && model.alphas[i] < model.config.c)
-				|| (data[i].y * ei > model.config.tolerance && model.alphas[i] > 0) {
-				j := rand.int_in_range(0, data.len - 1) or { panic(err) }
-				ej := predict_raw(model, data[j].x) - f64(data[j].y)
-
-				alpha_i_old := model.alphas[i]
-				alpha_j_old := model.alphas[j]
-
-				mut l, mut h := 0.0, 0.0
-				if data[i].y != data[j].y {
-					l = math.max(0.0, model.alphas[j] - model.alphas[i])
-					h = math.min(model.config.c, model.config.c + model.alphas[j] - model.alphas[i])
-				} else {
-					l = math.max(0.0, model.alphas[i] + model.alphas[j] - model.config.c)
-					h = math.min(model.config.c, model.alphas[i] + model.alphas[j])
-				}
-
-				if l == h {
-					continue
-				}
-
-				eta := 2 * model.kernel(data[i].x, data[j].x) - model.kernel(data[i].x,
-					data[i].x) - model.kernel(data[j].x, data[j].x)
-
-				if eta >= 0 {
-					continue
-				}
-
-				model.alphas[j] = alpha_j_old - f64(data[j].y) * (ei - ej) / eta
-				model.alphas[j] = math.max(l, math.min(h, model.alphas[j]))
-
-				if math.abs(model.alphas[j] - alpha_j_old) < 1e-5 {
-					continue
-				}
-
-				model.alphas[i] = alpha_i_old +
-					f64(data[i].y * data[j].y) * (alpha_j_old - model.alphas[j])
-
-				b1 := model.b - ei - f64(data[i].y) * (model.alphas[i] - alpha_i_old) * model.kernel(data[i].x,
-					data[i].x) - f64(data[j].y) * (model.alphas[j] - alpha_j_old) * model.kernel(data[i].x,
-					data[j].x)
-
-				b2 := model.b - ej - f64(data[i].y) * (model.alphas[i] - alpha_i_old) * model.kernel(data[i].x,
-					data[j].x) - f64(data[j].y) * (model.alphas[j] - alpha_j_old) * model.kernel(data[j].x,
-					data[j].x)
-
-				if 0 < model.alphas[i] && model.alphas[i] < model.config.c {
-					model.b = b1
-				} else if 0 < model.alphas[j] && model.alphas[j] < model.config.c {
-					model.b = b2
-				} else {
-					model.b = (b1 + b2) / 2
-				}
-
-				num_changed_alphas++
-			}
-		}
-
-		if num_changed_alphas == 0 {
-			passes++
-		} else {
-			passes = 0
-		}
-
-		if passes >= model.config.max_iterations {
+	for _ in 0 .. config.max_iterations {
+		mut cost := 0.0
+		for point in data {
+			prediction := vector_dot(model.weights, point.x) + model.bias
+			margin := f64(point.y) * prediction
+
+			if margin < 1 {
+				for i in 0 .. model.weights.len {
+					model.weights[i] += config.learning_rate * (f64(point.y) * point.x[i] - 2 * config.tolerance * model.weights[i])
+				}
+				model.bias += config.learning_rate * f64(point.y)
+				cost += 1 - margin
+			} else {
+				for i in 0 .. model.weights.len {
+					model.weights[i] -= config.learning_rate * 2 * config.tolerance * model.weights[i]
+				}
+			}
+		}
+
+		if cost == 0 {
 			break
 		}
 	}
 
-	for i in 0 .. data.len {
-		if model.alphas[i] > 0 {
-			model.support_vectors << data[i]
-		}
-	}
-
 	return model
 }
 
-fn predict_raw(model &SVMModel, x []f64) f64 {
-	mut sum := 0.0
-	for i, sv in model.support_vectors {
-		sum += model.alphas[i] * f64(sv.y) * model.kernel(x, sv.x)
-	}
-	return sum + model.b
-}
-
 pub fn predict(model &SVMModel, x []f64) int {
-	return if predict_raw(model, x) >= 0 { 1 } else { -1 }
+	prediction := vector_dot(model.weights, x) + model.bias
+	return if prediction >= 0 { 1 } else { -1 }
 }
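
Net effect on ml/svm.v: the kernelized, SMO-style dual solver (support vectors, alphas, bias b, and the linear_kernel/polynomial_kernel/rbf_kernel constructors) is replaced by a primal linear SVM fitted by sub-gradient descent on the hinge loss, and predict now simply thresholds vector_dot(model.weights, x) + model.bias at zero. Note that the new update uses config.tolerance as its weight-decay coefficient and never reads config.c, so the regularization parameter is left unused by this commit. A minimal usage sketch of the post-commit API follows; the vsl.ml import path and the toy data are illustrative assumptions, not part of the commit:

module main

import vsl.ml

fn main() {
	// Toy, linearly separable data, in the same shape the repo's tests use.
	data := [
		ml.DataPoint{[2.0, 3.0], 1},
		ml.DataPoint{[3.0, 4.0], 1},
		ml.DataPoint{[1.0, 1.0], -1},
		ml.DataPoint{[0.0, 0.0], -1},
	]

	// Post-commit API: no kernel argument, only a config.
	mut svm := ml.SVM.new(ml.SVMConfig{})
	svm.train(data)

	// The commit's own tests assert correct labels on the training points.
	assert svm.predict([2.0, 3.0]) == 1
	assert svm.predict([0.0, 0.0]) == -1
	println('linear SVM classified the training probes correctly')
}
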
62 changes: 4 additions & 58 deletions ml/svm_test.v
@@ -9,48 +9,14 @@ fn test_vector_dot() {
 	assert math.abs(result - 32.0) < 1e-6
 }
 
-fn test_vector_subtract() {
-	x := [1.0, 2.0, 3.0]
-	y := [4.0, 5.0, 6.0]
-	result := vector_subtract(x, y)
-	assert result == [-3.0, -3.0, -3.0]
-}
-
-fn test_linear_kernel() {
-	x := [1.0, 2.0, 3.0]
-	y := [4.0, 5.0, 6.0]
-	result := linear_kernel(x, y)
-	assert math.abs(result - 32.0) < 1e-6
-}
-
-fn test_polynomial_kernel() {
-	x := [1.0, 2.0, 3.0]
-	y := [4.0, 5.0, 6.0]
-	kernel := polynomial_kernel(3)
-	result := kernel(x, y)
-	expected := math.pow(32.0 + 1.0, 3)
-	assert math.abs(result - expected) < 1e-6
-}
-
-fn test_rbf_kernel() {
-	x := [1.0, 2.0, 3.0]
-	y := [4.0, 5.0, 6.0]
-	gamma := 0.5
-	kernel := rbf_kernel(gamma)
-	result := kernel(x, y)
-	expected := math.exp(-gamma * 27.0)
-	assert math.abs(result - expected) < 1e-6
-}
-
 fn test_svm_new() {
 	config := SVMConfig{}
-	svm := SVM.new(linear_kernel, config)
-	assert svm.kernel == linear_kernel
+	svm := SVM.new(config)
 	assert svm.config == config
 }
 
 fn test_svm_train_and_predict() {
-	mut svm := SVM.new(linear_kernel, SVMConfig{})
+	mut svm := SVM.new(SVMConfig{})
 	data := [
 		DataPoint{[2.0, 3.0], 1},
 		DataPoint{[1.0, 1.0], -1},
@@ -73,29 +39,14 @@ fn test_train_svm() {
 		DataPoint{[0.0, 0.0], -1},
 	]
 	config := SVMConfig{}
-	model := train_svm(data, linear_kernel, config)
+	model := train_svm(data, config)
 
 	for point in data {
 		prediction := predict(model, point.x)
 		assert prediction == point.y
 	}
 }
 
-fn test_predict_raw() {
-	data := [
-		DataPoint{[2.0, 3.0], 1},
-		DataPoint{[1.0, 1.0], -1},
-	]
-	config := SVMConfig{}
-	model := train_svm(data, linear_kernel, config)
-
-	result := predict_raw(model, [2.0, 3.0])
-	assert result > 0
-
-	result2 := predict_raw(model, [1.0, 1.0])
-	assert result2 < 0
-}
-
 fn test_predict() {
 	data := [
 		DataPoint{[2.0, 3.0], 1},
@@ -104,7 +55,7 @@ fn test_predict() {
 		DataPoint{[0.0, 0.0], -1},
 	]
 	config := SVMConfig{}
-	model := train_svm(data, linear_kernel, config)
+	model := train_svm(data, config)
 
 	for point in data {
 		prediction := predict(model, point.x)
@@ -114,14 +65,9 @@
 
 fn main() {
 	test_vector_dot()
-	test_vector_subtract()
-	test_linear_kernel()
-	test_polynomial_kernel()
-	test_rbf_kernel()
 	test_svm_new()
 	test_svm_train_and_predict()
 	test_train_svm()
-	test_predict_raw()
 	test_predict()
 	println('All tests passed successfully!')
 }
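
The surviving tests double as usage examples for the new API: all kernel tests and the predict_raw test go away because those symbols no longer exist. As a sketch only (a hypothetical extra check, not part of the commit), one more test could pin down the new decision rule explicitly:

fn test_decision_rule_is_linear() {
	data := [
		DataPoint{[1.0, 0.0], 1},
		DataPoint{[-1.0, 0.0], -1},
	]
	model := train_svm(data, SVMConfig{})
	// predict() thresholds vector_dot(model.weights, x) + model.bias at zero,
	// so the two training points must land on opposite sides.
	assert predict(model, [1.0, 0.0]) == 1
	assert predict(model, [-1.0, 0.0]) == -1
}
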
