【问题标题】:Implementing a gradient descent实现梯度下降
【发布时间】:2019-11-13 23:29:33
【问题描述】:

我正在尝试在 Go 中实现梯度下降。我的目标是根据行驶里程预测汽车的成本。 这是我的数据集:

km,price
240000,3650
139800,3800
150500,4400
185530,4450
176000,5250
114800,5350
166800,5800
89000,5990
144500,5999
84000,6200
82029,6390
63060,6390
74000,6600
97500,6800
67000,6800
76025,6900
48235,6900
93000,6990
60949,7490
65674,7555
54000,7990
68500,7990
22899,7990
61789,8290

我尝试了各种方法，例如规范化数据集、不规范化数据集、保持 thetas 不变、反规范化 thetas……但我无法得到正确的结果。我的数学一定是在某个地方出了错，但我不知道错在哪里。我试图得到的结果应该是大约 t0 = 8500，t1 = -0.02。我的实现如下：

package main

import (
    "encoding/csv"
    "fmt"
    "log"
    "math"
    "os"
    "strconv"
)

const (
    dataFile     = "data.csv" // input dataset: "km,price" rows preceded by a header line
    iterations   = 20000      // number of gradient-descent update steps
    learningRate = 0.1        // step-size multiplier applied to each gradient update
)

// dataSet holds the training samples; each row is [km, price].
type dataSet [][]float64

// Extremes recorded by normalize() so the learned thetas can later be
// mapped back from normalized space to original units.
var minKm, maxKm, minPrice, maxPrice float64

// getExtremes scans one column of the data set and returns its minimum and
// maximum values. Starting from +Inf/-Inf makes the first element win both
// comparisons, so an empty set yields (+Inf, -Inf).
func (d dataSet) getExtremes(column int) (min float64, max float64) {
	min, max = math.Inf(1), math.Inf(-1)
	for _, record := range d {
		v := record[column]
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
	}
	return min, max
}

// normalizeItem linearly rescales item from [min, max] to [0, 1].
func normalizeItem(item, min, max float64) float64 {
	span := max - min
	return (item - min) / span
}

// normalize rescales both columns into [0, 1] in place, remembering the
// per-column extremes in the package-level variables so that the learned
// thetas can be denormalized afterwards.
func (d *dataSet) normalize() {
	minKm, maxKm = d.getExtremes(0)
	minPrice, maxPrice = d.getExtremes(1)
	for _, record := range *d {
		record[0] = normalizeItem(record[0], minKm, maxKm)
		record[1] = normalizeItem(record[1], minPrice, maxPrice)
	}
}

// processEntry converts one CSV record into a [km, price] float pair,
// terminating the program on malformed input.
func processEntry(entry []string) []float64 {
	if len(entry) != 2 {
		log.Fatalln("expected two fields")
	}
	values := make([]float64, 2)
	for i, field := range entry {
		v, err := strconv.ParseFloat(field, 64)
		if err != nil {
			log.Fatalln(err)
		}
		values[i] = v
	}
	return values
}

// getData reads dataFile and returns its rows (minus the header) as a
// dataSet. Any I/O or parse failure terminates the program.
func getData() dataSet {
	file, err := os.Open(dataFile)
	if err != nil {
		log.Fatalln(err)
	}
	// The original version leaked the file handle; close it once parsed.
	defer file.Close()

	reader := csv.NewReader(file)
	entries, err := reader.ReadAll()
	if err != nil {
		log.Fatalln(err)
	}
	// Guard against an empty file: entries[1:] would panic otherwise.
	if len(entries) < 2 {
		log.Fatalln("expected a header line and at least one data row")
	}
	entries = entries[1:] // drop the "km,price" header

	data := make(dataSet, len(entries))
	for k, entry := range entries {
		data[k] = processEntry(entry)
	}
	return data
}

// outputResult writes the learned thetas to weights.csv, overwriting any
// previous contents.
//
// Bug fix: the original opened the file with only os.O_WRONLY, which fails
// with "no such file or directory" on the first run because O_CREATE was
// missing. O_TRUNC also replaces the manual Truncate(0)/Seek(0, 0) dance,
// and the write error is no longer silently discarded.
func outputResult(theta0, theta1 float64) {
	file, err := os.OpenFile("weights.csv", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()
	if _, err := fmt.Fprintf(file, "theta0,%.6f\ntheta1,%.6f\n", theta0, theta1); err != nil {
		log.Fatalln(err)
	}
}

// estimatePrice evaluates the linear hypothesis price ≈ θ0 + θ1·mileage.
func estimatePrice(theta0, theta1, mileage float64) float64 {
	return theta1*mileage + theta0
}

// computeThetas performs one batch gradient-descent step and returns the
// updated (theta0, theta1). The gradients are the mean residual and the
// mean residual weighted by mileage, each scaled by learningRate.
func (d dataSet) computeThetas(theta0, theta1 float64) (float64, float64) {
	m := float64(len(d))
	grad0, grad1 := 0.0, 0.0
	for _, record := range d {
		km, price := record[0], record[1]
		residual := estimatePrice(theta0, theta1, km) - price
		grad0 += residual
		grad1 += residual * km
	}
	return theta0 - (grad0 / m * learningRate), theta1 - (grad1 / m * learningRate)
}

// denormalize is the inverse of normalizeItem: it maps a value from
// [0, 1] back onto the original [min, max] range.
func denormalize(theta, min, max float64) float64 {
	span := max - min
	return min + theta*span
}

// main trains a simple linear model by gradient descent on the normalized
// data and writes the thetas, expressed in original units, to weights.csv.
//
// Bug fix: the thetas are learned in NORMALIZED space, where
//
//	priceNorm = t0 + t1*kmNorm
//
// with kmNorm = (km-minKm)/(maxKm-minKm) and the analogous mapping for
// price. The original code denormalized t0 with the km range and t1 with
// the price range, which is meaningless. Substituting the mappings and
// solving for price as a function of km gives:
//
//	slope     = t1 * (maxPrice-minPrice) / (maxKm-minKm)
//	intercept = t0 * (maxPrice-minPrice) + minPrice - slope*minKm
//
// which yields the expected t0 ≈ 8500, t1 ≈ -0.02 on the sample data.
func main() {
	data := getData()
	data.normalize()
	theta0, theta1 := 0.0, 0.0
	for k := 0; k < iterations; k++ {
		theta0, theta1 = data.computeThetas(theta0, theta1)
	}
	priceRange := maxPrice - minPrice
	kmRange := maxKm - minKm
	slope := theta1 * priceRange / kmRange
	intercept := denormalize(theta0, minPrice, maxPrice) - slope*minKm
	outputResult(intercept, slope)
}

为了正确实现梯度下降,我应该解决什么问题?

【问题讨论】:

  • 好吧,在你的代码中,你根本就没有任何类似于线性回归的东西。实现en.wikipedia.org/wiki/… 应该很简单。

标签: go machine-learning gradient-descent


【解决方案1】:

Linear Regression 真的很简单:

// yi = alpha + beta*xi + ei
// Ordinary least squares for a single regressor: the slope beta is the
// covariance of x and y divided by the variance of x, and the intercept
// alpha follows from the sample means. expected/expectedXY compute E[X]
// and E[XY] (defined in the full program below).
func linearRegression(x, y []float64) (float64, float64) {
    EX := expected(x)
    EY := expected(y)
    EXY := expectedXY(x, y)
    EXX := expectedXY(x, x)

    covariance := EXY - EX*EY
    variance := EXX - EX*EX
    beta := covariance / variance
    alpha := EY - beta*EX
    return alpha, beta
}

试试here,输出:

8499.599649933218 -0.021448963591702314 396270.87871142407

代码:

package main

import (
    "encoding/csv"
    "fmt"
    "strconv"
    "strings"
)

// main loads the mileage/price samples, fits a least-squares line, and
// prints intercept, slope, and the mileage at which the predicted price
// would hit zero (-alpha/beta).
func main() {
	kms, prices := readXY(`data.csv`)
	alpha, beta := linearRegression(kms, prices)
	fmt.Println(alpha, beta, -alpha/beta) // 8499.599649933218 -0.021448963591702314 396270.87871142407
}

// https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model
// linearRegression fits the model yi = alpha + beta*xi + ei by ordinary
// least squares and returns (alpha, beta): the slope is cov(x, y)/var(x),
// and the intercept is chosen so the line passes through the means.
func linearRegression(x, y []float64) (float64, float64) {
	meanX := expected(x)
	meanY := expected(y)
	meanXY := expectedXY(x, y)
	meanXX := expectedXY(x, x)

	covariance := meanXY - meanX*meanY
	variance := meanXX - meanX*meanX
	beta := covariance / variance
	alpha := meanY - beta*meanX
	return alpha, beta
}

// expected returns the sample mean E[X] of x.
func expected(x []float64) float64 {
	var total float64
	for _, v := range x {
		total += v
	}
	return total / float64(len(x))
}

// expectedXY returns E[XY], the mean of the element-wise products of the
// two equally-sized samples.
func expectedXY(x, y []float64) float64 {
	var total float64
	for i := range x {
		total += x[i] * y[i]
	}
	return total / float64(len(x))
}

// readXY parses the CSV samples into parallel x (km) and y (price) slices.
// The filename argument is ignored: the embedded `data` string stands in
// for the file so the example also runs on the Go Playground, which has no
// filesystem access.
func readXY(filename string) ([]float64, []float64) {
	reader := csv.NewReader(strings.NewReader(data))
	records, err := reader.ReadAll()
	if err != nil {
		panic(err)
	}
	records = records[1:] // drop the "km,price" header row
	x := make([]float64, len(records))
	y := make([]float64, len(records))
	for i, record := range records {
		var err error
		if x[i], err = strconv.ParseFloat(record[0], 64); err != nil {
			panic(err)
		}
		if y[i], err = strconv.ParseFloat(record[1], 64); err != nil {
			panic(err)
		}
	}
	return x, y
}

// data embeds the training set (mileage in km, price) so the example is
// fully self-contained; readXY parses it instead of touching the disk.
var data = `km,price
240000,3650
139800,3800
150500,4400
185530,4450
176000,5250
114800,5350
166800,5800
89000,5990
144500,5999
84000,6200
82029,6390
63060,6390
74000,6600
97500,6800
67000,6800
76025,6900
48235,6900
93000,6990
60949,7490
65674,7555
54000,7990
68500,7990
22899,7990
61789,8290`

Gradient descent 基于以下观察：如果多变量函数 F(x) 在点 a 的邻域内有定义且可微，那么从 a 出发、沿负梯度方向 -∇F(a) 前进时，F(x) 下降得最快。例如：

// F(x): the target function to minimize. NOTE(review): alpha and beta are
// assumed to be defined in the surrounding scope — confirm against the
// full example this snippet was taken from.
f := func(x float64) float64 {
    return alpha + beta*x // write your target function here
}

导数函数:

h := 0.000001 // step width for the numerical derivative below
// Derivative function ∇F(x), approximated by the central difference
// (f(x+h) - f(x-h)) / 2h rather than an analytic derivative.
df := func(x float64) float64 {
    return (f(x+h) - f(x-h)) / (2 * h) // write your target function derivative here
}

搜索:

minimunAt := 1.0       // We start the search here
gamma := 0.01          // Step size multiplier (the learning rate)
precision := 0.0000001 // Desired precision of result
max := 100000          // Maximum number of iterations
currentX := 0.0
step := 0.0
// Classic gradient-descent loop: repeatedly step against the derivative
// (x ← x − γ·∇F(x)) until the update is smaller than the precision or the
// iteration budget runs out.
for i := 0; i < max; i++ {
    currentX = minimunAt
    minimunAt = currentX - gamma*df(currentX)
    step = minimunAt - currentX
    if math.Abs(step) <= precision {
        break
    }
}

fmt.Printf("Minimum at %.8f value: %v\n", minimunAt, f(minimunAt))

【讨论】:

  • 我很抱歉,我想我把名字弄错了。我认为我正在寻找的是梯度下降,因为应该有学习率和迭代的概念。
  • 查看新的编辑并尝试here
猜你喜欢
  • 1970-01-01
  • 2016-09-25
  • 2014-03-14
  • 2012-05-22
  • 2021-02-20
  • 2011-07-04
  • 2019-08-08
  • 2015-07-10
  • 2020-11-15
相关资源
最近更新 更多