neural

package module
v0.0.0-...-f22b9ab
Published: Aug 25, 2018 License: BSD-3-Clause Imports: 4 Imported by: 1

README


Neural

Neural is a vectorized feedforward neural network implementation that supports multiple layers. Other features include dropout, regression, and a tanh activation function.
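
The README does not include a self-contained snippet, so here is a minimal end-to-end sketch. It assumes, from the Train signature and the examples below, that training data is a slice of {input, target} pairs and that source simply returns the same data each iteration:

rand.Seed(0)

// XOR truth table; each pattern is an {input, target} pair.
patterns := [][][]float32{
	{{0, 0}, {0}},
	{{0, 1}, {1}},
	{{1, 0}, {1}},
	{{1, 1}, {0}},
}
source := func(iteration int) [][][]float32 { return patterns }

config := func(neural *Neural32) {
	neural.Init(WeightInitializer32FanIn, 2, 2, 1)
}
n := NewNeural32(config)
n.Train(source, 1000, 0.6, 0.4)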

Documentation

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

func WeightInitializer32Basic

func WeightInitializer32Basic(in, out int) float32

WeightInitializer32Basic performs basic weight initialization

func WeightInitializer32FanIn

func WeightInitializer32FanIn(in, out int) float32

WeightInitializer32FanIn performs fan-in weight initialization

func WeightInitializer32FanInFanOut

func WeightInitializer32FanInFanOut(in, out int) float32

WeightInitializer32FanInFanOut performs fan-in/fan-out weight initialization
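
The exact formulas behind these three initializers are not documented on this page. As an illustration only (not the package's verified implementations), the schemes conventionally take shapes like the following, per the Stack Exchange link under WeightInitializer32 below:

// basic: small uniform weights, ignoring layer sizes
basic := func(in, out int) float32 {
	return rand.Float32()*2 - 1 // U(-1, 1)
}
// fan-in: shrink the range by 1/sqrt(in)
fanIn := func(in, out int) float32 {
	return (rand.Float32()*2 - 1) / float32(math.Sqrt(float64(in)))
}
// fan-in/fan-out: Glorot-style bound sqrt(6/(in+out))
fanInFanOut := func(in, out int) float32 {
	bound := float32(math.Sqrt(6 / float64(in+out)))
	return (rand.Float32()*2 - 1) * bound
}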

Types

type Context32

type Context32 struct {
	*Neural32
	Activations [][]float32
}

Context32 is an inference context for a Neural32
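
A sketch of the inference lifecycle using only the methods documented below, assuming n is a trained *Neural32:

c := n.NewContext()
c.SetInput([]float32{0, 1})
c.Infer()
fmt.Println(c.GetOutput())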

func (*Context32) BackPropagate

func (c *Context32) BackPropagate(targets []float32, lRate, mFactor float32) float32

BackPropagate runs the backpropagation algorithm
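
Train drives this loop internally; a manual training step would presumably look like the sketch below, where n is a *Neural32, input and target are one training pattern, and the returned float32 is taken to be the per-step error (an assumption; the return value is not documented):

c := n.NewContext()
for i := 0; i < 1000; i++ {
	c.SetInput(input)
	c.Infer()
	e := c.BackPropagate(target, 0.6, 0.4) // lRate = 0.6, mFactor = 0.4
	_ = e // assumed: error for this step
}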

func (*Context32) GetOutput

func (c *Context32) GetOutput() []float32

GetOutput gets the output of the neural network

func (*Context32) Infer

func (c *Context32) Infer()

Infer runs inference

func (*Context32) InferWithT

func (c *Context32) InferWithT()

InferWithT runs inference, applying a transform between layers

func (*Context32) SetInput

func (c *Context32) SetInput(input []float32)

SetInput sets the input to the neural network

type Function32

type Function32 func(x float32) float32

Function32 defines a function that takes a float32 and returns a float32

type FunctionPair32

type FunctionPair32 struct {
	F, T, DF Function32
}

FunctionPair32 holds an activation function F, its derivative DF, and a transform T used for inference during training
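
A hypothetical custom pair built on the logistic function. Whether DF receives the raw input or the activated output is not documented; this sketch assumes the activated output y, a common convention since sigmoid'(x) = y(1-y):

pair := FunctionPair32{
	F: func(x float32) float32 { // activation
		return 1 / (1 + float32(math.Exp(float64(-x))))
	},
	DF: func(y float32) float32 { // derivative, assuming y = F(x)
		return y * (1 - y)
	},
	T: func(x float32) float32 { // identity transform for inference
		return x
	},
}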

type Neural32

type Neural32 struct {
	Layers    []int
	Weights   [][][]float32
	Changes   [][][]float32
	Functions []FunctionPair32
}

Neural32 is a 32-bit neural network

Example
rand.Seed(0)

config := func(neural *Neural32) {
	neural.Init(WeightInitializer32FanIn, 2, 2, 1)
}
n := NewNeural32(config)

n.Train(source, 1000, 0.6, 0.4)

n.test(patterns)
Output:

[0 0] -> [0.057274867]  :  [0]
[0 1] -> [0.9332078]  :  [1]
[1 0] -> [0.93215084]  :  [1]
[1 1] -> [0.08947442]  :  [0]
Example (Second)
rand.Seed(0)

config := func(neural *Neural32) {
	neural.Init(WeightInitializer32FanIn, 2, 2, 2, 1)
}
n := NewNeural32(config)

n.Train(source, 10000, 0.6, 0.4)

n.test(patterns)
Output:

[0 0] -> [0.01086853]  :  [0]
[0 1] -> [0.9948494]  :  [1]
[1 0] -> [0.98888844]  :  [1]
[1 1] -> [0.010121063]  :  [0]

func NewNeural32

func NewNeural32(config func(neural *Neural32)) *Neural32

NewNeural32 creates a neural network with the given configuration

func (*Neural32) EnableDropout

func (n *Neural32) EnableDropout(probability float32)

EnableDropout enables dropout-based regularization. See: http://iamtrask.github.io/2015/07/28/dropout/

Example
rand.Seed(0)

config := func(neural *Neural32) {
	neural.Init(WeightInitializer32FanIn, 2, 8, 1)
	neural.EnableDropout(.2)
}
n := NewNeural32(config)
size := len(patterns)
randomized := make([][][]float32, size)
copy(randomized, patterns)
src := func(iterations int) [][][]float32 {
	for i := 0; i < size; i++ {
		j := i + rand.Intn(size-i)
		randomized[i], randomized[j] = randomized[j], randomized[i]
	}
	return randomized
}
n.Train(src, 10000, 0.6, 0.4)

n.test(patterns)
Output:

[0 0] -> [0.00061443914]  :  [0]
[0 1] -> [0.9990952]  :  [1]
[1 0] -> [0.9832545]  :  [1]
[1 1] -> [0.0011786821]  :  [0]

func (*Neural32) EnableRegression

func (n *Neural32) EnableRegression()

EnableRegression removes the activation function from the last layer so that regression is performed

Example
rand.Seed(0)

config := func(neural *Neural32) {
	neural.Init(WeightInitializer32FanIn, 2, 2, 1)
	neural.EnableRegression()
}
n := NewNeural32(config)

n.Train(source, 1000, 0.6, 0.4)

n.test(patterns)
Output:

[0 0] -> [0.00039592385]  :  [0]
[0 1] -> [1.0000901]  :  [1]
[1 0] -> [1.0000368]  :  [1]
[1 1] -> [6.206334e-05]  :  [0]

func (*Neural32) Init

func (n *Neural32) Init(initializer WeightInitializer32, layers ...int)

Init initializes the neural network with the given weight initializer and layer sizes, from the input layer through to the output layer

func (*Neural32) NewContext

func (n *Neural32) NewContext() *Context32

NewContext creates a new inference context from the given neural network

func (*Neural32) Train

func (n *Neural32) Train(source func(iteration int) [][][]float32, iterations int, lRate, mFactor float32) []float32

Train trains the neural network for the given number of iterations, calling source with the current iteration to obtain training data
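
The meaning of the returned []float32 is not documented; from the signature it is presumably the training error recorded over the run, as assumed in this sketch:

errs := n.Train(source, 1000, 0.6, 0.4)
fmt.Printf("final error: %f\n", errs[len(errs)-1]) // assumed: error per iteration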

func (*Neural32) UseTanh

func (n *Neural32) UseTanh()

UseTanh uses tanh as the activation function

Example
rand.Seed(0)

config := func(neural *Neural32) {
	neural.Init(WeightInitializer32FanIn, 2, 2, 1)
	neural.UseTanh()
}
n := NewNeural32(config)
size := len(patternsTanh)
randomized := make([][][]float32, size)
copy(randomized, patternsTanh)
src := func(iterations int) [][][]float32 {
	for i := 0; i < size; i++ {
		j := i + rand.Intn(size-i)
		randomized[i], randomized[j] = randomized[j], randomized[i]
	}
	return randomized
}
n.Train(src, 1000, 0.6, 0.4)

n.test(patternsTanh)
Output:

[-1 -1] -> [-0.9955744]  :  [-1]
[-1 1] -> [0.9861643]  :  [1]
[1 -1] -> [0.9856719]  :  [1]
[1 1] -> [-0.9887588]  :  [-1]

type WeightInitializer32

type WeightInitializer32 func(in, out int) float32

WeightInitializer32 is a function that initializes the neural network weights. See: http://stats.stackexchange.com/questions/47590/what-are-good-initial-weights-in-a-neural-network
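
Any function with this signature can be passed to Init, so custom schemes are possible. A hypothetical Gaussian fan-in initializer:

gaussianFanIn := func(in, out int) float32 {
	return float32(rand.NormFloat64()) / float32(math.Sqrt(float64(in)))
}
config := func(neural *Neural32) {
	neural.Init(gaussianFanIn, 2, 2, 1)
}
n := NewNeural32(config)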
