Documentation
¶
Index ¶
- func WeightInitializer32Basic(in, out int) float32
- func WeightInitializer32FanIn(in, out int) float32
- func WeightInitializer32FanInFanOut(in, out int) float32
- type Context32
- type Function32
- type FunctionPair32
- type Neural32
- func (n *Neural32) EnableDropout(probability float32)
- func (n *Neural32) EnableRegression()
- func (n *Neural32) Init(initializer WeightInitializer32, layers ...int)
- func (n *Neural32) NewContext() *Context32
- func (n *Neural32) Train(source func(iteration int) [][][]float32, iterations int, ...) []float32
- func (n *Neural32) UseTanh()
- type WeightInitializer32
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func WeightInitializer32Basic ¶
WeightInitializer32Basic performs basic weight initialization
func WeightInitializer32FanIn ¶
WeightInitializer32FanIn performs fan-in weight initialization
func WeightInitializer32FanInFanOut ¶
WeightInitializer32FanInFanOut performs fan-in/fan-out weight initialization
Types ¶
type Context32 ¶
Context32 is an inference context
func (*Context32) BackPropagate ¶
BackPropagate runs the backpropagation algorithm
func (*Context32) InferWithT ¶
func (c *Context32) InferWithT()
InferWithT runs inference, applying a transform between layers
type Function32 ¶
Function32 defines a function that takes a float32 and returns a float32
type FunctionPair32 ¶
type FunctionPair32 struct {
F, T, DF Function32
}
FunctionPair32 represents a function, a derivative of the function, and a transform used for inference during training
type Neural32 ¶
type Neural32 struct {
Layers []int
Weights [][][]float32
Changes [][][]float32
Functions []FunctionPair32
}
Neural32 is a 32-bit neural network
Example ¶
rand.Seed(0)
config := func(neural *Neural32) {
neural.Init(WeightInitializer32FanIn, 2, 2, 1)
}
n := NewNeural32(config)
n.Train(source, 1000, 0.6, 0.4)
n.test(patterns)
Output: [0 0] -> [0.057274867] : [0] [0 1] -> [0.9332078] : [1] [1 0] -> [0.93215084] : [1] [1 1] -> [0.08947442] : [0]
Example (Second) ¶
rand.Seed(0)
config := func(neural *Neural32) {
neural.Init(WeightInitializer32FanIn, 2, 2, 2, 1)
}
n := NewNeural32(config)
n.Train(source, 10000, 0.6, 0.4)
n.test(patterns)
Output: [0 0] -> [0.01086853] : [0] [0 1] -> [0.9948494] : [1] [1 0] -> [0.98888844] : [1] [1 1] -> [0.010121063] : [0]
func NewNeural32 ¶
NewNeural32 creates a neural network with the given configuration
func (*Neural32) EnableDropout ¶
EnableDropout enables dropout-based regularization. See: http://iamtrask.github.io/2015/07/28/dropout/
Example ¶
rand.Seed(0)
config := func(neural *Neural32) {
neural.Init(WeightInitializer32FanIn, 2, 8, 1)
neural.EnableDropout(.2)
}
n := NewNeural32(config)
size := len(patterns)
randomized := make([][][]float32, size)
copy(randomized, patterns)
src := func(iterations int) [][][]float32 {
for i := 0; i < size; i++ {
j := i + rand.Intn(size-i)
randomized[i], randomized[j] = randomized[j], randomized[i]
}
return randomized
}
n.Train(src, 10000, 0.6, 0.4)
n.test(patterns)
Output: [0 0] -> [0.00061443914] : [0] [0 1] -> [0.9990952] : [1] [1 0] -> [0.9832545] : [1] [1 1] -> [0.0011786821] : [0]
func (*Neural32) EnableRegression ¶
func (n *Neural32) EnableRegression()
EnableRegression removes the activation function from the last layer so that regression is performed
Example ¶
rand.Seed(0)
config := func(neural *Neural32) {
neural.Init(WeightInitializer32FanIn, 2, 2, 1)
neural.EnableRegression()
}
n := NewNeural32(config)
n.Train(source, 1000, 0.6, 0.4)
n.test(patterns)
Output: [0 0] -> [0.00039592385] : [0] [0 1] -> [1.0000901] : [1] [1 0] -> [1.0000368] : [1] [1 1] -> [6.206334e-05] : [0]
func (*Neural32) Init ¶
func (n *Neural32) Init(initializer WeightInitializer32, layers ...int)
Init initializes the neural network
func (*Neural32) NewContext ¶
NewContext creates a new inference context from the given neural network
func (*Neural32) Train ¶
func (n *Neural32) Train(source func(iteration int) [][][]float32, iterations int, lRate, mFactor float32) []float32
Train trains a neural network using data from source
func (*Neural32) UseTanh ¶
func (n *Neural32) UseTanh()
UseTanh uses tanh as the activation function
Example ¶
rand.Seed(0)
config := func(neural *Neural32) {
neural.Init(WeightInitializer32FanIn, 2, 2, 1)
neural.UseTanh()
}
n := NewNeural32(config)
size := len(patternsTanh)
randomized := make([][][]float32, size)
copy(randomized, patternsTanh)
src := func(iterations int) [][][]float32 {
for i := 0; i < size; i++ {
j := i + rand.Intn(size-i)
randomized[i], randomized[j] = randomized[j], randomized[i]
}
return randomized
}
n.Train(src, 1000, 0.6, 0.4)
n.test(patternsTanh)
Output: [-1 -1] -> [-0.9955744] : [-1] [-1 1] -> [0.9861643] : [1] [1 -1] -> [0.9856719] : [1] [1 1] -> [-0.9887588] : [-1]
type WeightInitializer32 ¶
WeightInitializer32 is a function that initializes the neural network weights. See: http://stats.stackexchange.com/questions/47590/what-are-good-initial-weights-in-a-neural-network