author     pepperpepperpepper <pepper@scannerjammer.com>   2015-10-17 18:37:04 -0700
committer  pepperpepperpepper <pepper@scannerjammer.com>   2015-10-17 18:37:04 -0700
commit     10a619b2c7227b2ad214fbb985141b884fbe87fb (patch)
tree       c70b711213f88de613b18791c6c123a6ad73a3ae
parent     b10e46d0efca25234b18c28a47393cbf365b3c0d (diff)
ok pybrain time
-rw-r--r--  pybrain_experiments/classification_test.py   112
-rw-r--r--  pybrain_experiments/test.py                    35
-rw-r--r--  pybrain_experiments/test_recurrent.py          19
-rw-r--r--  ricky/imgradient/selections.py                 10
4 files changed, 173 insertions, 3 deletions
diff --git a/pybrain_experiments/classification_test.py b/pybrain_experiments/classification_test.py
new file mode 100644
index 0000000..ac5f272
--- /dev/null
+++ b/pybrain_experiments/classification_test.py
@@ -0,0 +1,112 @@
+from pybrain.datasets import ClassificationDataSet
+from pybrain.utilities import percentError
+from pybrain.tools.shortcuts import buildNetwork
+from pybrain.supervised.trainers import BackpropTrainer
+from pybrain.structure.modules import SoftmaxLayer
+
+from pylab import ion, ioff, figure, draw, contourf, clf, show, hold, plot
+from scipy import diag, arange, meshgrid, where
+from numpy.random import multivariate_normal
+
+
+# To have a nice dataset for visualization, we produce a set of points in
+# 2D belonging to three different classes. You could also read in your data
+# from a file, e.g. using pylab.load().
+
+means = [(-1,0),(2,4),(3,1)]
+cov = [diag([1,1]), diag([0.5,1.2]), diag([1.5,0.7])]
+alldata = ClassificationDataSet(2, 1, nb_classes=3)
+for n in xrange(400):
+    for klass in range(3):
+        input = multivariate_normal(means[klass], cov[klass])
+        alldata.addSample(input, [klass])
+
+
+# Randomly split the dataset into 75% training and 25% test data sets.
+# Of course, we could also have created two different datasets to begin with.
+
+tstdata, trndata = alldata.splitWithProportion( 0.25 )
+
+
+# For neural network classification, it is highly advisable to encode
+# classes with one output neuron per class. Note that this operation duplicates
+# the original targets and stores them in an (integer) field named ‘class’.
+trndata._convertToOneOfMany( )
+tstdata._convertToOneOfMany( )
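+# (After this conversion each target is a one-of-many vector, e.g. class 1
+# becomes [0, 1, 0], while the original integer labels remain in 'class'.)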
+
+
+print "Number of training patterns: ", len(trndata)
+print "Input and output dimensions: ", trndata.indim, trndata.outdim
+print "First sample (input, target, class):"
+print trndata['input'][0], trndata['target'][0], trndata['class'][0]
+
+
+
+
+
+# Now build a feed-forward network with 5 hidden units. We use the shortcut
+# buildNetwork() for this. The input and output layer size must match the
+# dataset’s input and target dimension. You could add additional hidden
+# layers by inserting more numbers giving the desired layer sizes.
+#
+# The output layer uses a softmax function because we are doing classification.
+# There are more options to explore here, e.g. try changing the hidden layer
+# transfer function to linear instead of (the default) sigmoid.
+#
+# See also the description of buildNetwork() for more info on options, and the Network
+# tutorial Building Networks with Modules and Connections for info on how to
+# build your own non-standard networks.
+fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )
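+# As a sketch of the variations mentioned above (not used below), an extra
+# hidden layer and a different hidden transfer function could be requested via
+#   fnn = buildNetwork( trndata.indim, 5, 5, trndata.outdim,
+#                       hiddenclass=LinearLayer, outclass=SoftmaxLayer )
+# (LinearLayer would need to be imported from pybrain.structure.modules.)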
+
+
+# Set up a trainer that basically takes the network and training dataset
+# as input. For a list of trainers, see the pybrain.supervised.trainers
+# package. We are using a BackpropTrainer for this.
+
+trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1,
+                           verbose=True, weightdecay=0.01)
+
+
+# Now generate a square grid of data points and put it into a dataset,
+# which we can then classify to obtain a nice contour field for visualization.
+# Therefore the target values for this data set can be ignored.
+
+ticks = arange(-3.,6.,0.2)
+X, Y = meshgrid(ticks, ticks)
+# need column vectors in dataset, not arrays
+griddata = ClassificationDataSet(2,1, nb_classes=3)
+for i in xrange(X.size):
+    griddata.addSample([X.ravel()[i], Y.ravel()[i]], [0])
+griddata._convertToOneOfMany() # this is still needed to make the fnn feel comfy
+
+
+for i in range(20):
+    # Train the network for some epochs. Usually you would
+    # set something like 5 here, but for visualization purposes we
+    # do this one epoch at a time.
+    trainer.trainEpochs( 1 )
+    trnresult = percentError( trainer.testOnClassData(),
+                              trndata['class'] )
+    tstresult = percentError( trainer.testOnClassData(
+        dataset=tstdata ), tstdata['class'] )
+
+    print "epoch: %4d" % trainer.totalepochs, \
+          "  train error: %5.2f%%" % trnresult, \
+          "  test error: %5.2f%%" % tstresult
+    out = fnn.activateOnDataset(griddata)
+    out = out.argmax(axis=1)  # the highest output activation gives the class
+    out = out.reshape(X.shape)
+    figure(1)
+    ioff()  # interactive graphics off
+    clf()   # clear the plot
+    hold(True)  # overplot on
+    for c in [0, 1, 2]:
+        here, _ = where(tstdata['class'] == c)
+        plot(tstdata['input'][here, 0], tstdata['input'][here, 1], 'o')
+    if out.max() != out.min():  # safety check against flat field
+        contourf(X, Y, out)  # plot the contour
+    ion()   # interactive graphics on
+    draw()  # update the plot
+
+ioff()
+show()
diff --git a/pybrain_experiments/test.py b/pybrain_experiments/test.py
new file mode 100644
index 0000000..f7b0a01
--- /dev/null
+++ b/pybrain_experiments/test.py
@@ -0,0 +1,35 @@
+from pybrain.structure import FeedForwardNetwork
+from pybrain.structure import LinearLayer, SigmoidLayer
+from pybrain.structure import FullConnection
+n = FeedForwardNetwork()
+
+inLayer = LinearLayer(2)
+hiddenLayer = SigmoidLayer(3)
+outLayer = LinearLayer(1)
+
+n.addInputModule(inLayer)
+n.addModule(hiddenLayer)
+n.addOutputModule(outLayer)
+
+in_to_hidden = FullConnection(inLayer, hiddenLayer)
+hidden_to_out = FullConnection(hiddenLayer, outLayer)
+
+
+n.addConnection(in_to_hidden)
+n.addConnection(hidden_to_out)
+
+
+# everything is wired together now
+# this makes it usable
+
+n.sortModules()
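+# sortModules() does the internal initialization that is needed before the
+# net can be used, e.g. sorting the modules topologically.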
+
+
+if __name__ == "__main__":
+    # This might look different on your machine - the weights of the
+    # connections have already been initialized randomly.
+    print n.activate([1, 2])
+    # look at the connection weights
+    print in_to_hidden.params
+    print hidden_to_out.params
+    print n.params  # all the network's weights in one array
diff --git a/pybrain_experiments/test_recurrent.py b/pybrain_experiments/test_recurrent.py
new file mode 100644
index 0000000..692898a
--- /dev/null
+++ b/pybrain_experiments/test_recurrent.py
@@ -0,0 +1,19 @@
+from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer, FullConnection
+n = RecurrentNetwork()
+
+n.addInputModule(LinearLayer(2, name='in'))
+n.addModule(SigmoidLayer(3, name='hidden'))
+n.addOutputModule(LinearLayer(1, name='out'))
+n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
+n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
+n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))
+
+
+n.sortModules()
+# Repeated activations with the same input give slightly different outputs,
+# because the recurrent connection feeds the previous hidden state back in.
+print n.activate((2, 2))   # e.g. array([-0.1959887])
+print n.activate((2, 2))   # e.g. array([-0.19623716])
+print n.activate((2, 2))   # e.g. array([-0.19675801])
+
+n.reset()  # clears the history, so the next activation starts from scratch
diff --git a/ricky/imgradient/selections.py b/ricky/imgradient/selections.py
index d5eda16..5d0d345 100644
--- a/ricky/imgradient/selections.py
+++ b/ricky/imgradient/selections.py
@@ -37,7 +37,7 @@ stripenumber_selections = Selections.from_dict(
stripeintensity_selections = Selections.from_dict(
{"value": 1000, "weight": 10},
{"value": 4, "weight": 10},
-)
+)
# contrast_selections = \
brightness_selections = \
saturation_selections = \
@@ -68,8 +68,12 @@ bevel_selections = Selections.from_dict(
blurriness_selections = \
percentbeveled_selections = Selections.from_dict(
- {"value": 30, "weight": 20},
- {"value": 10, "weight": 2},
+ {"value": 30, "weight": 10},
+ {"value": 10, "weight": 10},
+ {"value": 5, "weight": 10},
+ {"value": 20, "weight": 10},
+ {"value": 25, "weight": 10},
+ {"value": 7, "weight": 10},
{"value": "", "weight": 1},
)
rotate_selections = \