Commit 2dde18d

pruned out stuff for dojo
1 parent 25e6413 commit 2dde18d

File tree

1 file changed (+27, -75 lines)

rbm.py (+27, -75)

@@ -41,29 +41,24 @@ def train(self, data, max_epochs = 1000):
     for epoch in range(max_epochs):
       ## Clamp to the data and sample from the hidden units.
       # (This is the "positive CD phase", aka the reality phase.)
-      pos_hidden_activations = np.dot(data, self.weights)
-      pos_hidden_probs = self._logistic(pos_hidden_activations)
-      pos_hidden_states = pos_hidden_probs > np.random.rand(num_examples, self.num_hidden + 1)
-      # Note that we're using the activation *probabilities* of the hidden states, not the hidden states
-      # themselves, when computing associations. We could also use the states; see section 3 of Hinton's
-      # "A Practical Guide to Training Restricted Boltzmann Machines" for more.
-      pos_associations = np.dot(data.T, pos_hidden_probs)
+      #
+      # ********** FILL IN HERE **********
+      #

       ## Reconstruct the visible units and sample again from the hidden units.
       # (This is the "negative CD phase", aka the daydreaming phase.)
-      neg_visible_activations = np.dot(pos_hidden_states, self.weights.T)
-      neg_visible_probs = self._logistic(neg_visible_activations)
-      neg_visible_probs[:,0] = 1 # Fix the bias unit.
-      neg_hidden_activations = np.dot(neg_visible_probs, self.weights)
-      neg_hidden_probs = self._logistic(neg_hidden_activations)
-      # Note, again, that we're using the activation *probabilities* when computing associations, not the states
-      # themselves.
-      neg_associations = np.dot(neg_visible_probs.T, neg_hidden_probs)
-
-      # Update weights.
-      self.weights += self.learning_rate * ((pos_associations - neg_associations) / num_examples)
-
-      error = np.sum((data - neg_visible_probs) ** 2)
+      #
+      # ********** FILL IN HERE **********
+      #
+
+      ## Update weights.
+      #
+      # ********** FILL IN HERE **********
+
+      ## Compute final error (between data and visible unit probabilities)
+      #
+      # ********** FILL IN HERE **********
+
       print("Epoch %s: error is %s" % (epoch, error))

   def run_visible(self, data):
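
The removed lines above are one complete answer to the train() fill-ins; the update they implement is the usual CD-1 rule, Δw_ij = ε(⟨v_i h_j⟩_data − ⟨v_i h_j⟩_recon), averaged over the batch. For reference, a minimal self-contained sketch of one epoch assembled from those removed lines (the free-standing names cd1_epoch and logistic are ours, not the file's; data is assumed to already carry a bias column of ones, as in the surrounding class):

import numpy as np

def logistic(x):
  return 1.0 / (1 + np.exp(-x))

def cd1_epoch(data, weights, learning_rate = 0.1):
  # data:    (num_examples, num_visible + 1), bias column included.
  # weights: (num_visible + 1, num_hidden + 1), bias row and column included.
  num_examples = data.shape[0]
  num_hidden = weights.shape[1] - 1

  # Positive phase: clamp to the data and sample the hidden units.
  # Associations use the activation *probabilities*, per section 3 of
  # Hinton's practical guide, as the comments above note.
  pos_hidden_probs = logistic(np.dot(data, weights))
  pos_hidden_states = pos_hidden_probs > np.random.rand(num_examples, num_hidden + 1)
  pos_associations = np.dot(data.T, pos_hidden_probs)

  # Negative phase: reconstruct the visible units, then recompute hidden probabilities.
  neg_visible_probs = logistic(np.dot(pos_hidden_states, weights.T))
  neg_visible_probs[:,0] = 1  # fix the bias unit
  neg_hidden_probs = logistic(np.dot(neg_visible_probs, weights))
  neg_associations = np.dot(neg_visible_probs.T, neg_hidden_probs)

  # Update the weights in place and report the squared reconstruction error.
  weights += learning_rate * ((pos_associations - neg_associations) / num_examples)
  return np.sum((data - neg_visible_probs) ** 2)

Calling cd1_epoch(data, self.weights) once per epoch reproduces the loop above; note that it mutates the weight matrix in place, matching the += update in the original.
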
@@ -127,74 +122,31 @@ def gibbs_step(self, data, weights):
     # obtain output dimension from weights
     num_output_nodes = weights.shape[1]

-    # Create a matrix, where each row is to be the output units (plus a bias unit)
+    ## Create a matrix, where each row is to be the output units (plus a bias unit)
     # sampled from a training example.
     output_states = np.ones((num_examples, num_output_nodes))

-    # Insert bias units of 1 into the first column of data.
+    ## Insert bias units of 1 into the first column of data.
     data = np.insert(data, 0, 1, axis = 1)

-    # Calculate the activations of the visible units.
-    activations = np.dot(data, weights)
+    ## Calculate the activations of the output units.
+    #
+    # ********** FILL IN HERE **********

-    # Calculate the probabilities of turning the visible units on.
-    probs = self._logistic(activations)
+    ## Calculate the probabilities of turning the output units on.
+    #
+    # ********** FILL IN HERE **********

-    # Turn the visible units on with their specified probabilities.
-    output_states[:,:] = probs > np.random.rand(num_examples, num_output_nodes)
+    ## Turn the output units on with their specified probabilities.
+    #
+    # ********** FILL IN HERE **********

-    # Always fix the bias unit to 1.
+    ## Always fix the bias unit to 1.
     # output_states[:,0] = 1

     # Ignore the bias units.
     output_states = output_states[:,1:]
     return output_states
-
-  def daydream(self, num_samples):
-    """
-    Randomly initialize the visible units once, and start running alternating Gibbs sampling steps
-    (where each step consists of updating all the hidden units, and then updating all of the visible units),
-    taking a sample of the visible units at each step.
-    Note that we only initialize the network *once*, so these samples are correlated.
-
-    Returns
-    -------
-    samples: A matrix, where each row is a sample of the visible units produced while the network was
-      daydreaming.
-    """
-
-    # Create a matrix, where each row is to be a sample of the visible units
-    # (with an extra bias unit), initialized to all ones.
-    samples = np.ones((num_samples, self.num_visible + 1))
-
-    # Take the first sample from a uniform distribution.
-    samples[0,1:] = np.random.rand(self.num_visible)
-
-    # Start the alternating Gibbs sampling.
-    # Note that we keep the hidden units as binary states, but leave the
-    # visible units as real probabilities. See section 3 of Hinton's
-    # "A Practical Guide to Training Restricted Boltzmann Machines"
-    # for more on why.
-    for i in range(1, num_samples):
-      visible = samples[i-1,:]
-
-      # Calculate the activations of the hidden units.
-      hidden_activations = np.dot(visible, self.weights)
-      # Calculate the probabilities of turning the hidden units on.
-      hidden_probs = self._logistic(hidden_activations)
-      # Turn the hidden units on with their specified probabilities.
-      hidden_states = hidden_probs > np.random.rand(self.num_hidden + 1)
-      # Always fix the bias unit to 1.
-      hidden_states[0] = 1
-
-      # Recalculate the probabilities that the visible units are on.
-      visible_activations = np.dot(hidden_states, self.weights.T)
-      visible_probs = self._logistic(visible_activations)
-      visible_states = visible_probs > np.random.rand(self.num_visible + 1)
-      samples[i,:] = visible_states
-
-    # Ignore the bias units (the first column), since they're always set to 1.
-    return samples[:,1:]

   def _logistic(self, x):
     return 1.0 / (1 + np.exp(-x))
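
For the gibbs_step fill-ins, the removed lines in the hunk above again show the intended answers: compute the activations, squash them through the logistic, then sample binary states. A standalone sketch under the same conventions (logistic as a free function here instead of the class's _logistic; data arrives without a bias column):

import numpy as np

def logistic(x):
  return 1.0 / (1 + np.exp(-x))

def gibbs_step(data, weights):
  # data:    (num_examples, num_input_nodes), no bias column yet.
  # weights: (num_input_nodes + 1, num_output_nodes), bias row included.
  num_examples = data.shape[0]
  num_output_nodes = weights.shape[1]

  # Each row will hold the sampled output units (plus a bias unit).
  output_states = np.ones((num_examples, num_output_nodes))

  # Insert bias units of 1 into the first column of data.
  data = np.insert(data, 0, 1, axis = 1)

  # Calculate the activations of the output units.
  activations = np.dot(data, weights)

  # Calculate the probabilities of turning the output units on.
  probs = logistic(activations)

  # Turn the output units on with their specified probabilities.
  output_states[:,:] = probs > np.random.rand(num_examples, num_output_nodes)

  # Ignore the bias units (the first column) when returning.
  return output_states[:,1:]

Because the bias column is dropped on return, the commented-out "output_states[:,0] = 1" line in the file has no effect on the result either way.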
