@@ -42,6 +42,12 @@ def __init__(
4242 epochs (int): Number of training epochs.
4343 batch_size (int): Batch size.
4444 mode (str): Sampling mode ('bernoulli' or 'gaussian').
45+
46+ >>> rbm = RBM(3, 2)
47+ >>> rbm.n_visible
48+ 3
49+ >>> rbm.n_hidden
50+ 2
4551 """
4652 self .n_visible = n_visible
4753 self .n_hidden = n_hidden
@@ -75,7 +81,6 @@ def sigmoid(self, input_array: np.ndarray) -> np.ndarray:
7581 ... np.array([0.5, 1/(1+np.exp(-1))])
7682 ... )
7783 True
78-
7984 """
8085 return 1.0 / (1.0 + np .exp (- input_array ))
8186
@@ -108,6 +113,12 @@ def sample_hidden_given_visible(
108113
109114 Returns:
110115 tuple: (hidden probabilities, hidden samples)
116+
117+ >>> rbm = RBM(3, 2)
118+ >>> visible = np.array([[0., 1., 0.]])
119+ >>> probs, samples = rbm.sample_hidden_given_visible(visible)
120+ >>> probs.shape == samples.shape == (1, 2)
121+ True
111122 """
112123 hid_probs = self .sigmoid (np .dot (visible_batch , self .weights ) + self .hidden_bias )
113124 hid_samples = self .sample_prob (hid_probs )
@@ -124,6 +135,12 @@ def sample_visible_given_hidden(
124135
125136 Returns:
126137 tuple: (visible probabilities, visible samples)
138+
139+ >>> rbm = RBM(3, 2)
140+ >>> hidden = np.array([[0., 1.]])
141+ >>> probs, samples = rbm.sample_visible_given_hidden(hidden)
142+ >>> probs.shape == samples.shape == (1, 3)
143+ True
127144 """
128145 vis_probs = self .sigmoid (
129146 np .dot (hidden_batch , self .weights .T ) + self .visible_bias
@@ -140,6 +157,11 @@ def contrastive_divergence(self, visible_zero: np.ndarray) -> float:
140157
141158 Returns:
142159 float: Reconstruction loss (mean squared error) for batch.
160+
161+ >>> rbm = RBM(3, 2, cd_steps=2)
162+ >>> data = np.array([[0., 1., 0.]])
163+        >>> float(rbm.contrastive_divergence(data)) >= 0.0
164+        True
143165 """
144166 h_probs0 , h0 = self .sample_hidden_given_visible (visible_zero )
145167 vk , hk = visible_zero , h0
@@ -203,6 +225,9 @@ def __init__(
203225 cd_steps (int): Number of sampling steps in generate_input_for_layer.
204226 save_path (str, optional): Path for saving trained model parameters.
205227
228+ >>> dbn = DeepBeliefNetwork(4, [3])
229+ >>> dbn.input_size
230+ 4
206231 """
207232 self .input_size = input_size
208233 self .layers = layers
@@ -228,7 +253,6 @@ def sigmoid(self, input_array: np.ndarray) -> np.ndarray:
228253 ... np.array([0.5, 1/(1+np.exp(-1))])
229254 ... )
230255 True
231-
232256 """
233257 return 1.0 / (1.0 + np .exp (- input_array ))
234258
@@ -241,6 +265,12 @@ def sample_prob(self, probabilities: np.ndarray) -> np.ndarray:
241265
242266 Returns:
243267 np.ndarray: Binary sampled values.
268+
269+ >>> dbn = DeepBeliefNetwork(4, [3])
270+ >>> probs = np.array([0., 1.])
271+ >>> result = dbn.sample_prob(probs)
272+ >>> set(result).issubset({0., 1.})
273+ True
244274 """
245275 rng = np .random .default_rng ()
246276 return (rng .random (probabilities .shape ) < probabilities ).astype (float )
@@ -258,6 +288,13 @@ def sample_h(
258288
259289 Returns:
260290 tuple: Hidden probabilities and binary samples.
291+
292+ >>> dbn = DeepBeliefNetwork(4, [3])
293+ >>> import numpy as np
294+ >>> visible = np.array([[0., 1., 0., 1.]])
295+ >>> probs, samples = dbn.sample_h(visible, np.ones((4,3)), np.zeros(3))
296+ >>> probs.shape == samples.shape == (1, 3)
297+ True
261298 """
262299 probs = self .sigmoid (np .dot (visible_units , weights ) + hidden_bias )
263300 samples = self .sample_prob (probs )
@@ -276,6 +313,13 @@ def sample_v(
276313
277314 Returns:
278315 tuple: Visible probabilities and binary samples.
316+
317+ >>> dbn = DeepBeliefNetwork(4, [3])
318+ >>> import numpy as np
319+ >>> hidden = np.array([[0., 1., 1.]])
320+ >>> probs, samples = dbn.sample_v(hidden, np.ones((4,3)), np.zeros(4))
321+ >>> probs.shape == samples.shape == (1, 4)
322+ True
279323 """
280324 probs = self .sigmoid (np .dot (hidden_units , weights .T ) + visible_bias )
281325 samples = self .sample_prob (probs )
@@ -293,6 +337,11 @@ def generate_input_for_layer(
293337
294338 Returns:
295339 np.ndarray: Smoothed input for the layer.
340+
341+ >>> dbn = DeepBeliefNetwork(4, [3])
342+ >>> data = np.ones((2, 4))
343+ >>> np.allclose(dbn.generate_input_for_layer(0, data), data)
344+ True
296345 """
297346 if layer_index == 0 :
298347 return original_input .copy ()
@@ -312,6 +361,10 @@ def train_dbn(self, training_data: np.ndarray) -> None:
312361
313362 Args:
314363 training_data (np.ndarray): Training dataset.
364+
365+ >>> dbn = DeepBeliefNetwork(4, [3])
366+ >>> data = np.random.randint(0, 2, (10, 4)).astype(float)
367+        >>> dbn.train_dbn(data)  # doctest: +SKIP
315368 """
316369 for idx , layer_size in enumerate (self .layers ):
317370 n_visible = self .input_size if idx == 0 else self .layers [idx - 1 ]
@@ -336,6 +389,12 @@ def reconstruct(
336389
337390 Returns:
338391 tuple: (encoded representation, reconstructed input, MSE error)
392+
393+ >>> dbn = DeepBeliefNetwork(4, [3])
394+ >>> data = np.ones((2, 4))
395+ >>> encoded, reconstructed, error = dbn.reconstruct(data)
396+ >>> encoded.shape
397+ (2, 3)
339398 """
340399 h = input_data .copy ()
341400 for i in range (len (self .layer_params )):