
Commit 1daad72

back preversion
1 parent fc90ead

2 files changed: +110, -162 lines changed

matrix/matrix_pro.h

Lines changed: 96 additions & 53 deletions
@@ -593,69 +593,112 @@ Matrix3d conv_test(Matrix3d mid1, int input_dim = 3, int output_channels = 3, in
     }
 }
 Matrix3d conv_test_with_output(Matrix3d mid1,
-                               int input_dim = 3,
-                               int output_channels = 3,
-                               int stride = 1,
-                               int kernel_size = 2,
-                               int mode = 0,
-                               int padding = 0,
-                               bool verbose = false)
-{
-    // If padding is needed, pad the input first
-    if (padding > 0) {
-        Matrix3d padded_input = CreateMatrix3d(mid1.dep, mid1.wid + 2*padding, mid1.high + 2*padding);
-        for (int c = 0; c < mid1.dep; c++) {
-            padded_input.matrix3d[c] = edge_padding(mid1.matrix3d[c],
-                                                    mid1.wid + 2*padding,
-                                                    mid1.high + 2*padding);
-        }
-        mid1 = padded_input;
+                               int input_dim = 3,
+                               int output_channels = 3,
+                               int stride = 1,
+                               int kernel_size = 2,
+                               int mode = 0,
+                               bool verbose = false)
+// padding is not implemented yet
+{
+    if (verbose)
+    {
+        cout << "Input Matrix3d: " << endl;
+        cout_mat3d(mid1);
+        cout << "Parameters: input_dim = " << input_dim
+             << ", output_channels = " << output_channels
+             << ", stride = " << stride
+             << ", kernel_size = " << kernel_size
+             << ", mode = " << mode;
     }
 
-    // Compute output dimensions
-    int output_height = ((mid1.wid - kernel_size) / stride) + 1;
-    int output_width = ((mid1.high - kernel_size) / stride) + 1;
+    // Compute padding widths and heights
+    int padding_wid = stride - (mid1.wid - kernel_size) % stride;
+    if (padding_wid == stride)
+    {
+        padding_wid = 0;
+    }
+    int padding_high = stride - (mid1.high - kernel_size) % stride;
+    if (padding_high == stride)
+    {
+        padding_high = 0;
+    }
+    if (verbose)
+    {
+        cout << "Padding widths: " << padding_wid << ", padding heights: " << padding_high << endl;
+    }
 
-    Matrix3d output3d = CreateMatrix3d(output_channels, output_height, output_width);
-
-    // Construct the convolution kernels
-    Matrix** filters = (Matrix**)malloc(input_dim * sizeof(Matrix*));
-    for(int i = 0; i < input_dim; i++) {
-        filters[i] = (Matrix*)malloc(output_channels * sizeof(Matrix));
-        for(int j = 0; j < output_channels; j++) {
-            filters[i][j] = ones(kernel_size, kernel_size);
+    // Pad each RGB channel in the 3D matrix
+    Matrix mid_rgb[input_dim];
+    for (int rgb_idx = 0; rgb_idx < input_dim; rgb_idx++)
+    {
+        mid_rgb[rgb_idx] = edge_padding(mid1.matrix3d[rgb_idx],
+                                        mid1.matrix3d[rgb_idx].row + padding_high,
+                                        mid1.matrix3d[rgb_idx].col + padding_wid);
+        if (verbose)
+        {
+            cout << "RGB[" << rgb_idx << "] channel after padding: " << endl;
+            cout_mat(mid_rgb[rgb_idx]);
         }
     }
 
-    // Perform the convolution
-    for(int out_c = 0; out_c < output_channels; out_c++) {
-        for(int h = 0; h < output_height; h++) {
-            for(int w = 0; w < output_width; w++) {
-                float sum = 0;
-                for(int in_c = 0; in_c < input_dim; in_c++) {
-                    for(int kh = 0; kh < kernel_size; kh++) {
-                        for(int kw = 0; kw < kernel_size; kw++) {
-                            int h_in = h * stride + kh;
-                            int w_in = w * stride + kw;
-                            sum += mid1.matrix3d[in_c].matrix[h_in][w_in] *
-                                   filters[in_c][out_c].matrix[kh][kw];
-                        }
-                    }
-                }
-                output3d.matrix3d[out_c].matrix[h][w] = sum;
-            }
+    // Construct filters
+    Matrix filters[input_dim][output_channels];
+    for (int channel_index = 0; channel_index < input_dim; channel_index++)
+    {
+
+        for (int filter_index = 0; filter_index < output_channels; filter_index++)
+        {
+            Matrix kernel = ones(kernel_size, kernel_size);
+            filters[channel_index][filter_index] = kernel;
         }
     }
 
-    // Free memory
-    for(int i = 0; i < input_dim; i++) {
-        for(int j = 0; j < output_channels; j++) {
-            free_mat(filters[i][j]);
+    // Compute convolution results for each filter
+    Matrix kernel = ones(kernel_size, kernel_size);
+    Matrix feature_maps[output_channels];
+    for (int filter_idx = 0; filter_idx < output_channels; filter_idx++)
+    {
+        Matrix sum_rgb = CreateMatrix(((mid1.wid - kernel_size + 2 * padding_wid) / stride) + 1,
+                                      ((mid1.high - kernel_size + 2 * padding_high) / stride) + 1);
+        for (int channel_idx = 0; channel_idx < input_dim; channel_idx++)
+        {
+            // Compute convolution result for a single RGB channel and a single filter
+            Matrix element = conv_element(mid_rgb[channel_idx],
+                                          filters[channel_idx][filter_idx],
+                                          kernel_size, stride);
+            if (verbose)
+            {
+                cout << "Convolution of RGB[" << channel_idx << "] channel with Filter["
+                     << filter_idx << "] : " << endl;
+                cout_mat(mid_rgb[channel_idx]);
+                cout << " * " << endl;
+                cout_mat(filters[channel_idx][filter_idx]);
+                cout << " = " << endl;
+                cout_mat(element);
+                cout << endl;
+            }
+            // Sum convolution results for each RGB channel
+            sum_rgb = add(sum_rgb, element, 0);
+        }
+        feature_maps[filter_idx] = sum_rgb;
+        if (verbose)
+        {
+            cout << "Feature map [" << filter_idx << "] : " << endl;
+            cout_mat(feature_maps[filter_idx]);
         }
-        free(filters[i]);
     }
-    free(filters);
-
+    // Construct 3D matrix to store different feature maps at different depths
+    Matrix3d output3d = CreateMatrix3d(output_channels, feature_maps[0].row, feature_maps[0].col);
+    for (int i = 0; i < output_channels; i++)
+    {
+        output3d.matrix3d[i] = feature_maps[i];
+    }
+    if (verbose)
+    {
+        cout << "Output Matrix3d: " << endl;
+        cout_mat3d(output3d);
+    }
     return output3d;
 }
 
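The padding computed above is the smallest amount that makes (dim - kernel_size) divisible by the stride, with a pad equal to the stride treated as no pad at all. A minimal standalone sketch of that arithmetic (plain C++; the demo dimensions are invented and none of the repo's Matrix types are needed here):

#include <iostream>

// Extra cells needed along one axis so that (dim - kernel_size) % stride == 0,
// mirroring the padding_wid / padding_high computation in conv_test_with_output.
int pad_for(int dim, int kernel_size, int stride)
{
    int pad = stride - (dim - kernel_size) % stride;
    return (pad == stride) ? 0 : pad;
}

int main()
{
    int wid = 6, high = 7, kernel_size = 2, stride = 2; // example values, not from the commit
    std::cout << "padding_wid = "  << pad_for(wid, kernel_size, stride)  << std::endl; // 0: (6-2)%2 == 0
    std::cout << "padding_high = " << pad_for(high, kernel_size, stride) << std::endl; // 1: (7-2)%2 == 1
    return 0;
}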
@@ -686,7 +729,7 @@ Matrix4d batch_conv_test(Matrix4d mid4,
     for (int batch_idx = 0; batch_idx < mid4.batch; batch_idx++)
     {
         Matrix3d mid3 = mid4.matrix4d[batch_idx];
-        Matrix3d output3d = conv_test_with_output(mid3, input_dim, output_channels, stride, kernel_size, mode, 0, verbose);
+        Matrix3d output3d = conv_test_with_output(mid3, input_dim, output_channels, stride, kernel_size, mode, verbose);
         output3d_arr[batch_idx] = output3d;
     }
 
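For reference, a hypothetical call site for the new seven-parameter conv_test_with_output (the include path, the 3x4x4 input shape, and the main() wrapper are assumptions, not part of this commit):

#include "matrix/matrix_pro.h" // assumed include path

int main()
{
    // 3-channel 4x4 input; contents depend on how CreateMatrix3d initializes memory
    Matrix3d input = CreateMatrix3d(3, 4, 4);

    // The padding argument is gone; padding is now derived internally from stride and kernel_size
    Matrix3d out = conv_test_with_output(input,
                                         3,     // input_dim
                                         3,     // output_channels
                                         1,     // stride
                                         2,     // kernel_size
                                         0,     // mode
                                         true); // verbose
    cout_mat3d(out);
    return 0;
}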
root/include/edgelayer.h

Lines changed: 14 additions & 109 deletions
@@ -3,46 +3,13 @@ class edge_layer
 public:
     virtual ~edge_layer() {}
     virtual Matrix4d forward(Matrix4d mid1) = 0;
-    virtual Matrix4d backward(Matrix4d grad_output) = 0;
     virtual int parameter_counter() = 0;
 };
 
 class conv2d : public edge_layer
 {
-private:
-    Matrix4d input_cache;
-    Matrix** kernel;
-
 public:
-    conv2d(Matrix4d mid_4, int in_channel, int out_channle, int _stride, int ksize, int _mode, int _padding)
-    {
-        mid4 = mid_4;
-        input_dim = in_channel;
-        output_channels = out_channle;
-        stride = _stride;
-        kernel_size = ksize;
-        mode = _mode;
-        padding = _padding;
-
-        kernel = (Matrix**)malloc(input_dim * sizeof(Matrix*));
-        for(int i = 0; i < input_dim; i++) {
-            kernel[i] = (Matrix*)malloc(output_channels * sizeof(Matrix));
-            for(int j = 0; j < output_channels; j++) {
-                kernel[i][j] = CreateRandMat(kernel_size, kernel_size);
-            }
-        }
-    }
-
-    ~conv2d() {
-        for(int i = 0; i < input_dim; i++) {
-            for(int j = 0; j < output_channels; j++) {
-                free_mat(kernel[i][j]);
-            }
-            free(kernel[i]);
-        }
-        free(kernel);
-    }
-
+    conv2d(Matrix4d mid_4, int in_channel, int out_channle, int _stride, int ksize, int _mode, int _padding);
     int arg1;
     Matrix4d mid4;
     int input_dim;
@@ -52,92 +19,29 @@ class conv2d : public edge_layer
     int mode;
     int padding;
 
-    Matrix4d forward(Matrix4d mid4) override
+    Matrix4d forward(Matrix4d mid4)
     {
-        input_cache = mid4;
         std::cout << "in_channel = " << input_dim << std::endl;
         std::cout << "out_channle = " << output_channels << std::endl;
         std::cout << "_stride = " << stride << std::endl;
         std::cout << "ksize = " << kernel_size << std::endl;
         std::cout << "_mode = " << mode << std::endl;
         std::cout << "_padding = " << padding << std::endl;
-
         Matrix3d *output3d_arr = (Matrix3d *)malloc(mid4.batch * sizeof(Matrix3d));
         for (int batch_idx = 0; batch_idx < mid4.batch; batch_idx++)
         {
             Matrix3d mid3 = mid4.matrix4d[batch_idx];
-            Matrix3d output3d = conv_test_with_output(mid3, input_dim, output_channels,
-                                                      stride, kernel_size, mode, padding, false);
+            Matrix3d output3d = conv_test_with_output(mid3, input_dim, output_channels, stride, kernel_size, mode, false);
             output3d_arr[batch_idx] = output3d;
         }
 
-        Matrix4d output4d = CreateMatrix4d(mid4.batch, output_channels,
-                                           output3d_arr[0].wid, output3d_arr[0].high);
+        Matrix4d output4d = CreateMatrix4d(mid4.batch, output_channels, output3d_arr[0].wid, output3d_arr[0].high);
         for (int batch_idx = 0; batch_idx < mid4.batch; batch_idx++)
         {
             output4d.matrix4d[batch_idx] = output3d_arr[batch_idx];
         }
-
-        free(output3d_arr);
         return output4d;
     }
-
-    Matrix4d backward(Matrix4d grad_output) override
-    {
-        Matrix4d input_grad = CreateMatrix4d(input_cache.batch, input_dim,
-                                             input_cache.wid, input_cache.high);
-
-        // Initialize the gradient to 0
-        for(int b = 0; b < input_cache.batch; b++) {
-            for(int c = 0; c < input_dim; c++) {
-                for(int h = 0; h < input_cache.wid; h++) {
-                    for(int w = 0; w < input_cache.high; w++) {
-                        input_grad.matrix4d[b].matrix3d[c].matrix[h][w] = 0.0f;
-                    }
-                }
-            }
-        }
-
-        for(int batch_idx = 0; batch_idx < grad_output.batch; batch_idx++) {
-            for(int out_c = 0; out_c < output_channels; out_c++) {
-                for(int in_c = 0; in_c < input_dim; in_c++) {
-                    // Rotate the convolution kernel
-                    Matrix rotated_kernel = rot180(kernel[in_c][out_c]);
-
-                    // Compute the required padding size
-                    int pad_h = kernel_size - 1;
-                    int pad_w = kernel_size - 1;
-
-                    // Pad the gradient
-                    Matrix padded_grad = edge_padding(grad_output.matrix4d[batch_idx].matrix3d[out_c],
-                                                      grad_output.wid + 2*pad_h,
-                                                      grad_output.high + 2*pad_w);
-
-                    // Perform a full convolution (stride = 1)
-                    for(int h = 0; h < input_cache.wid; h++) {
-                        for(int w = 0; w < input_cache.high; w++) {
-                            float sum = 0.0f;
-                            for(int kh = 0; kh < kernel_size; kh++) {
-                                for(int kw = 0; kw < kernel_size; kw++) {
-                                    int h_p = h + kh;
-                                    int w_p = w + kw;
-                                    sum += padded_grad.matrix[h_p][w_p] *
-                                           rotated_kernel.matrix[kh][kw];
-                                }
-                            }
-                            input_grad.matrix4d[batch_idx].matrix3d[in_c].matrix[h][w] += sum;
-                        }
-                    }
-
-                    free_mat(rotated_kernel);
-                    free_mat(padded_grad);
-                }
-            }
-        }
-
-        return input_grad;
-    }
-
     int parameter_counter()
     {
         int num_params = input_dim * output_channels * kernel_size * kernel_size;
@@ -149,17 +53,18 @@ class conv2d : public edge_layer
 
         return num_params;
     }
-
-    // Method for setting the convolution kernel
-    void set_kernel(float* kernel_values, int in_c, int out_c) {
-        for(int i = 0; i < kernel_size; i++) {
-            for(int j = 0; j < kernel_size; j++) {
-                kernel[in_c][out_c].matrix[i][j] = kernel_values[i * kernel_size + j];
-            }
-        }
-    }
 };
 
+conv2d::conv2d(Matrix4d mid_1, int in_channel, int out_channle, int _stride, int ksize, int _mode, int _padding)
+{
+    mid4 = mid_1;
+    input_dim = in_channel;
+    output_channels = out_channle;
+    stride = _stride;
+    kernel_size = ksize;
+    mode = _mode;
+    padding = _padding;
+}
 class bn : public edge_layer
 {
 public:
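A hypothetical call site for the slimmed-down conv2d, whose constructor now only stores hyperparameters (the include paths, the 2x3x4x4 batch shape, and main() are assumptions; edgelayer.h still depends on the Matrix types and conv_test_with_output from matrix_pro.h):

#include "matrix/matrix_pro.h"      // assumed: matrix types and conv_test_with_output come first
#include "root/include/edgelayer.h" // assumed include path

int main()
{
    // Batch of 2 images, 3 channels, 4x4 spatial size (arbitrary demo shape)
    Matrix4d batch = CreateMatrix4d(2, 3, 4, 4);

    // Arguments: input, in_channels, out_channels, stride, kernel_size, mode, padding
    conv2d layer(batch, 3, 3, 1, 2, 0, 0);

    Matrix4d features = layer.forward(batch); // calls conv_test_with_output for each batch item
    std::cout << "output batch size: " << features.batch << std::endl;
    std::cout << "parameter count: " << layer.parameter_counter() << std::endl; // 3 * 3 * 2 * 2 = 36
    return 0;
}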
