Use clm_matrixCopy
parent 544bafdd5e
commit b6d62e9ad8
src/clm.c (15 lines changed)
@@ -42,7 +42,7 @@ void clm_freeMatrix(clm_Matrix mat) {
 	free(mat.values);
 }
 
-clm_Matrix clm_matrixCopy(clm_Matrix mat) {
+clm_Matrix clm_matrixCopyALLOC(clm_Matrix mat) {
 	clm_Matrix copy = clm_createMatrix(mat.rows, mat.cols);
 
 	memcpy(copy.values, mat.values, mat.rows * mat.cols * sizeof(float));
@@ -50,6 +50,19 @@ clm_Matrix clm_matrixCopy(clm_Matrix mat) {
 	return copy;
 }
+
+clm_Matrix clm_matrixCopy(clm_Matrix mat, clm_Matrix out) {
+	if(mat.cols != out.cols || mat.rows != out.rows) {
+		printf("Failed to copy matrix\n");
+		return INVALID_MATRIX;
+	}
+
+	for(unsigned int i = 0; i < mat.rows * mat.cols; i++) {
+		out.values[i] = mat.values[i];
+	}
+
+	return out;
+}
 
 clm_Matrix clm_matrixAddScalar(clm_Matrix mat, float scalar) {
 	for(unsigned int i = 0; i < mat.cols * mat.rows; i++) {
 		mat.values[i] += scalar;
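For orientation, here is a minimal usage sketch of the two copy variants this commit settles on: clm_matrixCopyALLOC allocates a fresh matrix, while clm_matrixCopy fills a preallocated destination and reports a shape mismatch via INVALID_MATRIX. The include path and the copy_demo wrapper are assumptions for illustration; the clm_* calls themselves all appear in this diff.

#include <stdio.h>
#include "clm.h" /* assumed header path for the declarations changed below */

static void copy_demo(clm_Matrix src) {
	/* Allocating copy: the caller owns the result and must free it. */
	clm_Matrix heapCopy = clm_matrixCopyALLOC(src);

	/* Copy into a preallocated destination of the same shape; per this diff,
	   the call returns INVALID_MATRIX (and copies nothing) if the dimensions differ. */
	clm_Matrix dst = clm_createMatrix(src.rows, src.cols);
	if(clm_matrixIsInvalid(clm_matrixCopy(src, dst))) {
		printf("copy failed\n");
	}

	clm_freeMatrix(heapCopy);
	clm_freeMatrix(dst);
}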
@@ -49,7 +49,8 @@ clm_Matrix clm_matrixDSigmoid(clm_Matrix mat);
 
 clm_Matrix clm_matrixFromArray(float *array, unsigned int length);
 clm_Matrix clm_matrixWrapArray(float *array, unsigned int length);
-clm_Matrix clm_matrixCopy(clm_Matrix matrix);
+clm_Matrix clm_matrixCopyALLOC(clm_Matrix matrix);
+clm_Matrix clm_matrixCopy(clm_Matrix mat, clm_Matrix other);
 
 bool clm_matrixIsInvalid(clm_Matrix mat);
 
@@ -66,8 +66,7 @@ void train(clm_NN nn, float *x, unsigned int xL, float *y, unsigned int yL) {
 		clm_Matrix error = layer.error;
 
 		if(i == nn.numLayers - 1) {
-			clm_matrixZero(error); // Zero the error matrix
-			clm_matrixSubtractMatrix(clm_matrixAddMatrix(error, yM), outputOfThisLayer); // yhat - y
+			clm_matrixSubtractMatrix(clm_matrixCopy(yM, error), outputOfThisLayer); // yhat - y
 		} else {
 			clm_Matrix weightsT = clm_matrixTranspose(nn.layers[i + 1].weights);
 			clm_matrixMultiplyMatrix(weightsT, prevError, error);
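The change above leans on the new in-place copy: clm_matrixCopy(yM, error) writes the target values into the preallocated error buffer and returns that buffer, so the subtraction that follows no longer needs the zero-then-add pair. Assuming clm_matrixSubtractMatrix subtracts its second argument from the first in place, as the surrounding usage suggests, the output-layer error update amounts to the element-wise sketch below (illustration only, not code from the commit).

/* What the output-layer branch computes, spelled out element by element,
   given that clm_Matrix stores its elements in a flat values array. */
for(unsigned int i = 0; i < error.rows * error.cols; i++) {
	error.values[i] = yM.values[i] - outputOfThisLayer.values[i];
}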
@@ -195,7 +194,7 @@ int main() {
 	clm_Linear layers[] = {layer1, layer2};
 	clm_NN nn = {layers, sizeof(layers) / sizeof(clm_Linear), 0.01};
 
-	for(unsigned int epoch = 0; epoch < 10; epoch++) {
+	for(unsigned int epoch = 0; epoch < 1; epoch++) {
 		printf("Epoch %u\n", epoch);
 		for(unsigned int idx = 0; idx < imageCount; idx++) { // Each train sample
 			if(idx % 1000 == 0) {