From b6d62e9ad81c08d50e9144482baadd08e448ff58 Mon Sep 17 00:00:00 2001 From: MrLetsplay Date: Sun, 29 Oct 2023 19:35:32 +0100 Subject: [PATCH] Use clm_matrixCopy --- src/clm.c | 15 ++++++++++++++- src/clm.h | 3 ++- src/cltest.c | 5 ++--- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/clm.c b/src/clm.c index e7c753a..851d7fd 100644 --- a/src/clm.c +++ b/src/clm.c @@ -42,7 +42,7 @@ void clm_freeMatrix(clm_Matrix mat) { free(mat.values); } -clm_Matrix clm_matrixCopy(clm_Matrix mat) { +clm_Matrix clm_matrixCopyALLOC(clm_Matrix mat) { clm_Matrix copy = clm_createMatrix(mat.rows, mat.cols); memcpy(copy.values, mat.values, mat.rows * mat.cols * sizeof(float)); @@ -50,6 +50,19 @@ clm_Matrix clm_matrixCopy(clm_Matrix mat) { return copy; } +clm_Matrix clm_matrixCopy(clm_Matrix mat, clm_Matrix out) { + if(mat.cols != out.cols || mat.rows != out.rows) { + printf("Failed to copy matrix\n"); + return INVALID_MATRIX; + } + + for(unsigned int i = 0; i < mat.rows * mat.cols; i++) { + out.values[i] = mat.values[i]; + } + + return out; +} + clm_Matrix clm_matrixAddScalar(clm_Matrix mat, float scalar) { for(unsigned int i = 0; i < mat.cols * mat.rows; i++) { mat.values[i] += scalar; diff --git a/src/clm.h b/src/clm.h index af20484..d452aa1 100644 --- a/src/clm.h +++ b/src/clm.h @@ -49,7 +49,8 @@ clm_Matrix clm_matrixDSigmoid(clm_Matrix mat); clm_Matrix clm_matrixFromArray(float *array, unsigned int length); clm_Matrix clm_matrixWrapArray(float *array, unsigned int length); -clm_Matrix clm_matrixCopy(clm_Matrix matrix); +clm_Matrix clm_matrixCopyALLOC(clm_Matrix matrix); +clm_Matrix clm_matrixCopy(clm_Matrix mat, clm_Matrix out); bool clm_matrixIsInvalid(clm_Matrix mat); diff --git a/src/cltest.c b/src/cltest.c index da9ca0d..4ce80c6 100644 --- a/src/cltest.c +++ b/src/cltest.c @@ -66,8 +66,7 @@ void train(clm_NN nn, float *x, unsigned int xL, float *y, unsigned int yL) { clm_Matrix error = layer.error; if(i == nn.numLayers - 1) { - clm_matrixZero(error); // Zero 
the error matrix - clm_matrixSubtractMatrix(clm_matrixAddMatrix(error, yM), outputOfThisLayer); // yhat - y + clm_matrixSubtractMatrix(clm_matrixCopy(yM, error), outputOfThisLayer); // y - yhat } else { clm_Matrix weightsT = clm_matrixTranspose(nn.layers[i + 1].weights); clm_matrixMultiplyMatrix(weightsT, prevError, error); @@ -195,7 +194,7 @@ int main() { clm_Linear layers[] = {layer1, layer2}; clm_NN nn = {layers, sizeof(layers) / sizeof(clm_Linear), 0.01}; - for(unsigned int epoch = 0; epoch < 10; epoch++) { + for(unsigned int epoch = 0; epoch < 1; epoch++) { printf("Epoch %u\n", epoch); for(unsigned int idx = 0; idx < imageCount; idx++) { // Each train sample if(idx % 1000 == 0) {