Use clm_matrixCopy

This commit is contained in:
MrLetsplay 2023-10-29 19:35:32 +01:00
parent 544bafdd5e
commit b6d62e9ad8
Signed by: mr
SSH Key Fingerprint: SHA256:92jBH80vpXyaZHjaIl47pjRq+Yt7XGTArqQg1V7hSqg
3 changed files with 18 additions and 5 deletions

View File

@ -42,7 +42,7 @@ void clm_freeMatrix(clm_Matrix mat) {
free(mat.values); free(mat.values);
} }
clm_Matrix clm_matrixCopy(clm_Matrix mat) { clm_Matrix clm_matrixCopyALLOC(clm_Matrix mat) {
clm_Matrix copy = clm_createMatrix(mat.rows, mat.cols); clm_Matrix copy = clm_createMatrix(mat.rows, mat.cols);
memcpy(copy.values, mat.values, mat.rows * mat.cols * sizeof(float)); memcpy(copy.values, mat.values, mat.rows * mat.cols * sizeof(float));
@ -50,6 +50,19 @@ clm_Matrix clm_matrixCopy(clm_Matrix mat) {
return copy; return copy;
} }
/*
 * Copy the contents of `mat` into the caller-provided matrix `out`.
 *
 * Unlike clm_matrixCopyALLOC, this performs no allocation: `out` must
 * already have the same dimensions as `mat`.
 *
 * Returns `out` on success, or INVALID_MATRIX (after printing a
 * diagnostic) when the dimensions do not match. `out` is untouched on
 * failure.
 */
clm_Matrix clm_matrixCopy(clm_Matrix mat, clm_Matrix out) {
	if(mat.cols != out.cols || mat.rows != out.rows) {
		printf("Failed to copy matrix\n");
		return INVALID_MATRIX;
	}

	/* Values are a contiguous float buffer; bulk-copy like
	 * clm_matrixCopyALLOC does instead of an element loop. */
	memcpy(out.values, mat.values, mat.rows * mat.cols * sizeof(float));

	return out;
}
clm_Matrix clm_matrixAddScalar(clm_Matrix mat, float scalar) { clm_Matrix clm_matrixAddScalar(clm_Matrix mat, float scalar) {
for(unsigned int i = 0; i < mat.cols * mat.rows; i++) { for(unsigned int i = 0; i < mat.cols * mat.rows; i++) {
mat.values[i] += scalar; mat.values[i] += scalar;

View File

@ -49,7 +49,8 @@ clm_Matrix clm_matrixDSigmoid(clm_Matrix mat);
clm_Matrix clm_matrixFromArray(float *array, unsigned int length); clm_Matrix clm_matrixFromArray(float *array, unsigned int length);
clm_Matrix clm_matrixWrapArray(float *array, unsigned int length); clm_Matrix clm_matrixWrapArray(float *array, unsigned int length);
clm_Matrix clm_matrixCopy(clm_Matrix matrix); clm_Matrix clm_matrixCopyALLOC(clm_Matrix matrix);
clm_Matrix clm_matrixCopy(clm_Matrix mat, clm_Matrix other);
bool clm_matrixIsInvalid(clm_Matrix mat); bool clm_matrixIsInvalid(clm_Matrix mat);

View File

@ -66,8 +66,7 @@ void train(clm_NN nn, float *x, unsigned int xL, float *y, unsigned int yL) {
clm_Matrix error = layer.error; clm_Matrix error = layer.error;
if(i == nn.numLayers - 1) { if(i == nn.numLayers - 1) {
clm_matrixZero(error); // Zero the error matrix clm_matrixSubtractMatrix(clm_matrixCopy(yM, error), outputOfThisLayer); // yhat - y
clm_matrixSubtractMatrix(clm_matrixAddMatrix(error, yM), outputOfThisLayer); // yhat - y
} else { } else {
clm_Matrix weightsT = clm_matrixTranspose(nn.layers[i + 1].weights); clm_Matrix weightsT = clm_matrixTranspose(nn.layers[i + 1].weights);
clm_matrixMultiplyMatrix(weightsT, prevError, error); clm_matrixMultiplyMatrix(weightsT, prevError, error);
@ -195,7 +194,7 @@ int main() {
clm_Linear layers[] = {layer1, layer2}; clm_Linear layers[] = {layer1, layer2};
clm_NN nn = {layers, sizeof(layers) / sizeof(clm_Linear), 0.01}; clm_NN nn = {layers, sizeof(layers) / sizeof(clm_Linear), 0.01};
for(unsigned int epoch = 0; epoch < 10; epoch++) { for(unsigned int epoch = 0; epoch < 1; epoch++) {
printf("Epoch %u\n", epoch); printf("Epoch %u\n", epoch);
for(unsigned int idx = 0; idx < imageCount; idx++) { // Each train sample for(unsigned int idx = 0; idx < imageCount; idx++) { // Each train sample
if(idx % 1000 == 0) { if(idx % 1000 == 0) {