diff --git a/src/cltest.c b/src/cltest.c
index ac80a68..99bfe75 100644
--- a/src/cltest.c
+++ b/src/cltest.c
@@ -53,22 +53,22 @@ void train(clm_NN nn, unsigned int numElements, clm_Vector *inputs, clm_Vector *
 			currentXs = nn.layers[i].output;
 		}
 
-		clm_Linear lastLayer = nn.layers[nn.numLayers - 1];
+		clm_Linear *lastLayer = &nn.layers[nn.numLayers - 1];
 		for(unsigned int b = 0; b < batchSize; b++) {
 			// Error of last layer = y - yhat
-			clm_matrixCopy(batchOutputs[b], lastLayer.error[b]); // lastLayer.error = y
-			clm_matrixSubtractMatrix(lastLayer.error[b], lastLayer.output[b]); // lastLayer.error -= yhat
+			clm_matrixCopy(batchOutputs[b], lastLayer->error[b]); // lastLayer.error = y
+			clm_matrixSubtractMatrix(lastLayer->error[b], lastLayer->output[b]); // lastLayer.error -= yhat
 		}
 
 		for(int i = nn.numLayers - 1; i >= 0; i--) {
-			clm_Linear layer = nn.layers[i];
+			clm_Linear *layer = &nn.layers[i];
 			clm_Matrix *inputsToThisLayer = i == 0 ? batchInputs : nn.layers[i - 1].output;
-			clm_Matrix *outputsOfThisLayer = layer.output;
-			clm_linearBackprop(&layer, nn.learnRate, batchSize, inputsToThisLayer, outputsOfThisLayer, layer.error, i > 0, i == 0 ? NULL : nn.layers[i - 1].error, layer.weightsError, layer.gradient);
+			clm_Matrix *outputsOfThisLayer = layer->output;
+			clm_linearBackprop(layer, nn.learnRate, batchSize, inputsToThisLayer, outputsOfThisLayer, layer->error, i > 0, i == 0 ? NULL : nn.layers[i - 1].error, layer->weightsError, layer->gradient);
 
 			for(unsigned int b = 0; b < batchSize; b++) {
-				clm_matrixAddMatrix(layer.weights, layer.weightsError[b]);
-				clm_matrixAddMatrix(layer.bias, layer.gradient[b]);
+				clm_matrixAddMatrix(layer->weights, layer->weightsError[b]);
+				clm_matrixAddMatrix(layer->bias, layer->gradient[b]);
 			}
 		}
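
Note on the change: `clm_Linear layer = nn.layers[i];` makes a stack copy of the struct, so any field that clm_linearBackprop or the update loop writes into the layer struct itself (as opposed to the heap buffers its matrices point at) is discarded when the iteration ends; taking `&nn.layers[i]` mutates the real array element. Below is a minimal standalone sketch of that pitfall under the same assumption, using a hypothetical Layer struct rather than the real clm_Linear:

	/* Hypothetical, simplified Layer; illustrates struct-copy vs pointer only. */
	#include <stdio.h>

	typedef struct {
		float *weights; /* heap data: shared even by a copy */
		int dirty;      /* plain field: NOT shared by a copy */
	} Layer;

	static void backprop_copy(Layer l)  { l.dirty = 1; }  /* lost on return */
	static void backprop_ptr(Layer *l)  { l->dirty = 1; } /* persists       */

	int main(void) {
		Layer layers[1] = {{0}};

		Layer copy = layers[0];      /* like: clm_Linear layer = nn.layers[i];   */
		backprop_copy(copy);
		printf("after copy:    dirty=%d\n", layers[0].dirty); /* prints 0 */

		Layer *alias = &layers[0];   /* like: clm_Linear *layer = &nn.layers[i]; */
		backprop_ptr(alias);
		printf("after pointer: dirty=%d\n", layers[0].dirty); /* prints 1 */
		return 0;
	}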