Skip to content

Commit 017ae07

Browse files
authored
Add files via upload
1 parent 524d2a5 commit 017ae07

File tree

4 files changed

+297
-0
lines changed

4 files changed

+297
-0
lines changed

examples/sigmoid.ino

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
#include "fnn.h"
2+
3+
FNN fnn(6); // Neural network dengan 6 input
4+
5+
void setup() {
6+
Serial.begin(9600);
7+
8+
// Set bobot, bias, dan fungsi aktivasi
9+
fnn.setWeights({0.3, 0.5, 0.2, 0.4, 0.1, 0.6});
10+
fnn.setBiases({0.1, 0.2}); // Menambahkan biases untuk hidden dan output layer
11+
fnn.setActivationFunction(FNN::sigmoid); // Mencoba fungsi aktivasi yang lebih cocok untuk klasifikasi
12+
13+
// Aturan fuzzy (perhatikan bahwa Anda perlu menyesuaikan output dengan label)
14+
fnn.setFuzzyRules({
15+
{"Tidak sesuai", 0.0},
16+
{"Sedikit", 0.2},
17+
{"Sangat Belum", 0.4},
18+
{"Belum Banyak", 0.6},
19+
{"Sedikit Banyak", 0.7},
20+
{"Banyak", 1.0},
21+
{"Extrem", 1.1}
22+
});
23+
24+
// Data pelatihan
25+
std::vector<std::vector<float>> trainingInputs = {
26+
{4.5, 2.8, 0.9, 3.7, 3.1, 7.9},
27+
{1.2, 0.6, 0.3, 0.5, 0.2, 0.7},
28+
{0.4, 0.3, 0.2, 0.6, 0.5, 0.4},
29+
{5.1, 2.4, 1.2, 4.1, 3.2, 6.5},
30+
{3.3, 1.7, 0.6, 3.4, 2.3, 6.1}
31+
};
32+
std::vector<std::string> trainingTargets = {"Banyak", "Sedikit", "Tidak sesuai", "Sedikit Banyak", "Banyak"};
33+
34+
// Data testing
35+
std::vector<std::vector<float>> testInputs = {
36+
{4.5, 2.8, 0.9, 3.7, 3.1, 7.9},
37+
{1.2, 0.6, 0.3, 0.5, 0.2, 0.7},
38+
{0.4, 0.3, 0.2, 0.6, 0.5, 0.4}
39+
};
40+
std::vector<std::string> testTargets = {"Banyak", "Sedikit", "Tidak sesuai"};
41+
42+
int numEpochs = 1000;
43+
float learningRate = 0.01;
44+
45+
// Melatih model
46+
for (int epoch = 0; epoch < numEpochs; ++epoch) {
47+
fnn.train(trainingInputs, trainingTargets, numEpochs, learningRate); // Latih model
48+
49+
if (epoch % 100 == 0) { // Evaluasi setiap 100 epoch
50+
float accuracy = fnn.evaluateAccuracy(testInputs, testTargets);
51+
float precision = fnn.evaluatePrecision(testInputs, testTargets);
52+
Serial.print("Epoch: ");
53+
Serial.print(epoch);
54+
Serial.print(" | Akurasi: ");
55+
Serial.print(accuracy);
56+
Serial.print("% | Presisi: ");
57+
Serial.print(precision);
58+
Serial.println("%");
59+
}
60+
}
61+
62+
// Prediksi setelah pelatihan
63+
Serial.println("Hasil Prediksi setelah Pelatihan:");
64+
for (size_t i = 0; i < testInputs.size(); ++i) {
65+
String result = fnn.predictFNN(testInputs[i]).c_str(); // Prediksi hasil
66+
Serial.print("Input ke-");
67+
Serial.print(i + 1);
68+
Serial.print(": ");
69+
Serial.println(result);
70+
}
71+
}
72+
73+
void loop() {
    // Intentionally empty: all work for this example happens once in setup().
}

library.properties

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
name=fnn
2+
version=1.0.0
3+
author=GALIH RIDHO UTOMO
4+
maintainer=GALIH RIDHO UTOMO <[email protected]>
5+
sentence=Fuzzy Neural Network for Arduino.
6+
paragraph=The FNN (Fuzzy Neural Network) module implements a hybrid intelligent system that combines neural networks with fuzzy logic principles. This implementation is specifically optimized for Arduino platforms, providing efficient computation while maintaining prediction accuracy.
7+
category=Signal Input/Output
8+
url=https://github.com/4211421036/fnn.git
9+
architectures=*

src/fnn.cpp

Lines changed: 155 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,155 @@
1+
#include "fnn.h"
2+
#include <Arduino.h>
3+
4+
// Konstruktor
5+
FNN::FNN(int inputSize, float bias, std::function<float(float)> activation)
6+
: weights(2, std::vector<float>(inputSize, 0.0)), biases(2, bias), activationFunction(activation) {
7+
if (!activationFunction) {
8+
activationFunction = sigmoid; // Default fungsi aktivasi adalah sigmoid
9+
}
10+
}
11+
12+
// Set bobot
13+
void FNN::setWeights(const std::vector<float>& newWeights) {
14+
if (newWeights.size() == weights[0].size()) {
15+
weights[0] = newWeights;
16+
}
17+
}
18+
19+
// Set bias
20+
void FNN::setBiases(const std::vector<float>& newBiases) {
21+
if (newBiases.size() == biases.size()) {
22+
biases = newBiases;
23+
}
24+
}
25+
26+
// Set fungsi aktivasi
27+
void FNN::setActivationFunction(std::function<float(float)> activation) {
28+
activationFunction = activation;
29+
}
30+
31+
// Set fuzzy rules
32+
void FNN::setFuzzyRules(const std::map<std::string, float>& rules) {
33+
fuzzyRules = rules;
34+
}
35+
36+
// Fungsi aktivasi: Sigmoid
37+
float FNN::sigmoid(float x) {
38+
return 1.0 / (1.0 + exp(-x));
39+
}
40+
41+
// Fungsi aktivasi: Tanh
42+
float FNN::tanh(float x) {
43+
return std::tanh(x);
44+
}
45+
46+
// Fungsi aktivasi: Leaky ReLU
47+
std::function<float(float)> FNN::leakyRelu(float alpha) {
48+
return [alpha](float x) { return (x > 0) ? x : alpha * x; };
49+
}
50+
51+
// Fungsi aktivasi: ELU
52+
std::function<float(float)> FNN::elu(float alpha) {
53+
return [alpha](float x) { return (x > 0) ? x : alpha * (exp(x) - 1); };
54+
}
55+
56+
// Fungsi aktivasi: Softplus
57+
float FNN::softplus(float x) {
58+
return log(1 + exp(x));
59+
}
60+
61+
// Defuzzifikasi
62+
std::string FNN::defuzzify(float fuzzyOutput) {
63+
for (const auto& rule : fuzzyRules) {
64+
if (fuzzyOutput <= rule.second) {
65+
return rule.first;
66+
}
67+
}
68+
return "Undefined";
69+
}
70+
71+
// Compute Loss
72+
float FNN::computeLoss(const std::vector<float>& predicted, const std::vector<float>& expected) {
73+
float loss = 0.0f;
74+
for (size_t i = 0; i < predicted.size(); ++i) {
75+
loss += pow(predicted[i] - expected[i], 2);
76+
}
77+
return loss / predicted.size();
78+
}
79+
80+
// Train
81+
void FNN::train(const std::vector<std::vector<float>>& inputs, const std::vector<std::string>& targets, int epochs, float learningRate) {
82+
for (int epoch = 0; epoch < epochs; ++epoch) {
83+
for (size_t i = 0; i < inputs.size(); ++i) {
84+
float hiddenSum = biases[0];
85+
for (size_t j = 0; j < weights[0].size(); ++j) {
86+
hiddenSum += inputs[i][j] * weights[0][j];
87+
}
88+
float hiddenOutput = activationFunction(hiddenSum);
89+
90+
float outputSum = hiddenOutput * weights[1][0] + biases[1];
91+
float output = activationFunction(outputSum);
92+
93+
float outputError = fuzzyRules[targets[i]] - output;
94+
weights[1][0] += learningRate * outputError * hiddenOutput;
95+
biases[1] += learningRate * outputError;
96+
97+
float hiddenError = outputError * weights[1][0];
98+
for (size_t j = 0; j < weights[0].size(); ++j) {
99+
weights[0][j] += learningRate * hiddenError * inputs[i][j];
100+
}
101+
biases[0] += learningRate * hiddenError;
102+
}
103+
}
104+
}
105+
106+
// Predict
107+
std::string FNN::predictFNN(const std::vector<float>& inputs) {
108+
float hiddenSum = biases[0];
109+
for (size_t j = 0; j < weights[0].size(); ++j) {
110+
hiddenSum += inputs[j] * weights[0][j];
111+
}
112+
float hiddenOutput = activationFunction(hiddenSum);
113+
114+
float outputSum = hiddenOutput * weights[1][0] + biases[1];
115+
float output = activationFunction(outputSum);
116+
117+
return defuzzify(output);
118+
}
119+
// Evaluasi Akurasi
120+
float FNN::evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) {
121+
int correctPredictions = 0;
122+
123+
for (size_t i = 0; i < testInputs.size(); ++i) {
124+
std::string predictedOutput = predictFNN(testInputs[i]);
125+
if (predictedOutput == expectedOutputs[i]) {
126+
correctPredictions++;
127+
}
128+
}
129+
130+
float accuracy = (float)correctPredictions / testInputs.size();
131+
return accuracy * 100.0f; // Hasil dalam persen
132+
}
133+
134+
// Evaluasi Presisi
135+
float FNN::evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) {
136+
int truePositives = 0;
137+
int falsePositives = 0;
138+
139+
for (size_t i = 0; i < testInputs.size(); ++i) {
140+
std::string predictedOutput = predictFNN(testInputs[i]);
141+
142+
if (predictedOutput == expectedOutputs[i]) {
143+
truePositives++;
144+
} else if (fuzzyRules.find(predictedOutput) != fuzzyRules.end()) {
145+
falsePositives++;
146+
}
147+
}
148+
149+
if (truePositives + falsePositives == 0) {
150+
return 0.0f; // Hindari pembagian dengan nol
151+
}
152+
153+
float precision = (float)truePositives / (truePositives + falsePositives);
154+
return precision * 100.0f; // Hasil dalam persen
155+
}

src/fnn.h

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
#ifndef FNN_H
2+
#define FNN_H
3+
4+
#include <cmath>
5+
#include <vector>
6+
#include <functional>
7+
#include <map>
8+
#include <string>
9+
10+
/// Fuzzy Neural Network: a two-layer network (one hidden neuron, one output
/// neuron) whose crisp output is mapped to a fuzzy label via a rule table.
class FNN {
private:
    std::vector<std::vector<float>> weights;        // per-layer weights: [0] = hidden layer (one per input), [1] = output layer
    std::vector<float> biases;                      // per-layer biases: [0] = hidden, [1] = output
    std::function<float(float)> activationFunction; // activation applied at both layers
    std::map<std::string, float> fuzzyRules;        // fuzzy label -> crisp threshold

    /// Mean squared error between predicted and expected values.
    float computeLoss(const std::vector<float>& predicted, const std::vector<float>& expected);

    /// Maps a crisp network output to a fuzzy label from the rule table.
    std::string defuzzify(float fuzzyOutput);

public:
    /// @param inputSize  number of network inputs (fixes the topology)
    /// @param bias       initial bias used for both layers
    /// @param activation activation callable; sigmoid when null
    FNN(int inputSize = 3, float bias = 0.1, std::function<float(float)> activation = nullptr);

    /// Replaces the hidden-layer weights; size must equal inputSize.
    void setWeights(const std::vector<float>& newWeights);

    /// Replaces the per-layer biases; size must stay 2 (hidden, output).
    void setBiases(const std::vector<float>& newBiases);

    /// Installs a custom activation function.
    void setActivationFunction(std::function<float(float)> activation);

    /// Installs the fuzzy rule table (label -> threshold).
    void setFuzzyRules(const std::map<std::string, float>& rules);

    /// Forward pass + defuzzification: returns the predicted fuzzy label.
    std::string predictFNN(const std::vector<float>& inputs);

    /// Trains the network on fuzzy-labelled samples.
    void train(const std::vector<std::vector<float>>& inputs, const std::vector<std::string>& targets, int epochs = 100, float learningRate = 0.01);

    /// Percentage of exact label matches over the test set.
    float evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs);

    /// Percentage of correct predictions among all known-label predictions.
    float evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs);

    // Built-in activation functions and activation factories.
    static float sigmoid(float x);
    static float tanh(float x);
    static std::function<float(float)> leakyRelu(float alpha = 0.01);
    static std::function<float(float)> elu(float alpha = 1.0);
    static float softplus(float x);
};
57+
58+
#endif

0 commit comments

Comments
 (0)