---
license: cc-by-nc-sa-4.0
---

# bai-6 Emotion (TR)

## Tanım

bai-6 Emotion modeli, EEG ve iEEG ile toplanan verilerle eğitilmiş detaylı bir duygu sınıflandırma modelidir. Model, 6 kanallı bir EEG cihazıyla çalışabilir durumdadır.

## Hedef Kitle

bai modelleri herkes için tasarlanmıştır. Açık kaynak versiyonları herkes tarafından kullanılabilir.

## Sınıflar

- Sakin
- Üzgün
- Kızgın
- Mutlu

## Neuramax

Neuramax-6 Gen1 ile tam uyumlu çalışmaktadır.

---

# bai-6 Emotion (EN)

## Definition

The bai-6 Emotion model is a detailed emotion classification model trained on data collected with EEG and iEEG. The model can work with a 6-channel EEG device.

## Target Audience

bai models are designed for everyone. Open-source versions are available for everyone to use.

## Classes

- Calm
- Sad
- Angry
- Happy
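
The model outputs one probability per class. The usage scripts below display the probabilities in the order Happy, Angry, Sad, Calm; a minimal sketch of turning a prediction into a label under that ordering assumption (verify the order against your own training setup):

```python
import numpy as np

# Label order assumed from the plotting code in the usage examples below.
labels = ["Happy", "Angry", "Sad", "Calm"]
probs = np.array([0.10, 0.20, 0.05, 0.65])  # example row from model.predict(...)
print(labels[int(np.argmax(probs))])        # -> Calm
```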

## Neuramax

Fully compatible with Neuramax-6 Gen1.

---

# bai-6 Emotion v1 Yapısı / Structure

```json
"model_summary":
  "Model: Total params: 5,046 (19.71 KB)
   Trainable params: 5,044 (19.70 KB)
   Non-trainable params: 0 (0.00 B)
   Optimizer params: 2 (12.00 B)",
"layers": [
  {
    "name": "dense",
    "trainable": true,
    "count_params": 2368
  },
  {
    "name": "dropout",
    "trainable": true,
    "count_params": 0
  },
  {
    "name": "dense_1",
    "trainable": true,
    "count_params": 2080
  },
  {
    "name": "dropout_1",
    "trainable": true,
    "count_params": 0
  },
  {
    "name": "dense_2",
    "trainable": true,
    "count_params": 528
  },
  {
    "name": "dense_3",
    "trainable": true,
    "count_params": 68
  }
]
```
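
To reproduce this summary from the released weights, a minimal sketch (assuming TensorFlow/Keras is installed; the file path is the same placeholder used in the usage examples below and should point at your local copy):

```python
from tensorflow.keras.models import load_model

# Placeholder path; adjust to wherever you saved the weights.
model = load_model("model/path/bai-6 Emotion.h5")
model.summary()  # prints the layer list and parameter counts shown above

for layer in model.layers:
    print(layer.name, layer.trainable, layer.count_params())
```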

# Kullanım / Usage

## 1. Sentetik Veri ile / With Synthetic Data

```python
import numpy as np
import matplotlib.pyplot as plt
import mne
from matplotlib.animation import FuncAnimation
from tensorflow.keras.models import load_model
import joblib


class EEGMonitor:
    def __init__(self, model_path, scaler_path):
        self.model = load_model(model_path)
        self.scaler = joblib.load(scaler_path)
        self.ch_names = ['T7', 'C3', 'Cz', 'C4', 'T8', 'Pz']
        self.fs = 1000  # Örnekleme frekansı / Sampling frequency
        self.buffer_size = 1000  # 1 saniyelik buffer / 1 second buffer

        self.raw_buffer = np.zeros((6, self.buffer_size))
        self.feature_contributions = {ch: [] for ch in self.ch_names}

        # Elektrot pozisyonları (10-20 sistemi) / Electrode positions (10-20 system)
        self.montage = mne.channels.make_standard_montage('standard_1020')

        self.fig = plt.figure(figsize=(15, 10))
        self.setup_plots()

    def setup_plots(self):
        self.ax1 = self.fig.add_subplot(223)
        self.ax1.set_title("Canlı EEG Sinyalleri / Live EEG Signals")
        self.ax1.set_xlabel("Zaman (ms) / Time (ms)")
        self.ax1.set_ylabel("Amplitüd (µV) / Amplitude (µV)")

        self.ax2 = self.fig.add_subplot(221)
        self.ax2.set_title("Elektrot Konumları / Electrode Locations")

        self.ax3 = self.fig.add_subplot(224)
        self.ax3.set_title("Elektrot Katkı Oranları / Electrode Contribution Ratios")
        self.ax3.set_ylim(0, 1)

        self.ax4 = self.fig.add_subplot(222)
        self.ax4.set_title("Duygu Tahmin Olasılıkları / Emotion Prediction Probabilities")
        self.ax4.set_ylim(0, 1)

        plt.tight_layout()

    def generate_synthetic_data(self):
        """Sentetik EEG verisi üretir (6 kanal x 1000 örnek) / Generates synthetic EEG data (6 channels x 1000 samples)"""
        noise = np.random.normal(0, 5e-6, (6, self.buffer_size))

        t = np.linspace(0, 1, self.buffer_size)
        noise[1] += 2e-6 * np.sin(2 * np.pi * 10 * t)

        return noise

    def update_buffer(self, new_data):
        """Buffer'ı kaydırmalı olarak günceller / Updates the buffer with new data by rolling"""
        self.raw_buffer = np.roll(self.raw_buffer, -new_data.shape[1], axis=1)
        self.raw_buffer[:, -new_data.shape[1]:] = new_data

    def calculate_channel_contributions(self, features):
        """Her elektrotun tahmindeki katkısını hesaplar / Calculates the contribution of each electrode to the prediction"""
        contributions = np.zeros(6)
        for i in range(6):
            channel_weights = self.model.layers[0].get_weights()[0][i * 6:(i + 1) * 6]
            contributions[i] = np.mean(np.abs(channel_weights))

        return contributions / np.sum(contributions)

    def update_plot(self, frame):
        new_data = self.generate_synthetic_data()
        self.update_buffer(new_data)

        features = self.extract_features(self.raw_buffer)
        scaled_features = self.scaler.transform([features])
        probs = self.model.predict(scaled_features, verbose=0)[0]

        contributions = self.calculate_channel_contributions(features)

        self.update_eeg_plot()
        self.update_topomap()
        self.update_contributions(contributions)
        self.update_probabilities(probs)

    def update_eeg_plot(self):
        self.ax1.clear()
        for i in range(6):
            offset = i * 20e-6
            self.ax1.plot(self.raw_buffer[i] + offset, label=self.ch_names[i])
        self.ax1.legend(loc='upper right')

    def update_topomap(self):
        self.ax2.clear()
        info = mne.create_info(self.ch_names, self.fs, 'eeg')
        evoked = mne.EvokedArray(self.raw_buffer.mean(axis=1, keepdims=True), info)
        evoked.set_montage(self.montage)
        mne.viz.plot_topomap(evoked.data[:, 0], evoked.info, axes=self.ax2, show=False)

    def update_contributions(self, contributions):
        self.ax3.clear()
        self.ax3.barh(self.ch_names, contributions, color='skyblue')
        for i, v in enumerate(contributions):
            self.ax3.text(v, i, f"{v * 100:.1f}%", color='black')

    def update_probabilities(self, probs):
        emotions = ['Mutlu / Happy', 'Kızgın / Angry', 'Üzgün / Sad', 'Sakin / Calm']
        self.ax4.clear()
        bars = self.ax4.barh(emotions, probs, color=['green', 'red', 'blue', 'purple'])
        for bar in bars:
            width = bar.get_width()
            self.ax4.text(width, bar.get_y() + 0.2, f"{width * 100:.1f}%", ha='left')

    def extract_features(self, data):
        """6 kanal için özellik çıkarımı / Feature extraction for 6 channels"""
        features = []
        for channel in data:
            features.extend([
                np.mean(channel),
                np.std(channel),
                np.ptp(channel),
                np.sum(np.abs(np.diff(channel))),
                np.median(channel),
                np.percentile(np.abs(channel), 95)
            ])
        return np.array(features)

    def start_monitoring(self):
        anim = FuncAnimation(self.fig, self.update_plot, interval=100)
        plt.show()


if __name__ == "__main__":
    monitor = EEGMonitor(
        model_path='model/path/bai-6 Emotion.h5',
        scaler_path='scaler/path/bai-6_scaler.save'
    )
    monitor.start_monitoring()
```
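
The feature extractor above yields six statistics per channel, so each 1-second window becomes a 36-value vector, which is consistent with the first dense layer's 2,368 parameters (36 inputs × 64 units + 64 biases). A standalone sanity check of that shape, reusing the same statistics on random data:

```python
import numpy as np

def extract_features(data):
    # Same per-channel statistics as EEGMonitor.extract_features above.
    features = []
    for channel in data:
        features.extend([
            np.mean(channel),
            np.std(channel),
            np.ptp(channel),
            np.sum(np.abs(np.diff(channel))),
            np.median(channel),
            np.percentile(np.abs(channel), 95),
        ])
    return np.array(features)

window = np.random.normal(0, 5e-6, (6, 1000))  # one synthetic 1-second window, 6 channels
print(extract_features(window).shape)          # -> (36,)
```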

## 2. Veri Seti ile / With Dataset

```python
import numpy as np
import matplotlib.pyplot as plt
import mne
from matplotlib.animation import FuncAnimation
from tensorflow.keras.models import load_model
import joblib
import os


class EEGMonitor:
    def __init__(self, model_path, scaler_path, data_path):
        self.model = load_model(model_path)
        self.scaler = joblib.load(scaler_path)
        self.data_path = data_path
        self.ch_names = ['T7', 'C3', 'Cz', 'C4', 'T8', 'Pz']
        self.fs = 1000  # Örnekleme frekansı / Sampling frequency
        self.buffer_size = 1000  # 1 saniyelik buffer / 1 second buffer

        self.raw_buffer = np.zeros((6, self.buffer_size))
        self.feature_contributions = {ch: [] for ch in self.ch_names}

        # Elektrot pozisyonları / Electrode positions (10-20 system)
        self.montage = mne.channels.make_standard_montage('standard_1020')

        self.fig = plt.figure(figsize=(15, 10))
        self.setup_plots()

        self.dataset = self.load_dataset(self.data_path)
        self.current_index = 0

    def setup_plots(self):
        self.ax1 = self.fig.add_subplot(223)
        self.ax1.set_title("Canlı EEG Sinyalleri / Live EEG Signals")
        self.ax1.set_xlabel("Zaman (ms) / Time (ms)")
        self.ax1.set_ylabel("Amplitüd (µV) / Amplitude (µV)")

        self.ax2 = self.fig.add_subplot(221)
        self.ax2.set_title("Elektrot Konumları / Electrode Locations")

        self.ax3 = self.fig.add_subplot(224)
        self.ax3.set_title("Elektrot Katkı Oranları / Electrode Contribution Ratios")
        self.ax3.set_ylim(0, 1)

        self.ax4 = self.fig.add_subplot(222)
        self.ax4.set_title("Duygu Tahmin Olasılıkları / Emotion Prediction Probabilities")
        self.ax4.set_ylim(0, 1)

        plt.tight_layout()

    def load_dataset(self, path):
        """Desteklenen veri formatları: .npy (numpy), .csv / Supported data formats: .npy (numpy), .csv"""
        if not os.path.exists(path):
            raise FileNotFoundError(f"Veri seti bulunamadı / Dataset not found: {path}")

        if path.endswith(".npy"):
            data = np.load(path)
        elif path.endswith(".csv"):
            data = np.loadtxt(path, delimiter=',')
        else:
            raise ValueError("Desteklenmeyen dosya formatı. Yalnızca .npy veya .csv kullanılabilir. / Unsupported file format. Only .npy or .csv can be used.")

        # Transpose gerekebilir: (n_channels, n_samples) / Transpose may be needed: (n_channels, n_samples)
        if data.shape[0] != 6:
            data = data.T
        return data

    def get_next_chunk(self):
        """Veri setinden buffer_size uzunluğunda bir parça alır / Gets a chunk of length buffer_size from the dataset"""
        if self.current_index + self.buffer_size >= self.dataset.shape[1]:
            self.current_index = 0
        chunk = self.dataset[:, self.current_index:self.current_index + self.buffer_size]
        self.current_index += self.buffer_size
        return chunk

    def update_buffer(self, new_data):
        self.raw_buffer = np.roll(self.raw_buffer, -new_data.shape[1], axis=1)
        self.raw_buffer[:, -new_data.shape[1]:] = new_data

    def calculate_channel_contributions(self, features):
        contributions = np.zeros(6)
        for i in range(6):
            channel_weights = self.model.layers[0].get_weights()[0][i * 6:(i + 1) * 6]
            contributions[i] = np.mean(np.abs(channel_weights))
        return contributions / np.sum(contributions)

    def update_plot(self, frame):
        new_data = self.get_next_chunk()
        self.update_buffer(new_data)

        features = self.extract_features(self.raw_buffer)
        scaled_features = self.scaler.transform([features])
        probs = self.model.predict(scaled_features, verbose=0)[0]

        contributions = self.calculate_channel_contributions(features)

        self.update_eeg_plot()
        self.update_topomap()
        self.update_contributions(contributions)
        self.update_probabilities(probs)

    def update_eeg_plot(self):
        self.ax1.clear()
        for i in range(6):
            offset = i * 20e-6
            self.ax1.plot(self.raw_buffer[i] + offset, label=self.ch_names[i])
        self.ax1.legend(loc='upper right')

    def update_topomap(self):
        self.ax2.clear()
        info = mne.create_info(self.ch_names, self.fs, 'eeg')
        evoked = mne.EvokedArray(self.raw_buffer.mean(axis=1, keepdims=True), info)
        evoked.set_montage(self.montage)
        mne.viz.plot_topomap(evoked.data[:, 0], evoked.info, axes=self.ax2, show=False)

    def update_contributions(self, contributions):
        self.ax3.clear()
        self.ax3.barh(self.ch_names, contributions, color='skyblue')
        for i, v in enumerate(contributions):
            self.ax3.text(v, i, f"{v * 100:.1f}%", color='black')

    def update_probabilities(self, probs):
        emotions = ['Mutlu / Happy', 'Kızgın / Angry', 'Üzgün / Sad', 'Sakin / Calm']
        self.ax4.clear()
        bars = self.ax4.barh(emotions, probs, color=['green', 'red', 'blue', 'purple'])
        for bar in bars:
            width = bar.get_width()
            self.ax4.text(width, bar.get_y() + 0.2, f"{width * 100:.1f}%", ha='left')

    def extract_features(self, data):
        features = []
        for channel in data:
            features.extend([
                np.mean(channel),
                np.std(channel),
                np.ptp(channel),
                np.sum(np.abs(np.diff(channel))),
                np.median(channel),
                np.percentile(np.abs(channel), 95)
            ])
        return np.array(features)

    def start_monitoring(self):
        anim = FuncAnimation(self.fig, self.update_plot, interval=1000)
        plt.show()


if __name__ == "__main__":
    monitor = EEGMonitor(
        model_path="model/path/bai-6 Emotion.h5",
        scaler_path="scaler/path/bai-6_scaler.save",
        data_path="data/path/npy/or/csv"
    )
    monitor.start_monitoring()
```
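
The loader above expects a 2-D array with the six channels along one axis and transposes automatically when channels are not on the first axis. A quick way to write a compatible `.npy` file from your own recording (a sketch; `recording` and the output path are placeholders):

```python
import numpy as np

# Placeholder: 30 seconds of 6-channel data at 1 kHz, shaped (n_samples, n_channels).
recording = np.random.normal(0, 5e-6, (30_000, 6))

# Save as (n_channels, n_samples); the loader also accepts the transposed layout.
np.save("data/path/recording.npy", recording.T)
```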

---

## Lisans / License

CC-BY-NC-SA-4.0