diff --git a/final_project/keyboard_class.py b/final_project/keyboard_class.py
index 12f13a71d072712c04c5ee3cb5e647fe4c62124e..6fed41d4bc49b248e519e1fca6ed67d08267eb7f 100644
--- a/final_project/keyboard_class.py
+++ b/final_project/keyboard_class.py
@@ -10,14 +10,16 @@ class Keyboard:
     def __init__(self) -> None:
         
         self.predictions = []
-        self.previous_action = None
+        self.previous_action = "change the model"
         self.freeze_action = False
 
         # parameters to fine-tune
         self.action_length = 10
-        #self.time_checking = 0.5
         
-        self.keyboard_action_list = ["a", "b", "c", "d", "e","f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
+        self.keyboard_action_list = list("abcdefghijklmnopqrstuvwxyz")
 
     def get_hand_pos(self, hand_pos):
         self.hand_pos_x = hand_pos[0]
@@ -28,6 +30,8 @@ class Keyboard:
         # when the number of predictions reaches the defined action length, make an action
         if len(self.predictions) == self.action_length:
             self.make_action()
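+        # remember a model-switch gesture immediately so it is not re-triggered after the swap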
+        elif prediction == "change the model":
+            self.previous_action = "change the model"
         
     def make_action(self):
         action = self.get_major_element(self.predictions)
@@ -41,7 +45,7 @@ class Keyboard:
         self.predictions = []
         self.previous_action = action
 
-        self.freeze_action = action in self.keyboard_action_list
+        self.freeze_action = action in self.keyboard_action_list or action == "change the model"
 
     def keyboard_hand_parameters(self):
         pass
@@ -57,8 +61,8 @@ class Keyboard:
             pyautogui.press('space')
         elif prediction == "del":
             pyautogui.press('backspace')
-        elif prediction == "change to mouse":
-            pass
+        elif prediction == "change the model":
+            self.previous_action = "change the model"
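+            # the actual model switch happens in main.py; here we only record the state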
         
         #time.sleep(self.time_checking)  # Adjust speed of movement
     
diff --git a/final_project/main.py b/final_project/main.py
index fc305e213fc5e0b24982d2716f3825aef2ab2e3f..c26394c9eb88b6f0e24bf3cbfdd26448802a5f9f 100644
--- a/final_project/main.py
+++ b/final_project/main.py
@@ -6,15 +6,30 @@ from tkinter import Tk, Label
 from PIL import Image, ImageTk
 
 from mouse_class import Mouse
-from hand_detection import normalise_landmarks
+from keyboard_class import Keyboard
+from specialkeys_class import Specialkeys
+from hand_detection import normalise_landmarks, landmarks_from_results
 from tools import load_model, set_camera_window
 
+# hide the mediapipe UserWarning: "SymbolDatabase.GetPrototype() is deprecated. Please use message_factory.GetMessageClass() instead. SymbolDatabase.GetPrototype() will be removed soon."
+import warnings
+warnings.filterwarnings("ignore", category=UserWarning)
+
+MOUSE_ACTIVE = True
+FREEZE_CHANGE_MODEL = False
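+# MOUSE_ACTIVE selects which model handles the right hand; FREEZE_CHANGE_MODEL debounces
+# the "change the model" gesture so one held gesture toggles the model only once.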
+
 def main():
+    global MOUSE_ACTIVE
+    global FREEZE_CHANGE_MODEL
     #define Mouse
     mouse = Mouse()
+    keyboard = Keyboard()
+    specialkeys = Specialkeys()
 
     # load MOUSE model
-    model = load_model(device = "mouse")
+    model_mouse = load_model(device = "mouse")
+    model_keyboard = load_model(device = "keyboard")
+    model_specialkeys = load_model(device = "specialkeys")
     
     # create hand detection object
     mp_hands = mp.solutions.hands
@@ -32,10 +47,12 @@ def main():
     root, video_label = set_camera_window()
     
     # mediapipe hand object
-    with mp_hands.Hands(max_num_hands=1, model_complexity=1,
+    with mp_hands.Hands(max_num_hands=2, model_complexity=1,
                         min_detection_confidence=0.9, min_tracking_confidence=0.9) as hands:
         
         def update_frame():
+            global MOUSE_ACTIVE
+            global FREEZE_CHANGE_MODEL
             ret, frame = cap.read()
             if not ret:
                 print("Warning: Cannot read camera input")
@@ -49,46 +66,89 @@ def main():
             # Hand detection
             results = hands.process(frameRGB)
             
-            landmark_list = []
-            mouse_command = None
+            right_landmark_list = []
+            left_landmark_list = []
+            command = None
+
             if results.multi_hand_landmarks:
-                # one hand is detected, because max_num_hands=1
-                hand_landmarks = results.multi_hand_landmarks[0]
-
-                # Draw landmarks on frame
-                mp_drawing.draw_landmarks(
-                    frameRGB, hand_landmarks, mp_hands.HAND_CONNECTIONS, 
-                    mp_drawing.DrawingSpec(color=(250, 0, 0), thickness=2, circle_radius=4),
-                    mp_drawing.DrawingSpec(color=(0, 250, 0), thickness=2, circle_radius=2)
-                )
-                
-                # get landmark list with indices described in https://github.com/google-ai-edge/mediapipe/blob/master/mediapipe/python/solutions/hands.py
-                for lm in hand_landmarks.landmark:
-                    landmark_list.append((lm.x, lm.y))
+                # up to two hands may be detected, so split them into left and right
+                left_hand_landmarks, right_hand_landmarks = landmarks_from_results(results) 
+
+                # if right hand detected, process
+                if right_hand_landmarks is not None:
+                    # Draw landmarks on frame
+                    mp_drawing.draw_landmarks(
+                        frameRGB, right_hand_landmarks, mp_hands.HAND_CONNECTIONS, 
+                        mp_drawing.DrawingSpec(color=(250, 0, 0), thickness=2, circle_radius=4),
+                        mp_drawing.DrawingSpec(color=(0, 250, 0), thickness=2, circle_radius=2)
+                    )
+                    
+                    # get landmark list with indices described in https://github.com/google-ai-edge/mediapipe/blob/master/mediapipe/python/solutions/hands.py
+                    for lm in right_hand_landmarks.landmark:
+                        right_landmark_list.append((lm.x, lm.y))
+                    
+                    # normalise landmarks the same way as during training
+                    normalised_right_landmark_list = normalise_landmarks(right_landmark_list)
+                    
+                    # apply model
+                    if MOUSE_ACTIVE:
+                        pred = model_mouse.predict(np.asarray(normalised_right_landmark_list).reshape(1, -1))
+                        command = pred[0]
+                        mouse.add_prediction(command)
+                        
+                        if command == "move cursor" or command == "grab":
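+                            # landmark indices: 0 = wrist, 9 = middle-finger MCP, 12 = middle fingertip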
+                            mouse.get_hand_size(right_landmark_list[12], right_landmark_list[0])
+                            mouse.get_hand_pos(right_landmark_list[9])
+                        elif command == "change the model":
+                            if not FREEZE_CHANGE_MODEL:
+                                MOUSE_ACTIVE = False
+                                FREEZE_CHANGE_MODEL = True
+                        else:
+                            FREEZE_CHANGE_MODEL = False
+
+                    else:
+                        pred = model_keyboard.predict(np.asarray(normalised_right_landmark_list).reshape(1, -1))
+                        command = pred[0]
+                        keyboard.add_prediction(command)
+                        if command == "change the model":
+                            if not FREEZE_CHANGE_MODEL:
+                                MOUSE_ACTIVE = True
+                                FREEZE_CHANGE_MODEL = True
+                        else:
+                            FREEZE_CHANGE_MODEL = False
+
+                    cv2.putText(
+                        img=frameRGB, 
+                        text=f"{pred[0]} pos {right_landmark_list[8][0]:.2f}, {right_landmark_list[8][1]:.2f}, {MOUSE_ACTIVE}",
+                        org=(30, 30), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(255, 0, 0), thickness=1
+                    )
+
+
+                if left_hand_landmarks is not None:
+                    # Draw landmarks on frame
+                    mp_drawing.draw_landmarks(
+                        frameRGB, left_hand_landmarks, mp_hands.HAND_CONNECTIONS, 
+                        mp_drawing.DrawingSpec(color=(0, 250, 0), thickness=2, circle_radius=4),
+                        mp_drawing.DrawingSpec(color=(0, 120, 120), thickness=2, circle_radius=2)
+                    )
+                    
+                    # get landmark list with indices described in https://github.com/google-ai-edge/mediapipe/blob/master/mediapipe/python/solutions/hands.py
+                    for lm in left_hand_landmarks.landmark:
+                        left_landmark_list.append((lm.x, lm.y))
                 
-                # normalise landmarks for more powerful training
-                normalised_landmark_list = normalise_landmarks(landmark_list)
+                    # normalise landmarks the same way as during training
+                    normalised_left_landmark_list = normalise_landmarks(left_landmark_list)
                 
-                # apply model
-                pred = model.predict(np.asarray(normalised_landmark_list).reshape(1, -1))
-                mouse_command = pred[0]
-                hand_size = landmark_list[0][0] - landmark_list[12][0], landmark_list[0][1] - landmark_list[12][1]
-                cv2.putText(
-                    img=frameRGB, 
-                    text=f"{pred[0]} pos {landmark_list[8][0]:.2f}, {landmark_list[8][1]:.2f}",
-                    org=(30, 30), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(255, 0, 0), thickness=1
-                )
-                cv2.putText(
-                    img=frameRGB, 
-                    text=f"hand size: {hand_size[0]:.2f}, {hand_size[1]:.2f}",
-                    org=(30, 60), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(0, 255, 0), thickness=1
-                )
-
-                mouse.add_prediction(mouse_command)
-                if mouse_command == "move cursor" or "grab":
-                    mouse.get_hand_size(landmark_list[12], landmark_list[0])
-                    mouse.get_hand_pos(landmark_list[8])
-            
+                    # apply model
+                    pred = model_specialkeys.predict(np.asarray(normalised_left_landmark_list).reshape(1, -1))
+                    command = pred[0]
+                    cv2.putText(
+                        img=frameRGB, text=pred[0], org=(30, 30), 
+                        fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(0, 255, 0), thickness=1
+                    )
+                    
+                    specialkeys.add_prediction(command)
+
             # Convert frame to Tkinter-compatible format and display
             frameRGB_resized = cv2.resize(frameRGB, (root.winfo_width(), root.winfo_height()))
             img = ImageTk.PhotoImage(Image.fromarray(frameRGB_resized))
diff --git a/final_project/mouse_class.py b/final_project/mouse_class.py
index 67f95dfbf423a12e3a74874146046eda8a008fea..a6f9d5f1fd998e0e09c4b780145e2af5f1927306 100644
--- a/final_project/mouse_class.py
+++ b/final_project/mouse_class.py
@@ -2,18 +2,16 @@ import numpy as np
 import pyautogui
 from collections import Counter
 from screeninfo import get_monitors
-
 from parameters import DETECTION_SIDE_MARGIN, DEFAULT_HAND_SIZE, VISUALIZATION_MARGIN
 
 MONITOR = get_monitors()[0]
 WIDTH, HEIGHT = MONITOR.width, MONITOR.height
 
-
 class Mouse:
     def __init__(self) -> None:
         
         self.predictions = []
-        self.previous_action = None
+        self.previous_action = "change the model"
         self.freeze_action = False
         self.stop_pos = None
         self.hand_size = None
@@ -27,8 +25,7 @@ class Mouse:
     def get_hand_pos(self, hand_pos):
         self.hand_pos_x = hand_pos[0]
         self.hand_pos_y = hand_pos[1]
-        #print(f"hand size: {self.hand_size}")
-        #print(f"hand coord: {self.hand_pos_x}, {self.hand_pos_y} \n")
+
         self.resize_coordinates()
 
 
@@ -43,6 +40,8 @@ class Mouse:
                     self.mouse_control(self.previous_action)
             else:
                 self.mouse_control(self.previous_action)
+        elif prediction == "change the model":
+            self.previous_action = "change the model"
 
     def make_action(self):
         action = self.get_major_element(self.predictions)
@@ -60,20 +59,16 @@ class Mouse:
         self.predictions = []
         self.previous_action = action
 
-        self.freeze_action = action in {"left click", "right click", "double click"} # maybe change to keyboard and drops
-
-
+        self.freeze_action = action in {"left click", "right click", "double click", "change the model"} # maybe drops
 
     def mouse_control(self, prediction):
         if prediction == "stop execution" or None:
             pass  # Stop movement
         elif prediction == "move cursor":
             
-            #hand_point = ([int(self.hand_pos_x*WIDTH), int(self.hand_pos_y*HEIGHT)])
             hand_x = np.clip(int(self.hand_pos_x*WIDTH), VISUALIZATION_MARGIN, WIDTH-VISUALIZATION_MARGIN)
             hand_y = np.clip(int(self.hand_pos_y*HEIGHT), VISUALIZATION_MARGIN, HEIGHT-VISUALIZATION_MARGIN)
-            #print(f"x: {hand_x}, y: {hand_y} \n width: {WIDTH}, height: {HEIGHT}")
-            #pyautogui.moveTo(hand_x, hand_y, tween = pyautogui.easeOutQuad)
+
             pyautogui.moveTo(hand_x, hand_y)
 
         elif prediction == "stop moving":
@@ -114,8 +109,8 @@ class Mouse:
             pyautogui.mouseDown()
         elif prediction == "multiple item selection drop":
             pyautogui.mouseUp()
-        elif prediction == "change to keyboard":
-            pass
+        elif prediction == "change the model":
+            self.previous_action = "change the model"
         
         #time.sleep(self.time_checking)  # Adjust speed of movement
     
@@ -127,20 +122,23 @@ class Mouse:
         return major_element
     
     def resize_coordinates(self):
-        max_x = 1 - 2 * DETECTION_SIDE_MARGIN 
-        calculated_bottom_margin = (-0.7)*self.hand_size + 1
-        #default_bottom_margin = -0.7*DEFAULT_HAND_SIZE + 1
-        if self.hand_size > DEFAULT_HAND_SIZE:                          # close
-            #speed = default_bottom_margin / calculated_bottom_margin
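+        # the usable detection band is defined by hand-size dependent margins; positions inside it are stretched to the full screen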
+        max_x = 1 - DETECTION_SIDE_MARGIN 
+        calculated_bottom_margin = (-0.7)*self.hand_size + 1 
+        calculated_up_margin = (-0.7)*self.hand_size + 1 
+
+        if self.hand_size > DEFAULT_HAND_SIZE:                          # the hand is close
+            # TODO: the window size could later be reduced here, and shifted up or down, for more comfortable use from a distance
+            calculated_up_margin = DETECTION_SIDE_MARGIN + 0.1
+            calculated_bottom_margin = 1 - DETECTION_SIDE_MARGIN - 0.1
             speed = 2*(self.hand_size-DEFAULT_HAND_SIZE)**2 + 1
-        else:                                                           # far away
-            #speed = calculated_bottom_margin / default_bottom_margin
+        else:                                                           # the hand is far away
+            calculated_up_margin = (0.572)*self.hand_size - 0.0212          # close to 0
+            calculated_bottom_margin = (-0.733)*self.hand_size + 1.0432     # close to 1
             speed = 7*(self.hand_size-DEFAULT_HAND_SIZE)**2 + 1
 
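+        # map the detection band onto the screen, scaled by the distance-dependent speed factor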
         self.hand_pos_x = (self.hand_pos_x-DETECTION_SIDE_MARGIN) / (max_x - DETECTION_SIDE_MARGIN) * speed
-        self.hand_pos_y = (self.hand_pos_y-DETECTION_SIDE_MARGIN) / (calculated_bottom_margin - DETECTION_SIDE_MARGIN) * speed
+        self.hand_pos_y = (self.hand_pos_y - calculated_up_margin) / (calculated_bottom_margin - calculated_up_margin) * speed
 
-        #print(f"DETECTION_BOTTOM_MARGIN: {calculated_bottom_margin} DEFAULT_BOTTOM_MARGIN: {default_bottom_margin} \n SPEED: {speed} \n")
     def get_hand_size(self, middle_tip_coord, palm_coord):
         self.hand_size = palm_coord[1] - middle_tip_coord[1]
 
diff --git a/final_project/parameters.py b/final_project/parameters.py
index 8449e8ab3a4d34ad25fd12728d312683dc003ce3..7ace7fc160d04ba73d275833ad236b6288712fd6 100644
--- a/final_project/parameters.py
+++ b/final_project/parameters.py
@@ -1,6 +1,10 @@
+# flag for the model that is active at start-up
+MOUSE_ACTIVE = True
+
 #MODEL NAMES
-MOUSE_MODEL_NAME = 'mouse_v2.p'
-KEYBOARD_MODEL_NAME = ""
+MOUSE_MODEL_NAME = 'mouse_v3.p'
+KEYBOARD_MODEL_NAME = "alfabet_v2.p"
+SPECIALKEYS_MODEL_NAME = "specialkeys_v1.p"
 
 # CREATED CAMERA WINDOW
 CAMERA_WINDOW_WIDTH = 160*4
@@ -9,10 +13,9 @@ CAMERA_MARGIN_RIGHT = 10
 CAMERA_MARGIN_BOTTON = 70
 
 # DETECTION WINDOW
-DETECTION_SIDE_MARGIN = 0.06
-#DETECTION_BOTTOM_MARGIN = 0.6 #not used
+DETECTION_SIDE_MARGIN = 0.15
 
-DEFAULT_HAND_SIZE = 0.5
+DEFAULT_HAND_SIZE = 0.4
 
 #VISUALIZATION WINDOW
 VISUALIZATION_MARGIN = 10
diff --git a/final_project/tools.py b/final_project/tools.py
index 07a402c81e30a5165dc7fb623b219718c3224205..031bfef2bbeceb41309498dc0499236fa32755bd 100644
--- a/final_project/tools.py
+++ b/final_project/tools.py
@@ -2,7 +2,7 @@ import os
 import pickle
 from tkinter import Tk, Label
 
-from parameters import (MOUSE_MODEL_NAME, KEYBOARD_MODEL_NAME, 
+from parameters import (MOUSE_MODEL_NAME, KEYBOARD_MODEL_NAME, SPECIALKEYS_MODEL_NAME,
                         CAMERA_WINDOW_WIDTH, CAMERA_WINDOW_HEIGHT, CAMERA_MARGIN_RIGHT, CAMERA_MARGIN_BOTTON)
 
 def load_model(device):
@@ -11,6 +11,8 @@ def load_model(device):
         model_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'trained_models', MOUSE_MODEL_NAME))
     elif device == "keyboard":
         model_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'trained_models', KEYBOARD_MODEL_NAME))
+    elif device == "specialkeys":
+        model_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'trained_models', SPECIALKEYS_MODEL_NAME))
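+    else:
+        # defensive guard (suggested addition): fail loudly instead of hitting a NameError below
+        raise ValueError(f"unknown device: {device}")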
     model_dict = pickle.load(open(model_path, 'rb'))
     model = model_dict['model']
 
diff --git a/merge_pickles.py b/merge_pickles.py
index 0b23aa18c5b74fab2ada0457f35223a6083b5e9f..ceb385635baab17cad5705cb38297f5565b8afd9 100644
--- a/merge_pickles.py
+++ b/merge_pickles.py
@@ -3,8 +3,8 @@ import os
 from pprint import pprint
 
 
-FOLDER_PATH = "./train_data/specialkeys/"
-OUTPUT_FILE = 'specialkeys_v1.pickle'  # Output merged file
+FOLDER_PATH = "./train_data/keyboard/"
+OUTPUT_FILE = 'alfabet_v2.pickle'  # Output merged file
 
 current_dir = os.path.dirname(__file__)
 OUTPUT_FOLDER = os.path.abspath(os.path.join(current_dir, 'merged_training_data', OUTPUT_FILE))
diff --git a/merged_training_data/alfabet_v2.pickle b/merged_training_data/alfabet_v2.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..6d7c741625d7cafaa1417273c17e8f0954208ecd
Binary files /dev/null and b/merged_training_data/alfabet_v2.pickle differ
diff --git a/merged_training_data/mouse_v3.pickle b/merged_training_data/mouse_v3.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..49395a89310118d9bf898a98a147ba73c7395e82
Binary files /dev/null and b/merged_training_data/mouse_v3.pickle differ
diff --git a/train_data/keyboard/change_the_model_Moni.pickle b/train_data/keyboard/change_the_model_Moni.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..8d121928a72c0cd23eb7e3cb0dec3546bdad14d4
Binary files /dev/null and b/train_data/keyboard/change_the_model_Moni.pickle differ
diff --git a/train_data/mouse/change_the_model_Moni.pickle b/train_data/mouse/change_the_model_Moni.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..8d121928a72c0cd23eb7e3cb0dec3546bdad14d4
Binary files /dev/null and b/train_data/mouse/change_the_model_Moni.pickle differ
diff --git a/train_data/double_click_Bazsi.pickle b/train_data/mouse/double_click_Bazsi.pickle
similarity index 100%
rename from train_data/double_click_Bazsi.pickle
rename to train_data/mouse/double_click_Bazsi.pickle
diff --git a/train_data/double_click_Moni.pickle b/train_data/mouse/double_click_Moni.pickle
similarity index 100%
rename from train_data/double_click_Moni.pickle
rename to train_data/mouse/double_click_Moni.pickle
diff --git a/train_data/left_click_Bazsi.pickle b/train_data/mouse/left_click_Bazsi.pickle
similarity index 100%
rename from train_data/left_click_Bazsi.pickle
rename to train_data/mouse/left_click_Bazsi.pickle
diff --git a/train_data/left_click_Moni.pickle b/train_data/mouse/left_click_Moni.pickle
similarity index 100%
rename from train_data/left_click_Moni.pickle
rename to train_data/mouse/left_click_Moni.pickle
diff --git a/train_data/move_cursor_Bazsi.pickle b/train_data/mouse/move_cursor_Bazsi.pickle
similarity index 100%
rename from train_data/move_cursor_Bazsi.pickle
rename to train_data/mouse/move_cursor_Bazsi.pickle
diff --git a/train_data/move_cursor_Moni.pickle b/train_data/mouse/move_cursor_Moni.pickle
similarity index 100%
rename from train_data/move_cursor_Moni.pickle
rename to train_data/mouse/move_cursor_Moni.pickle
diff --git a/train_data/new_drag.pickle b/train_data/mouse/new_drag.pickle
similarity index 100%
rename from train_data/new_drag.pickle
rename to train_data/mouse/new_drag.pickle
diff --git a/train_data/right_click_Bazsi.pickle b/train_data/mouse/right_click_Bazsi.pickle
similarity index 100%
rename from train_data/right_click_Bazsi.pickle
rename to train_data/mouse/right_click_Bazsi.pickle
diff --git a/train_data/right_click_Moni.pickle b/train_data/mouse/right_click_Moni.pickle
similarity index 100%
rename from train_data/right_click_Moni.pickle
rename to train_data/mouse/right_click_Moni.pickle
diff --git a/train_data/scrolling_down_Moni.pickle b/train_data/mouse/scrolling_down_Moni.pickle
similarity index 100%
rename from train_data/scrolling_down_Moni.pickle
rename to train_data/mouse/scrolling_down_Moni.pickle
diff --git a/train_data/scrolling_left_Moni.pickle b/train_data/mouse/scrolling_left_Moni.pickle
similarity index 100%
rename from train_data/scrolling_left_Moni.pickle
rename to train_data/mouse/scrolling_left_Moni.pickle
diff --git a/train_data/scrolling_right_Moni.pickle b/train_data/mouse/scrolling_right_Moni.pickle
similarity index 100%
rename from train_data/scrolling_right_Moni.pickle
rename to train_data/mouse/scrolling_right_Moni.pickle
diff --git a/train_data/scrolling_up_Moni.pickle b/train_data/mouse/scrolling_up_Moni.pickle
similarity index 100%
rename from train_data/scrolling_up_Moni.pickle
rename to train_data/mouse/scrolling_up_Moni.pickle
diff --git a/train_data/stop_execution_Moni.pickle b/train_data/mouse/stop_execution_Moni.pickle
similarity index 100%
rename from train_data/stop_execution_Moni.pickle
rename to train_data/mouse/stop_execution_Moni.pickle
diff --git a/train_data/stop_moving_Bazsi.pickle b/train_data/mouse/stop_moving_Bazsi.pickle
similarity index 100%
rename from train_data/stop_moving_Bazsi.pickle
rename to train_data/mouse/stop_moving_Bazsi.pickle
diff --git a/train_data/stop_moving_Moni.pickle b/train_data/mouse/stop_moving_Moni.pickle
similarity index 100%
rename from train_data/stop_moving_Moni.pickle
rename to train_data/mouse/stop_moving_Moni.pickle
diff --git a/trained_models/alfabet_v2.p b/trained_models/alfabet_v2.p
new file mode 100644
index 0000000000000000000000000000000000000000..4637e4423208392d3e6e8326362bbd6fb0e1dbb7
Binary files /dev/null and b/trained_models/alfabet_v2.p differ
diff --git a/trained_models/mouse_v3.p b/trained_models/mouse_v3.p
new file mode 100644
index 0000000000000000000000000000000000000000..bff48fa6c44e831cc11a3c0a66e98bdd0112b7b6
Binary files /dev/null and b/trained_models/mouse_v3.p differ
diff --git a/training/data_collector.py b/training/data_collector.py
index 5dcca2831710e13b5a17cea791293dde0c3a85ca..76c5676102491704d28985f1025d9467566a6531 100644
--- a/training/data_collector.py
+++ b/training/data_collector.py
@@ -166,7 +166,7 @@ def main():
     current_dir = os.path.dirname(__file__)
     
     filename = input("give filename: ")
-    folder_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'train_data/specialkeys', filename))
+    folder_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'train_data', filename))
     f = open(folder_path + '.pickle', 'wb')
     pickle.dump({'data': data, 'label':labels}, f)
     f.close()
diff --git a/training/train_model.py b/training/train_model.py
index 0bf3c550cb1e53ba01720ca46373da50a2b373ce..d78b5074246922d1cf4a0c1c7e86013160c5a271 100644
--- a/training/train_model.py
+++ b/training/train_model.py
@@ -6,7 +6,7 @@ from sklearn.model_selection import train_test_split
 from sklearn.metrics import accuracy_score
 import os
 
-filename = 'specialkeys_v1'
+filename = 'mouse_v3'
 
 current_dir = os.path.dirname(__file__)
 file_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'merged_training_data', filename))