diff --git a/final_project/main.py b/final_project/main.py
index 86adc1b6cf00094f4e13069854d9e748a8c3c5ac..fc305e213fc5e0b24982d2716f3825aef2ab2e3f 100644
--- a/final_project/main.py
+++ b/final_project/main.py
@@ -1,26 +1,20 @@
 import cv2
-import random
 import mediapipe as mp
-import pickle
 import numpy as np
 from sklearn.ensemble import RandomForestClassifier
-import time
-import os
 from tkinter import Tk, Label
 from PIL import Image, ImageTk
 
 from mouse_class import Mouse
 from hand_detection import normalise_landmarks
+from tools import load_model, set_camera_window
 
 def main():
     #define Mouse
     mouse = Mouse()
 
-    # load model
-    current_dir = os.path.dirname(__file__)
-    model_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'trained_models', 'trained_Moni_data.p'))
-    model_dict = pickle.load(open(model_path, 'rb'))
-    model = model_dict['model']
+    # load MOUSE model
+    model = load_model(device="mouse")
     
     # create hand detection object
     mp_hands = mp.solutions.hands
@@ -35,25 +29,8 @@ def main():
         return
     
     # set up Tkinter window
-    root = Tk()
-    root.title("Hand Tracking - Always on Top")
-    root.attributes("-topmost", True)
-    video_label = Label(root)
-    video_label.pack()
-
-    # adjust window geometry
-    # Get the screen width and height
-    screen_width = root.winfo_screenwidth()
-    screen_height = root.winfo_screenheight()
+    root, video_label = set_camera_window()
     
-    # Define window size and position (e.g., 320x240 window at bottom-right corner)
-    window_width = 160*4
-    window_height = 120*4
-    x_position = screen_width - window_width - 10  # 10px margin from the right
-    y_position = screen_height - window_height - 70  # 50px margin from the bottom
-
-    # Set window geometry
-    root.geometry(f"{window_width}x{window_height}+{x_position}+{y_position}")
     # mediapipe hand object
     with mp_hands.Hands(max_num_hands=1, model_complexity=1,
                         min_detection_confidence=0.9, min_tracking_confidence=0.9) as hands:
diff --git a/final_project/mouse_class.py b/final_project/mouse_class.py
index 1a205286da65d40e9a91abc3e3dd0a1e34e31cc6..52f97bf67dddbecc93032b5afa2427d12a630bd8 100644
--- a/final_project/mouse_class.py
+++ b/final_project/mouse_class.py
@@ -3,11 +3,11 @@ import pyautogui
 from collections import Counter
 from screeninfo import get_monitors
 
+from parameters import DETECTION_SIDE_MARGIN, DETECTION_BOTTOM_MARGIN, DEFAULT_HAND_SIZE
+
 MONITOR = get_monitors()[0]
 WIDTH, HEIGHT = MONITOR.width, MONITOR.height
-SIDE_MARGIN = 0.06
-BOTTOM_MARGIN = 0.6
-DEFAULT_HAND_SIZE = 0.5
+
 
 class Mouse:
     def __init__(self) -> None:
@@ -17,6 +17,7 @@ class Mouse:
         self.freeze_action = False
         self.stop_pos = None
         self.hand_size = None
+        self.drag_start = None
 
         # parameters to fine-tune
         self.action_length = 5
@@ -48,6 +49,11 @@ class Mouse:
         if self.freeze_action and action == self.previous_action:
             self.update_init(action)
         else:
+            if self.previous_action != action and self.previous_action == "drag":
+                pyautogui.mouseUp()
+                self.drag_start = None
+                print("drop")
+
             self.mouse_control(action)
             self.update_init(action)
 
@@ -58,6 +64,7 @@ class Mouse:
         self.freeze_action = action in {"left click", "right click", "double click"} # maybe change to keyboard and drops
 
 
+
     def mouse_control(self, prediction):
         if prediction == "stop execution" or None:
             pass  # Stop movement
@@ -66,7 +73,7 @@ class Mouse:
             #hand_point = ([int(self.hand_pos_x*WIDTH), int(self.hand_pos_y*HEIGHT)])
             hand_x = np.clip(int(self.hand_pos_x*WIDTH), 0, WIDTH-1)
             hand_y = np.clip(int(self.hand_pos_y*HEIGHT), 0, HEIGHT-1)
-            pyautogui.moveTo(hand_x, hand_y)
+            pyautogui.moveTo(hand_x, hand_y, tween=pyautogui.easeOutQuad)  # note: tween is ignored without a duration argument (the default duration=0 moves instantly)
 
         elif prediction == "stop moving":
             pyautogui.move(0, 0)  # Stop cursor
@@ -88,9 +95,15 @@ class Mouse:
             pyautogui.hscroll(self.scroll_distance)    # Scroll left
             # THIS FUNCTION NOT WORKS ON WINDOWS
         elif prediction == "drag":
-            if self.previous_action == "stop moving":
-                pyautogui.moveTo(*self.stop_pos)
-            pyautogui.mouseDown()  
+            if self.drag_start is None:
+                if self.previous_action == "stop moving":
+                    self.drag_start = self.stop_pos
+                else:
+                    hand_x = np.clip(int(self.hand_pos_x*WIDTH), 0, WIDTH-1)
+                    hand_y = np.clip(int(self.hand_pos_y*HEIGHT), 0, HEIGHT-1)
+                    self.drag_start = [hand_x, hand_y]
+                pyautogui.mouseDown(*self.drag_start)
+
             hand_x = np.clip(int(self.hand_pos_x*WIDTH), 0, WIDTH-1)
             hand_y = np.clip(int(self.hand_pos_y*HEIGHT), 0, HEIGHT-1)
             pyautogui.moveTo(hand_x, hand_y)
@@ -113,9 +126,9 @@ class Mouse:
         return major_element
     
     def resize_coordinates(self):
-        max_x = 1 - 2 * SIDE_MARGIN 
-        self.hand_pos_x = (self.hand_pos_x-SIDE_MARGIN) / (max_x - SIDE_MARGIN) #* DEFAULT_HAND_SIZE/self.hand_size
-        self.hand_pos_y = (self.hand_pos_y-SIDE_MARGIN) / (BOTTOM_MARGIN - SIDE_MARGIN) #* DEFAULT_HAND_SIZE/self.hand_size
+        max_x = 1 - 2 * DETECTION_SIDE_MARGIN 
+        self.hand_pos_x = (self.hand_pos_x-DETECTION_SIDE_MARGIN) / (max_x - DETECTION_SIDE_MARGIN) #* DEFAULT_HAND_SIZE/self.hand_size
+        self.hand_pos_y = (self.hand_pos_y-DETECTION_SIDE_MARGIN) / (DETECTION_BOTTOM_MARGIN - DETECTION_SIDE_MARGIN) #* DEFAULT_HAND_SIZE/self.hand_size
 
     def get_hand_size(self, middle_tip_coord, palm_coord):
         self.hand_size = palm_coord[1] - middle_tip_coord[1]
diff --git a/final_project/parameters.py b/final_project/parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e8af1567984ddae703c7b3b109d04a330cb25c7
--- /dev/null
+++ b/final_project/parameters.py
@@ -0,0 +1,15 @@
+
+MOUSE_MODEL_NAME = 'mouse_v2.p'
+KEYBOARD_MODEL_NAME = ""
+
+# CREATED CAMERA WINDOW
+CAMERA_WINDOW_WIDTH = 160*4
+CAMERA_WINDOW_HEIGHT = 120*4
+CAMERA_MARGIN_RIGHT = 10
+CAMERA_MARGIN_BOTTOM = 70
+
+# DETECTION WINDOW
+DETECTION_SIDE_MARGIN = 0.06
+DETECTION_BOTTOM_MARGIN = 0.6
+
+DEFAULT_HAND_SIZE = 0.5
diff --git a/final_project/tools.py b/final_project/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..07a402c81e30a5165dc7fb623b219718c3224205
--- /dev/null
+++ b/final_project/tools.py
@@ -0,0 +1,39 @@
+import os
+import pickle
+from tkinter import Tk, Label
+
+from parameters import (MOUSE_MODEL_NAME, KEYBOARD_MODEL_NAME, 
+                        CAMERA_WINDOW_WIDTH, CAMERA_WINDOW_HEIGHT, CAMERA_MARGIN_RIGHT, CAMERA_MARGIN_BOTTOM)
+
+def load_model(device):
+    current_dir = os.path.dirname(__file__)
+    if device == "mouse":
+        model_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'trained_models', MOUSE_MODEL_NAME))
+    elif device == "keyboard":
+        model_path = os.path.abspath(os.path.join(current_dir, os.pardir, 'trained_models', KEYBOARD_MODEL_NAME))
+    else:
+        raise ValueError(f"unknown device: {device!r}, expected 'mouse' or 'keyboard'")
+    model_dict = pickle.load(open(model_path, 'rb'))
+    return model_dict['model']
+
+def set_camera_window():
+    # set up Tkinter window
+    root = Tk()
+    root.title("Hand Tracking - Always on Top")
+    root.attributes("-topmost", True)
+    video_label = Label(root)
+    video_label.pack()
+
+    # adjust window geometry
+    # Get the screen width and height
+    screen_width = root.winfo_screenwidth()
+    screen_height = root.winfo_screenheight()
+    
+    # Position the window in the bottom-right corner of the screen
+    x_position = screen_width - CAMERA_WINDOW_WIDTH - CAMERA_MARGIN_RIGHT    # margin from the right edge
+    y_position = screen_height - CAMERA_WINDOW_HEIGHT - CAMERA_MARGIN_BOTTOM  # margin from the bottom edge
+
+    # Set window geometry
+    root.geometry(f"{CAMERA_WINDOW_WIDTH}x{CAMERA_WINDOW_HEIGHT}+{x_position}+{y_position}")
+
+    return root, video_label
\ No newline at end of file
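
For reviewers who want to exercise the new helpers in isolation, a minimal, hypothetical smoke test follows; it is a sketch rather than part of the patch, and it assumes the trained_models/mouse_v2.p pickle named in parameters.py exists one directory above final_project/.

# Hypothetical smoke test for the new tools.py helpers (not part of the patch).
from tools import load_model, set_camera_window

def smoke_test():
    model = load_model(device="mouse")       # unpickles trained_models/mouse_v2.p
    print(type(model).__name__)              # e.g. RandomForestClassifier

    root, video_label = set_camera_window()  # Tk window pinned to the bottom-right
    root.after(1000, root.destroy)           # auto-close after one second
    root.mainloop()

if __name__ == "__main__":
    smoke_test()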