Ensure both the Raspberry Pi and your computer are connected to the same hotspot.
Install xrdp on the Raspberry Pi so you can reach its desktop remotely:

    sudo apt-get install xrdp

Then find the Pi's IP address to connect from your computer:

    hostname -I
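If you would rather check the address from Python than from the shell, a minimal sketch like the one below (run on the Pi) reports the same LAN address that hostname -I prints. This is an illustrative assumption, not part of the project code; the 8.8.8.8 address is only used as a routable destination for the socket and no data is actually sent.

    import socket

    # connect() on a UDP socket sends nothing, but it makes the OS pick the
    # outgoing interface, whose address is the Pi's LAN IP.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    print(s.getsockname()[0])
    s.close()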
| Error Type | Description | Possible Fix |
|---|---|---|
| ImportError | No module named 'pyaudio' | Run the PyAudio install command from the AskUbuntu Guide. |
| Server Crashes | imshow() failed. Can’t initialize GTK backend | Check solutions on Stack Overflow. |
| Blue Screen Error | Initial login leads to a blue screen | Replace os.getlogin() (see the sketch after this table). |
| OSError | os.getlogin() fails | Same replacement for os.getlogin() as above (see the sketch after this table). |
                
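For the blue screen and OSError rows above, the call that typically fails is os.getlogin(), which needs a controlling terminal and can raise OSError under xrdp or SSH sessions. Below is a minimal sketch of one common standard-library replacement, assuming the login name is only needed to build a home-directory path:

    import os
    import getpass

    # os.getlogin() requires a controlling terminal; getpass.getuser() falls
    # back to environment variables, and expanduser("~") avoids the user
    # lookup entirely when only a path is needed.
    user = getpass.getuser()
    home = os.path.expanduser("~")
    print(user, home)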
Enable auto-login for convenience:
sudo raspi-config -> System Options -> boot/auto login -> Option 3
    The project aims to enhance autonomous navigation for the PiCar-X using two calibration routines, shown in the scripts below: a grayscale-sensor line-following calibration that searches for the correct steering offset, and a camera-based optical flow calibration.
                from picarx import Picarx
                from time import sleep
                
                px = Picarx()
                
                offset = 0  # Initial steering servo offset angle (degrees)
                
                def run():
                    offset = 0  
                    minAngle = -10
                    maxAngle = 10
                    
                    while True:
                        print(f"Setting servo angle to offset: {offset}")
                        px.set_dir_servo_angle(offset)
                        
                        # Move forward and recalibrate if it goes off the line
                        px.forward(5)
                        sleep(0.5)
                        while not off_line():  # Move forward until off the line
                            continue
                        # If the robot goes off the line, stop and adjust
                        px.stop()
                       
                        if end_of_line():
                            break
                             
                        data = px.get_grayscale_data()
                        state = px.get_line_status(data)
                        print("Off line, recalibrating...")
                        sleep(0.5)
                        # Backward to return to the starting point
                        px.backward(5)
                        sleep(0.5)
                        while off_line():  # Move backward until the sensors are back over the line (near the starting point)
                            continue
                        px.stop()
                        
                        
                        # Bisect the [minAngle, maxAngle] interval based on which outer sensor went off the line
                        if state[0] == 1:  # Left sensor off
                            print("Adjusting to the left")
                            maxAngle = offset
                        elif state[2] == 1:  # Right sensor off
                            print("Adjusting to the right")
                            minAngle = offset
                
                        offset = (minAngle + maxAngle) / 2
                          
                        print(f"New recalibrated offset: {offset}")
                        sleep(0.5)  # Stabilization delay
                    print("End of line reached.")
                    px.stop()
                    return offset
                
                def end_of_line():
                    """Check if all sensors detect the end of the line."""
                    data = px.get_grayscale_data()
                    state = px.get_line_status(data)  # [bool, bool, bool]
                    print(f"End of line check - sensor state: {state}")
                    return all(sensor == 1 for sensor in state)
                
                def off_line(): 
                    """Check if any sensor is off the line."""
                    data = px.get_grayscale_data()
                    state = px.get_line_status(data)
                    #print(f"Off line check - sensor state: {state}")
                    return state[0] != 0 or state[2] != 0
                
                
                    
                if __name__ == '__main__':
                    print("Final offset:", run())

The second script records a short reference video with the PiCar-X camera and then runs Lucas-Kanade optical flow on the recording:

                import numpy as np
                import cv2 as cv
                from vilib import Vilib
                from time import sleep
                from picarx import Picarx
                import matplotlib.pyplot as plt
                
                # based on https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html
                # "ADDED:" comments show the specific additions to this program that repurpose it for optical flow
                # TODO : CLEAN UP CODE & COMMENT
                
                # Use the PiCar-X to record a reference video for the optical flow calculation
                px = Picarx()
                Vilib.camera_start(vflip=False, hflip=False)
                sleep(0.8)
                
                # Configure the recording name and output path
                Vilib.rec_video_set["name"] = "video"
                Vilib.rec_video_set["path"] = "/home/cs371a/picar-x/master-calibration"
                
                # Record roughly one second of video while driving forward, then stop recording
                Vilib.rec_video_run()
                Vilib.rec_video_start()
                px.forward(5)
                sleep(1)
                px.stop()
                Vilib.rec_video_stop()
                px.backward(5)
                sleep(1)
                px.stop()
                sleep(0.2)
                
                Vilib.camera_close()
                
                # cap holds the VideoCapture handle for the reference video recorded above
                arg = "/home/cs371a/picar-x/master-calibration/video.avi"
                cap = cv.VideoCapture(arg)
                # params for ShiTomasi corner detection
                feature_params = dict( maxCorners = 100,
                                       qualityLevel = 0.3,
                                       minDistance = 7,
                                       blockSize = 7 )
                # Parameters for lucas kanade optical flow
                lk_params = dict( winSize  = (15, 15),
                                  maxLevel = 2,
                                  criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
                # Create some random colors
                color = np.random.randint(0, 255, (100, 3))
                # Take first frame and find corners in it
                ret, old_frame = cap.read()
                old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
                p0 = cv.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
                # Create a mask image for drawing purposes
                mask = np.zeros_like(old_frame)
                
                # ADDED: Create a list of lists; each inner list will hold one p0 point's positions across frames
                lines = []
                
                while(1):
                    ret, frame = cap.read()
                    if not ret:
                        print('No frames grabbed!')
                        break
                    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                    # calculate optical flow
                    p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
                    # Select good points
                    if p1 is not None:
                        good_new = p1[st==1]
                        good_old = p0[st==1]
                       
                       
                    # ADDED: Append each new point to the list holding its corresponding points from previous frames.
                    # If `lines` is empty (first loop iteration), seed it with the previous points as new sub-lists.
                    # TODO: The major unresolved issue: we are not sure the points actually stay in parallel arrays.
                    # They should, but we ran out of time to verify; check and adjust the assignment if necessary.
                    # There is a good chance it IS parallel and there is no issue, but it would be nice to be safe.
                    if(len(lines) == 0):
                        lines = [[x] for x in good_old]
                    
                    for i in range(len(good_new)):
                        lines[i].append(good_new[i])       
                        
                    # This (commented-out) code draws the optical flow tracks as a visual reference;
                    # feel free to uncomment it, though it is wasted computation for the calculation itself.
                    #for i, (new, old) in enumerate(zip(good_new, good_old)):
                    #    a, b = new.ravel()
                    #    c, d = old.ravel()
                    #    mask = cv.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
                    #    frame = cv.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
                    #img = cv.add(frame, mask)
                    #cv.imshow('frame', img)
                    k = cv.waitKey(30) & 0xff
                    if k == 27:
                        break
                    # Now update the previous frame and previous points
                    old_gray = frame_gray.copy()
                    p0 = good_new.reshape(-1, 1, 2)
                cv.destroyAllWindows()
                
                # ADDED: Plot the path of one tracked point (lines[4]) as a sanity check.
                # This is unnecessary for the final project and is only here for testing.
                track = lines[4]
                x_values = [point[0] for point in track]
                y_values = [point[1] for point in track]
                
                plt.plot(x_values, y_values, marker='o', linestyle='-', label='Tracked point 4')
                plt.legend()
                plt.show()
                
                # TODO: Get an array of y = m*x + b lines by running a linear regression on each set of points in `lines`,
                # saving the m and b coefficients for each tracked point path.
                
                # TODO: Using those regression lines, find the point whose perpendicular ("normal") distance to all of the
                # lines is lowest on average, then adjust the car so that point sits in the center of the camera frame
                # (a sketch of both steps appears after this script).
                
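The final TODOs in the script above describe the intended next step: fit a straight line to each tracked point's path, then find the single point whose perpendicular (normal) distance to all of those lines is smallest, and steer until that point sits at the center of the frame. Below is a minimal sketch of the first two parts, assuming `lines` holds the per-feature point paths built in the loop above; `fit_tracks` and `focus_point` are hypothetical helper names, not part of vilib or OpenCV.

    import numpy as np

    def fit_tracks(lines):
        """Fit y = m*x + b to each tracked point path in `lines`; returns rows of (m, b)."""
        coeffs = []
        for track in lines:
            pts = np.asarray(track, dtype=float).reshape(-1, 2)
            if len(pts) < 2:
                continue  # need at least two frames to fit a line
            m, b = np.polyfit(pts[:, 0], pts[:, 1], 1)  # degree-1 (linear) fit
            coeffs.append((m, b))
        return np.asarray(coeffs)

    def focus_point(coeffs):
        """Point minimizing the summed squared perpendicular distance to all lines."""
        m, b = coeffs[:, 0], coeffs[:, 1]
        scale = 1.0 / np.sqrt(m ** 2 + 1.0)        # normalizes each line m*x - y + b = 0
        A = np.column_stack([m * scale, -scale])   # rows: scaled (m, -1)
        rhs = -b * scale
        (x0, y0), *_ = np.linalg.lstsq(A, rhs, rcond=None)
        return x0, y0

With those helpers, something like `x0, y0 = focus_point(fit_tracks(lines))` would give a candidate focus point to compare against the center of the camera frame.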
            
        The image below represents the setup for the robot's route:
The robot's journey starts with an initial offset angle of 0, as specified in the code. It follows the route by steering with that offset and using the grayscale sensor data to stay on the line. When the robot detects it has gone off the line, it stops, reads the sensors to determine which side went off, backs up toward its starting point, and halves the steering-offset search interval to pick a new offset. The process repeats until all three sensors report the end of the line.
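As a quick arithmetic illustration of that recalibration (a standalone sketch, not robot code, following the interval-halving update in run()): starting from the interval [-10, 10] and offset 0, suppose the left outer sensor leaves the line on the first pass and the right outer sensor leaves it on the second.

    # Hypothetical two-pass walkthrough of the offset bisection used in run()
    min_angle, max_angle, offset = -10, 10, 0

    # Pass 1: left sensor leaves the line -> shrink the interval from above
    max_angle = offset
    offset = (min_angle + max_angle) / 2   # (-10 + 0) / 2 = -5.0

    # Pass 2: right sensor leaves the line -> shrink the interval from below
    min_angle = offset
    offset = (min_angle + max_angle) / 2   # (-5 + 0) / 2 = -2.5

    print(offset)  # -2.5 is the servo offset tried on the next pass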