Skip to content
This repository was archived by the owner on Apr 8, 2025. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
build
**/.vscode
**/__pycache__
**/.DS_Store
6 changes: 6 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
@@ -1,3 +1,9 @@
[submodule "gps"]
path = gps
url = ../gps/
[submodule "YOLO"]
path = YOLO
url = https://github.com/Sooner-Rover-Team/YOLO
[submodule "src/autonomous/gps"]
path = src/autonomous/gps
url = https://github.com/Sooner-Rover-Team/gps
1 change: 1 addition & 0 deletions .python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
cpython@3.11.3
File renamed without changes.
File renamed without changes.
1 change: 1 addition & 0 deletions YOLO
Submodule YOLO added at 453f31
26 changes: 26 additions & 0 deletions examples/ARTrackerTest/config.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
[CONFIG]
SWIFT_IP=10.0.0.222
SWIFT_PORT=55556
MBED_IP=10.0.0.101
MBED_PORT=1001
[ARTRACKER]
#dpp is .040625 with logi
DEGREES_PER_PIXEL=0.09375
VDEGREES_PER_PIXEL = .125
#Focal length was 1500 with logi
FOCAL_LENGTH=435
FOCAL_LENGTH30H=590
FOCAL_LENGTH30V=470
KNOWN_TAG_WIDTH=20
FORMAT=XVID
FRAME_WIDTH=1280
FRAME_HEIGHT=720
MAIN_CAMERA=2.3
LEFT_CAMERA=2.4
RIGHT_CAMERA=2.2
[YOLO]
#I am assuming that these are in the darknet folder
WEIGHTS=soro.weights
DATA=cfg/soro.data
CFG=cfg/soro.cfg
THRESHOLD=.25
84 changes: 84 additions & 0 deletions examples/ARTrackerTest/newAr.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
import cv2
import cv2.aruco as aruco
import numpy as np
import configparser
import os


def preprocess_image(image):
    """Prepare a BGR image for marker detection.

    Converts the image to grayscale, boosts local contrast with CLAHE,
    blurs to reduce noise, and applies inverted adaptive mean
    thresholding to produce a binary image.

    Args:
        image: BGR image (as returned by cv2.imread / VideoCapture.read).

    Returns:
        Binary (uint8) image from cv2.adaptiveThreshold.
    """
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Enhance contrast using CLAHE (adaptive histogram equalization)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    equalized = clahe.apply(gray)

    # Apply Gaussian blur to reduce noise
    blurred = cv2.GaussianBlur(equalized, (5, 5), 0)

    # Segment the image with inverted adaptive mean thresholding.
    # (Removed a leftover debug print of type(blurred).)
    thresholded = cv2.adaptiveThreshold(
        blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 4
    )

    return thresholded

def configCam(cam, configFile):
    """Configure a cv2.VideoCapture from the [ARTRACKER] section of an INI file.

    Applies frame width/height, buffer size, and FOURCC format to *cam*.
    Exits the process with status -2 if *configFile* cannot be read.

    Args:
        cam: an opened cv2.VideoCapture to configure (mutated in place).
        configFile: path (absolute or relative to the CWD) of the INI file.
    """
    # Open the config file
    config = configparser.ConfigParser(allow_no_value=True)
    if not config.read(configFile):
        print("ERROR OPENING AR CONFIG:", end="")
        # Print the full path that failed so the error is actionable.
        if os.path.isabs(configFile):
            print(configFile)
        else:
            # BUG FIX: this string was missing its f-prefix, so the literal
            # text "{os.getcwd()}/{configFile}" was printed.
            print(f"{os.getcwd()}/{configFile}")
        exit(-2)

    # Set variables from the config file.
    # NOTE(review): the first six values are parsed but never used here —
    # presumably kept so a missing key fails fast; confirm before removing.
    degreesPerPixel = float(config['ARTRACKER']['DEGREES_PER_PIXEL'])
    vDegreesPerPixel = float(config['ARTRACKER']['VDEGREES_PER_PIXEL'])
    focalLength = float(config['ARTRACKER']['FOCAL_LENGTH'])
    focalLength30H = float(config['ARTRACKER']['FOCAL_LENGTH30H'])
    focalLength30V = float(config['ARTRACKER']['FOCAL_LENGTH30V'])
    knownMarkerWidth = float(config['ARTRACKER']['KNOWN_TAG_WIDTH'])
    fourcc = config['ARTRACKER']['FORMAT']  # renamed: 'format' shadowed a builtin
    frameWidth = int(config['ARTRACKER']['FRAME_WIDTH'])
    frameHeight = int(config['ARTRACKER']['FRAME_HEIGHT'])

    # Set the camera properties
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
    cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # greatly speeds up the program but the writer is a bit wack because of this
    cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc))

if __name__ == "__main__":
    # Create the camera object and apply the settings from config.ini
    cam = cv2.VideoCapture(1)
    configCam(cam, "config.ini")

    # Create the aruco dictionary of 4x4 tags (IDs 0-49)
    markerDict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

    while True:
        # Read a frame from the camera
        ret, frame = cam.read()
        if not ret:
            # BUG FIX: a failed grab previously passed None to cvtColor
            # and crashed; bail out cleanly instead.
            print("Failed to read frame from camera")
            break

        # Convert to grayscale and adaptively threshold for detection.
        # (Removed leftover debug prints of type(gray) and a raw row dump.)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        adapted_thresh = cv2.adaptiveThreshold(
            gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 3
        )

        # Display the processed image (shown once; the original duplicated
        # this imshow/waitKey pair in the else branch)
        cv2.imshow('adapted thresh', adapted_thresh)

        # Detect the markers; report only a single marker with ID 1
        (corners, markerIDs, rejected) = aruco.detectMarkers(adapted_thresh, markerDict)
        if len(corners) == 1 and markerIDs[0] == 1:
            print(markerIDs)
        else:
            print("No markers found")

        # Exit the program if the user presses 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the camera and close windows on exit
    cam.release()
    cv2.destroyAllWindows()

Binary file added examples/ARTrackerTest/pictures/10m.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added examples/ARTrackerTest/pictures/15m.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added examples/ARTrackerTest/pictures/5m.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
41 changes: 41 additions & 0 deletions examples/ARTrackerTest/testARpic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import cv2
import cv2.aruco as aruco
import argparse

# Parse the command line arguments
parser = argparse.ArgumentParser(description="Test the ARTracker with a picture")
parser.add_argument("file", help="The file to test the ARTracker with")
args = parser.parse_args()  # parse the arguments

if __name__ == "__main__":
    # Read the image from the file
    if args.file.endswith((".jpg", ".png")):
        image = cv2.imread(args.file)
        if image is None:
            # BUG FIX: cv2.imread returns None (no exception) for a
            # missing/unreadable file, which crashed cvtColor below.
            print(f"ERROR: could not read image from {args.file}")
            exit(-1)
    else:
        print("ERROR: File must be a jpg or png")
        exit(-1)

    # Show the unprocessed image at half size
    cv2.namedWindow('unprocessed image', cv2.WINDOW_KEEPRATIO)
    cv2.imshow('unprocessed image', image)
    cv2.resizeWindow('unprocessed image', image.shape[1] // 2, image.shape[0] // 2)

    # Process the image: grayscale, then adaptive mean thresholding
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    adapted_thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 3)
    cv2.namedWindow('adapted thresh', cv2.WINDOW_KEEPRATIO)
    cv2.resizeWindow('adapted thresh', image.shape[1] // 2, image.shape[0] // 2)
    cv2.imshow('adapted thresh', adapted_thresh)

    # Detect the markers
    markerDict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
    (corners, markerIDs, rejected) = aruco.detectMarkers(adapted_thresh, markerDict)
    if markerIDs is not None:
        print("Marker found")
        # BUG FIX: was '"Found IDs:" + markerIDs', which raises TypeError
        # (cannot concatenate str and ndarray)
        print("Found IDs:" + str(markerIDs))
    else:
        print("No markers found")

    # Wait for the user to press 'q' before closing
    if cv2.waitKey(0) & 0xFF == ord('q'):
        exit(0)

72 changes: 72 additions & 0 deletions examples/ARTrackerTest/testARvid.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import cv2
import cv2.aruco as aruco
from time import sleep
import argparse

# Parse the command line arguments
parser = argparse.ArgumentParser(description="Test the ARTracker with a video or picture")
parser.add_argument("file", help="The file to test the ARTracker with")
args = parser.parse_args()  # parse the arguments

if __name__ == "__main__":

    # Open the video file
    if args.file.endswith((".mp4", ".avi")):
        video = cv2.VideoCapture(args.file)
    else:
        print("file must be a video, either .mp4 or .avi")
        exit(-1)

    # Create the aruco dictionary once (was rebuilt every loop iteration)
    markerDict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)

    # Play the video frame by frame, counting detections of marker ID 1
    index = 0          # current frame number
    markerTicks = 0    # number of frames in which the marker was found
    soonestFrame = 0   # first frame in which the marker was found
    while True:
        # Capture the next frame
        #sleep(1/45) # 30 fps NOTE: This does not accurately represent the actual fps of the video
        ret, frame = video.read()
        if not ret:
            # BUG FIX: at end of video, read() returns (False, None) and
            # the old code crashed cvtColor; stop cleanly instead.
            break

        # Display the unprocessed frame at half size
        cv2.namedWindow('unprocessed frame', cv2.WINDOW_KEEPRATIO)
        cv2.imshow('unprocessed frame', frame)
        cv2.resizeWindow('unprocessed frame', frame.shape[1] // 2, frame.shape[0] // 2)

        # Process the image: grayscale, then adaptive mean thresholding
        # (a dead commented-out fixed-threshold sweep was removed)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        process_image = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 25, 3)

        # Show the processed image
        cv2.namedWindow('processed image', cv2.WINDOW_KEEPRATIO)
        cv2.imshow('processed image', process_image)
        cv2.resizeWindow('processed image', process_image.shape[1] // 2, process_image.shape[0] // 2)

        # Detect the markers; track the first frame where marker ID 1 appears
        (corners, markerIDs, rejected) = aruco.detectMarkers(process_image, markerDict)
        if (markerIDs is not None) and (markerIDs[0] == 1):
            if markerTicks == 0:
                soonestFrame = index
            print("Marker found")
            print("Found IDs:" + str(markerIDs))
            markerTicks += 1

        print("Frame " + str(index))
        index += 1
        # Exit the loop if the user presses 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Summary (BUG FIX: previously only printed on 'q', never at end of video)
    print("Number of times marker was found:" + str(markerTicks))
    print("Soonest frame:" + str(soonestFrame))
    video.release()
    cv2.destroyAllWindows()

Binary file added examples/ARTrackerTest/videos/UtahAR.mp4
Binary file not shown.
1 change: 0 additions & 1 deletion gps
Submodule gps deleted from f505c0
23 changes: 23 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
[project]
name = "autonomous"
version = "0.1.0"
description = "Add a short description here"
dependencies = [
"opencv-contrib-python==4.6.0.66",
"flask>=2.3.2",
]
readme = "README.md"
requires-python = ">= 3.10"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.rye]
managed = true

[tool.hatch.metadata]
allow-direct-references = true

[tool.rye.workspace]
members = ["examples", "gps", "libs", "yolo", "RoverMap"]
2 changes: 2 additions & 0 deletions runAutonomous.sh → run_autonomous.sh
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
#! /bin/bash
cd src/autonomous

#Parses the config file below
main=$(cat config.ini | grep MAIN_CAMERA)
main=${main: -3}
Expand Down
2 changes: 2 additions & 0 deletions src/RoverMap/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
tiles/
tiles.tar
21 changes: 21 additions & 0 deletions src/RoverMap/LICENSE
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021 Benton Smith

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
34 changes: 34 additions & 0 deletions src/RoverMap/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Map Server

This houses both the HTML/CSS/JS frontend to render the map with the rover's coordinates,
and the backend server that powers it.

## Structure

The frontend provides a leafletjs map which draws the rover's position on it.
The map renderer uses image files served by `server.py`, and generated by
the generating program in `RoverMapTileGenerator`.

## Running and Integration

You can run the server in standalone mode by running `python3 server.py`.

To run it from python, import and call `start_map_server`.
The flask app isn't wrapped nicely in a class (this would be a good refactor),
so we really need to test this integration and make sure it works.

The server sends the rover's gps coords to the web client,
so the server will need to be supplied with those coords.
Call `update_rover_coords` with an array of lat, lng like `[38.4065, -110.79147]`
(the approximate location of the Mars Desert Research Station area in Utah).

There's an example in `example/updater.py` to go off of.

## Accessing

The server should open to port 5000, so you can access by connecting to `10.0.0.2:5000`.
The port may differ depending on the Flask configuration, so check the server's stdout to find the exact address and port.

## Documentation

This SoRo component is a new candidate for documentation! If you know markdown, and have a good idea about what's going on here, please feel free to [make a new page about it in the docs](https://sooner-rover-team.github.io/soro-documentation/html/new-page-guide.html)! :)
3 changes: 3 additions & 0 deletions src/RoverMap/RoverMapTileGenerator/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
node_modules/
tiles/*
*.tif
Loading