I created this Python script with the help of ChatGPT plus some adjustments of my own. It counts the green pixels in 10 different tones in a screenshot grabbed from an RTSP stream.
Can someone help me integrate this code into HA?
Maybe it could be executed every hour and return the pixel counts to HA, so we can keep a history and make a graph. Can you help me, please?
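One route that might work (just a sketch, untested): let HA itself call a headless variant of the script through the command_line integration and parse its JSON output. The sensor name, script path, and tone key below are placeholders; a matching headless variant is sketched after the full script further down.

command_line:
  - sensor:
      name: green_pixels_hell_gruen
      command: "python3 /config/scripts/green_pixels.py"
      scan_interval: 3600    # once per hour
      command_timeout: 60    # opening the RTSP stream can take a while
      value_template: "{{ value_json['hell gruen'] }}"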
import cv2
import numpy as np
# Set up RTSP stream URL and credentials
# /stream1 is for a tapo camera
rtsp_url = "rtsp://USER:PASSWORD@IP:PORT/stream1"
# Open the RTSP stream
cap = cv2.VideoCapture(rtsp_url)
# Check if the stream is opened successfully
if not cap.isOpened():
    print("Error: Cannot open the RTSP stream.")
    exit()
# Full screen mode for OpenCV
cv2.namedWindow('RTSP Stream', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('RTSP Stream', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# Initialize the toggle list for 10 tones (all start as False/off)
replace_green_with_black = [False] * 10
text_color_black = [False] * 10 # To toggle text color for each tone
# Function to count and optionally replace different green tones with black
def process_frame(frame, replace_green_with_black):
    # Ensure the frame is 8-bit
    frame_8bit = cv2.convertScaleAbs(frame)
    # Convert BGR to HSV for better color range detection
    hsv = cv2.cvtColor(frame_8bit, cv2.COLOR_BGR2HSV)
    # Define the tone names and their HSV ranges
    green_yellow_ranges = [
        ("rot braun A-C", np.array([0, 20, 160]), np.array([15, 255, 255])),
        ("braun ", np.array([15, 50, 120]), np.array([20, 255, 255])),
        ("gelb braun ", np.array([20, 65, 120]), np.array([25, 255, 255])),
        ("gelb ", np.array([25, 65, 180]), np.array([30, 255, 255])),
        ("gelb gruen ", np.array([30, 65, 190]), np.array([35, 255, 255])),
        ("hell gruen ", np.array([35, 90, 180]), np.array([40, 255, 255])),
        ("helleres gruen", np.array([40, 90, 190]), np.array([45, 255, 255])),
        ("dunkleres gruen", np.array([45, 95, 120]), np.array([50, 255, 255])),
        ("dunkel gruen", np.array([50, 60, 170]), np.array([55, 255, 255])),
        ("nach gruen D-F", np.array([55, 60, 100]), np.array([70, 255, 255]))
    ]
    green_counts = {}
    mask_total = np.zeros(frame.shape[:2], dtype="uint8")  # Combined mask for selected tones
    for i, (tone_name, lower_bound, upper_bound) in enumerate(green_yellow_ranges):
        # Create a mask for this tone
        mask = cv2.inRange(hsv, lower_bound, upper_bound)
        # Count non-zero (green) pixels for this tone
        count = cv2.countNonZero(mask)
        # Store the count with the tone name
        green_counts[tone_name] = count
        if replace_green_with_black[i]:  # Check if replacement is toggled on for this tone
            mask_total = cv2.bitwise_or(mask_total, mask)  # Add this mask to the total mask
    # Replace the detected green pixels with black in the original frame
    frame[mask_total > 0] = [0, 0, 0]  # Set all pixels to black where the combined mask is active
    return frame, green_counts
while True:
    # Read frame from the RTSP stream
    ret, frame = cap.read()
    if not ret:
        print("Failed to grab frame.")
        break
    # Process the frame and count green pixels
    frame, green_counts = process_frame(frame, replace_green_with_black)
    # Overlay the green pixel counts in the top-right corner
    # (a bottom-right starting point is commented out below)
    text_x = frame.shape[1] - 500  # X coordinate for the text
    text_y = 50  # Starting Y coordinate
    #text_y = frame.shape[0] - 300  # Starting Y coordinate
    # Loop through the green counts and display them
    for i, (tone_name, count) in enumerate(green_counts.items()):
        text = f"{i} - {tone_name}: {count}"
        # Toggle text color based on user input (black or green)
        color = (0, 0, 0) if text_color_black[i] else (0, 255, 0)  # Black or green text color
        cv2.putText(frame, text, (text_x, text_y + i * 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)
    # Display the frame
    cv2.imshow('RTSP Stream', frame)
    # Handle keyboard input
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):  # Press 'q' to quit
        break
    elif ord('0') <= key <= ord('9'):  # Toggle replacement of each tone (keys 0-9)
        index = key - ord('0')
        replace_green_with_black[index] = not replace_green_with_black[index]
        text_color_black[index] = not text_color_black[index]  # Toggle text color for each tone
# Release the video capture object and close the window
cap.release()
cv2.destroyAllWindows()
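For the hourly use case above, here is a minimal headless sketch (same RTSP URL and HSV ranges as the script, but untested in this form): it grabs a single frame, counts the pixels per tone, and prints the result as JSON so the command_line sensor above, cron, or Task Scheduler can consume it.

import json
import sys

import cv2
import numpy as np

rtsp_url = "rtsp://USER:PASSWORD@IP:PORT/stream1"

# Same HSV ranges as in the interactive script above
green_yellow_ranges = [
    ("rot braun A-C",   (0, 20, 160),  (15, 255, 255)),
    ("braun",           (15, 50, 120), (20, 255, 255)),
    ("gelb braun",      (20, 65, 120), (25, 255, 255)),
    ("gelb",            (25, 65, 180), (30, 255, 255)),
    ("gelb gruen",      (30, 65, 190), (35, 255, 255)),
    ("hell gruen",      (35, 90, 180), (40, 255, 255)),
    ("helleres gruen",  (40, 90, 190), (45, 255, 255)),
    ("dunkleres gruen", (45, 95, 120), (50, 255, 255)),
    ("dunkel gruen",    (50, 60, 170), (55, 255, 255)),
    ("nach gruen D-F",  (55, 60, 100), (70, 255, 255)),
]

# Grab a single frame instead of looping over the live stream
cap = cv2.VideoCapture(rtsp_url)
ret, frame = cap.read()
cap.release()
if not ret:
    sys.exit("Failed to grab frame.")

# Count the pixels that fall into each tone's HSV range
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
counts = {
    name: int(cv2.countNonZero(cv2.inRange(hsv, np.array(lower), np.array(upper))))
    for name, lower, upper in green_yellow_ranges
}
print(json.dumps(counts))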
UPDATE:
I completed the task by writing the values to a MariaDB table and fetching them with the HA SQL integration. The Windows Task Scheduler runs the script every so often on my PC and writes fresh “sensor” values to the DB. Maybe I will offload this job to another Pi someday.
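For anyone doing the same, the two halves look roughly like this. First a sketch of the DB write (table, column, and database names are made up, and pymysql is just one possible driver):

import pymysql  # one possible MariaDB driver

# Placeholder counts; in practice this dict comes from the pixel-counting script
counts = {"hell gruen": 12345, "dunkel gruen": 678}

# Connection details and table/column names are invented for this sketch
conn = pymysql.connect(host="IP", user="USER", password="PASSWORD", database="plants")
try:
    with conn.cursor() as cur:
        for tone, count in counts.items():
            cur.execute(
                "INSERT INTO green_pixels (tone, pixel_count, measured_at) "
                "VALUES (%s, %s, NOW())",
                (tone, count),
            )
    conn.commit()
finally:
    conn.close()

On the HA side, the SQL integration then reads the latest row back as a sensor (db_url and names are again placeholders):

sql:
  - name: Hell gruen pixels
    db_url: mysql://USER:PASSWORD@IP/plants
    query: >
      SELECT pixel_count FROM green_pixels
      WHERE tone = 'hell gruen'
      ORDER BY measured_at DESC
      LIMIT 1
    column: pixel_count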