Slice the image based on the center, width, and height
import cv2
import numpy as np

# Load the image (cv2.imread returns a BGR array, or None if the file is missing)
img = cv2.imread('example.jpg')

# Center point of the rectangular region
center_x, center_y = 100, 100
# Width and height of the rectangular region
width, height = 50, 80
def img_slice(center_x, center_y, width, height, image=None):
    """Crop a width x height rectangle centered at (center_x, center_y).

    Parameters:
        center_x, center_y: center of the rectangular region.
        width, height: size of the rectangular region in pixels.
        image: optional array to slice; defaults to the module-level ``img``
            (backward compatible with the original global-based behaviour).

    Returns:
        The sliced region ``image[y1:y2, x1:x2]`` (a view, not a copy).
    """
    if image is None:
        image = img  # fall back to the globally loaded image

    # Top-left corner of the rectangular region
    x1 = int(center_x - (width / 2))
    y1 = int(center_y - (height / 2))
    # Bottom-right corner of the rectangular region
    x2 = int(center_x + (width / 2))
    y2 = int(center_y + (height / 2))

    # NumPy indexing is rows (y) first, then columns (x)
    return image[y1:y2, x1:x2]
# Draw a filled blue ellipse on ``image`` and display it.
center_coordinates = (120, 100)
axesLength = (100, 50)
angle = 30
startAngle = 0
endAngle = 360
# Blue color in BGR
color = (255, 0, 0)
# Line thickness of -1 px fills the shape instead of drawing an outline
thickness = -1
# Using cv2.ellipse() method
# Draw an ellipse filled with blue (thickness of -1 px)
image = cv2.ellipse(image, center_coordinates, axesLength, angle,
                    startAngle, endAngle, color, thickness)
# Displaying the image
cv2.imshow("Ellipse", image)
# Draw a red arrowed line on ``image`` and display it.
start_point = (225, 0)
# End coordinate
end_point = (0, 90)
# Red color in BGR
color = (0, 0, 255)
# Line thickness of 9 px
thickness = 9
# Using cv2.arrowedLine() method
# Draw a red arrow line with thickness of 9 px and tipLength = 0.5
image = cv2.arrowedLine(image, start_point, end_point, color,
                        thickness, tipLength=0.5)
# Displaying the image
cv2.imshow("arrow", image)
# Play a video file frame by frame until 'q' is pressed.
cap = cv2.VideoCapture("test")
while True:
    ret, frame = cap.read()
    cv2.imshow("video", frame)
    # Pick a sensible per-frame delay for cv2.waitKey(): too small and the
    # video plays too fast, too large and it plays too slowly.  25 ms is
    # usually fine (~25 fps material).
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
# Release the capture handle (missing in the original)
cap.release()
Reading video information
## fps of this video, plus frame count and frame dimensions
fps_c = cap.get(cv2.CAP_PROP_FPS)
frame_total = cap.get(cv2.CAP_PROP_FRAME_COUNT)
Video_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
Video_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
Play video and audio
## https://stackoverflow.com/questions/46864915/python-add-audio-to-video-opencv
import cv2
import numpy as np
## ffpyplayer for playing audio
from ffpyplayer.player import MediaPlayer

video_path = "../L1/images/Godwin.mp4"


def PlayVideo(video_path):
    """Play a video with cv2 while ffpyplayer plays its audio track.

    Stops at end of stream or when 'q' is pressed.
    """
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            # audio
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()


PlayVideo(video_path)
Video write
import cv2, os

File = "Up"
OUTPUT = "Egg_Day1.avi"
# sorted(os.listdir(...)) is portable; os.popen('ls ...') only works where a
# Unix shell exists, and ls already returned names sorted.
List = sorted(os.listdir(File))

# Use the first frame to determine the output frame size
img = cv2.imread(File + "/" + List[0])
fps = 24
# VideoWriter expects (width, height) = (columns, rows)
size = (len(img[0]), len(img))
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
videowriter = cv2.VideoWriter(OUTPUT, fourcc, fps, size)
for i in List:
    img = cv2.imread(File + "/" + i)
    videowriter.write(img)
videowriter.release()
cv2.VideoWriter_fourcc('M','J','P','G'): creates a FourCC code in OpenCV, which is used to specify the codec for writing video files.
The cv2.VideoWriter_fourcc() function takes four characters as input to create a FourCC code. In this case, the four characters are 'M', 'J', 'P', and 'G', which correspond to the Motion-JPEG (MJPG) codec.
So the fourcc variable will hold the FourCC code for the Motion-JPEG codec, which will be used when writing the video file.
Grey image to video
import cv2
import numpy as np

# Create a list of grayscale images
img_list = [...]  # insert your list of images here

# Define the video writer object; the final False means isColor=False
# (single-channel grayscale frames)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 10.0,
                      (img_list[0].shape[1], img_list[0].shape[0]), False)

# Write each image to the video
for img in img_list:
    # Convert to grayscale if not already
    if len(img.shape) == 3 and img.shape[2] == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Write to video
    out.write(img)

# Release the video writer
out.release()
In this code, img_list is the list of grayscale images you want to output as a video. The code first defines the VideoWriter object with the desired filename, codec, frame rate, and frame size. Then, it iterates through each image in img_list, converts it to grayscale (if it isn’t already), and writes it to the video using the write method of the VideoWriter object. Finally, it releases the VideoWriter object to close the video file.
Video to GIF
# 'from cv2 import cv2' is a legacy import pattern removed in modern
# opencv-python; plain 'import cv2' is the supported form.
import cv2
import imageio
import numpy

# Collection of the captured frames
frames_list = []

# Frames tossed per FPS step.  When FPS = 1, all frames are saved.
FPS = 1
def main():
    """Capture live video from a camera and display it until 'q' is pressed.

    Reconstructed from a truncated snippet: the key-handling code the
    original comments describe was missing, so a minimal waitKey event loop
    has been filled in — review before relying on exact timing behaviour.
    """
    # Initialise the camera
    keep_processing = True
    camera_to_use = 0  # 0 if you have one camera, 1 or > 1 otherwise
    cap = cv2.VideoCapture(camera_to_use)  # video capture object
    windowName = "Live Video Capture and Write"  # window name

    # error detection: make sure a camera (or video file) actually opened
    if not cap.isOpened():
        print("ERROR:No video file specified or camera connected.")
        return -1

    # camera is open:
    # create window by name (note flags for resizable or not)
    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
    print("按键Q-结束视频录制")  # "press Q to stop recording"

    while cap.isOpened():
        # 00 if video file successfully open then read frame from video
        if keep_processing:
            ret, frame = cap.read()  # ret: success flag, frame: the image
            # start a timer (to see how long processing and display takes)
            start_t = cv2.getTickCount()

            cv2.imshow(windowName, frame)

            # stop the timer and convert to ms
            stop_t = ((cv2.getTickCount() - start_t) /
                      cv2.getTickFrequency()) * 1000

            # start the event loop - essential
            # wait 40ms or less depending on processing time taken
            # (i.e. 1000ms / 25 fps = 40 ms)
            if cv2.waitKey(max(1, 40 - int(stop_t))) & 0xFF == ord('q'):
                keep_processing = False
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
import matplotlib.pyplot as plt
import numpy as np
import cv2
def fig2data(fig):
    """Convert a Matplotlib figure to an RGBA numpy array.

    fig = plt.figure()
    image = fig2data(fig)

    @brief Convert a Matplotlib figure to a numpy array with RGBA channels
    @param fig a matplotlib figure
    @return a (height, width, 4) uint8 numpy array of RGBA values
    """
    # draw the renderer so the canvas buffer is populated
    fig.canvas.draw()
    # Get the RGBA buffer from the figure
    w, h = fig.canvas.get_width_height()
    # np.frombuffer replaces the deprecated np.fromstring
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    # rows correspond to height, columns to width — the original used
    # (w, h, 4), which is wrong for non-square figures
    buf = buf.reshape(h, w, 4)
    # canvas.tostring_argb gives the pixmap in ARGB; roll the alpha channel
    # to the end to obtain RGBA.  np.roll returns a fresh writable array, so
    # the PIL round-trip (and its removed tostring() API) is unnecessary.
    return np.roll(buf, 3, axis=2)
if __name__ == "__main__":
    # Generate a figure with matplotlib
    figure = plt.figure()
    plot = figure.add_subplot(111)

    # draw a cardinal sine plot
    x = np.arange(1, 100, 0.1)
    y = np.sin(x) / x
    plot.plot(x, y)
    plt.show()

    # Convert the figure to an array and display it with OpenCV
    image = fig2data(figure)
    cv2.imshow("image", image)
    cv2.waitKey(0)