Example 1
# Test example: takes about 70 ms on an RK3588
import os
import time
from datetime import datetime

import cv2
from turbojpeg import TurboJPEG, TJPF_RGB

jpeg = TurboJPEG(os.path.join(work_dir, "lib", "libturbojpeg.so"))
ori_img = frame.copy()  # frame is in RGB format
ori_img = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
# jpeg.encode expects BGR input by default; if the input is RGB, pass the pixel format explicitly:
# encoded_ori_img = jpeg.encode(ori_img, pixel_format=TJPF_RGB, quality=85)
encoded_ori_img = jpeg.encode(ori_img, quality=85)
# The OSD only supports 1080P frames
t_start = time.perf_counter()
decoded_img = jpeg.decode(encoded_ori_img)
osd_init_x = 40
osd_x = osd_init_x
osd_y = 90
time_max_height = 0
# Draw the timestamp character by character so the text color can adapt
# to the local background brightness under each glyph
time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
for char in time_str:
    text_size, _ = cv2.getTextSize(char, cv2.FONT_HERSHEY_SIMPLEX, 1.2, 3)
    if text_size[1] > time_max_height:
        time_max_height = text_size[1]
    osd_region = decoded_img[osd_y - text_size[1]:osd_y, osd_x:osd_x + text_size[0]]
    average_gray = cv2.mean(cv2.cvtColor(osd_region, cv2.COLOR_BGR2GRAY))[0]
    color = (250, 250, 250)
    if average_gray > 160:
        color = (5, 5, 5)
    cv2.putText(decoded_img, char, (osd_x, osd_y), cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)
    osd_x += text_size[0] - 4  # tighten inter-character spacing slightly
info = ["0123456789 012345", "118.874241 32.139602"]
osd_x = osd_init_x
osd_y += time_max_height + 12
for val in info:
text_size, _ = cv2.getTextSize(val, cv2.FONT_HERSHEY_SIMPLEX, 1.2, 3)
cv2.putText(decoded_img, val, (osd_x, osd_y), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (250, 250, 250), 3)
osd_y += text_size[1] + 12
encoded_img = jpeg.encode(decoded_img, quality=85)
print("===============================", round((time.perf_counter()-t_start)*1000, 1), "ms")
with open("1.jpg", "wb") as f:
f.write(encoded_img)
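
If the source frame is already RGB, the cvtColor step in Example 1 can be skipped by telling the encoder the pixel format, as hinted in the comment above. A minimal sketch, assuming `frame` is an RGB numpy array and the same `jpeg` instance; PyTurboJPEG exposes TJPF_RGB at module level:

from turbojpeg import TJPF_RGB

# Encode straight from RGB; TurboJPEG handles the channel order internally
encoded_rgb = jpeg.encode(frame, pixel_format=TJPF_RGB, quality=85)
with open("rgb_direct.jpg", "wb") as f:
    f.write(encoded_rgb)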
Example 2
def osd_fun(self, data):
    try:
        # The OSD only supports 1080P frames
        id1 = data['id1']
        id2 = data['id2']
        lng = data['lng']
        lat = data['lat']
        decoded_img = jpeg.decode(data['pictureUrl'])
        start_point = (40, 90)
        x_offset = 4
        y_offset = 12
        osd_x = start_point[0]
        osd_y = start_point[1]
        # Timestamp in the top-left corner, drawn character by character so the
        # text color can adapt to the local background brightness
        time_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        for char in time_str:
            text_size, _ = cv2.getTextSize(char, cv2.FONT_HERSHEY_SIMPLEX, 1.2, 3)
            osd_region = decoded_img[osd_y - text_size[1]:osd_y, osd_x:osd_x + text_size[0]]
            average_gray = cv2.mean(cv2.cvtColor(osd_region, cv2.COLOR_BGR2GRAY))[0]
            color = (250, 250, 250)
            if average_gray > 160:
                color = (5, 5, 5)
            cv2.putText(decoded_img, char, (osd_x, osd_y), cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)
            osd_x += text_size[0] - x_offset
        # Right-aligned info lines (IDs and coordinates) along the right edge of a 1920-wide frame
        info = [f"{id1}", f"{round(lng, 6)} {round(lat, 6)}", f"{id2}"]
        osd_y = start_point[1]
        font_scale = 1.2 * 3 / 4
        for val in info:
            text_size, _ = cv2.getTextSize(val, cv2.FONT_HERSHEY_SIMPLEX, font_scale, 3)
            osd_x = 1920 - text_size[0] - start_point[0]
            cv2.putText(decoded_img, val, (osd_x, osd_y), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (250, 250, 250), 3)
            osd_y += text_size[1] + y_offset
        encoded_img = jpeg.encode(decoded_img, quality=85)
        return encoded_img
    except Exception:
        # Propagate decode/encode failures to the caller; adapt to project-specific error handling
        raise
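
A quick way to exercise osd_fun outside its class; the dict keys mirror what the function reads, while the file name and field values below are placeholders (the pictureUrl field is treated as raw JPEG bytes, matching how jpeg.decode is called above):

# Assumes the module-level `jpeg = TurboJPEG(...)` instance used inside osd_fun
with open("frame_1080p.jpg", "rb") as f:  # placeholder 1080P JPEG
    sample = {
        "id1": "0123456789",
        "id2": "012345",
        "lng": 118.874241,
        "lat": 32.139602,
        "pictureUrl": f.read(),  # raw JPEG bytes despite the name
    }
encoded = osd_fun(None, sample)  # `self` is unused, so None suffices for a standalone test
with open("osd_out.jpg", "wb") as f:
    f.write(encoded)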