Run same script in different threads python
I have a script that recognizes license plates from a camera. Now I need the same script to recognize plates from another camera, so in short it has to handle two cameras at the same time. I am using TensorFlow/Keras and YOLO object detection. Can someone suggest a solution for this? I tried separate threads, but I cannot start the second thread. I will post what I have already tried.
import sys, os
import threading
import keras
import cv2
import traceback
import numpy as np
import time
import sqlite3
import pyodbc
import time
from imutils.video import VideoStream
from pattern import apply_pattern
import darknet.python.darknet as dn
from os.path import splitext, basename
from glob import glob
from darknet.python.darknet import detect
from src.label import dknet_label_conversion
from src.utils import nms
from src.keras_utils import load_model
from glob import glob
from os.path import splitext, basename
from src.utils import im2single
from src.keras_utils import load_model, detect_lp
from src.label import Shape, writeShapes
import imutils
cam_vlez ="rtsp://"
cam_izlez = "rtsp://a"
def adjust_pts(pts,lroi):
    return pts*lroi.wh().reshape((2,1)) + lroi.tl().reshape((2,1))

def start_vlez(cam):
    while True:
        cap = VideoStream(cam).start()
        start_time = time.time()
        sky = cap.read()
        frame = sky[100:700, 300:1800]
        w = frame.shape[0]
        h = frame.shape[1]
        ratio = float(max(frame.shape[:2])) / min(frame.shape[:2])
        side = int(ratio * 288.)
        bound_dim = min(side + (side % (2 ** 4)), 608)
        Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(frame), bound_dim, 2**4, (240,80), lp_threshold)
        cv2.imshow('detected_plate', frame)
        if len(LlpImgs):
            Ilp = LlpImgs[0]
            s = Shape(Llp[0].pts)
            for shape in [s]:
                ptsarray = shape.pts.flatten()
                try:
                    frame = cv2.rectangle(frame, (int(ptsarray[0]*h), int(ptsarray[5]*w)), (int(ptsarray[1]*h), int(ptsarray[6]*w)), (0,255,0), 3)
                    cv2.imshow('detected_plate', frame)
                except:
                    traceback.print_exc()
                    sys.exit(1)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)
            cv2.imwrite('%s/_lp.png' % (output_dir), Ilp*255.)
            cv2.imshow('lp_bic', Ilp)
            R, (width, height) = detect(ocr_net, ocr_meta, 'lp_images/_lp.png', thresh=ocr_threshold, nms=None)
            if len(R):
                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)
                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])
                result = apply_pattern(lp_str)
                write_to_database(result)
                print("License Plate Detected: ", lp_str)
                print("Written in database: ", result)
                print("--- %s seconds ---" % (time.time() - start_time))
                #updateSqliteTable(lp_str)
def start_izlez(cam):
    while True:
        cap = VideoStream(cam).start()
        start_time = time.time()
        sky = cap.read()
        frame = sky[100:700, 300:1800]
        w = frame.shape[0]
        h = frame.shape[1]
        ratio = float(max(frame.shape[:2])) / min(frame.shape[:2])
        side = int(ratio * 288.)
        bound_dim = min(side + (side % (2 ** 4)), 608)
        Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(frame), bound_dim, 2**4, (240,80), lp_threshold)
        cv2.imshow('detected_plate1', frame)
        if len(LlpImgs):
            Ilp = LlpImgs[0]
            s = Shape(Llp[0].pts)
            for shape in [s]:
                ptsarray = shape.pts.flatten()
                try:
                    frame = cv2.rectangle(frame, (int(ptsarray[0]*h), int(ptsarray[5]*w)), (int(ptsarray[1]*h), int(ptsarray[6]*w)), (0,255,0), 3)
                    cv2.imshow('detected_plate1', frame)
                except:
                    traceback.print_exc()
                    sys.exit(1)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)
            cv2.imwrite('%s/_lp.png' % (output_dir), Ilp*255.)
            cv2.imshow('lp_bic', Ilp)
            R, (width, height) = detect(ocr_net, ocr_meta, 'lp_images/_lp.png', thresh=ocr_threshold, nms=None)
            if len(R):
                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)
                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])
                result = apply_pattern(lp_str)
                write_to_database(result)
                print("License Plate Detected: ", lp_str)
                print("Written in database: ", result)
                print("--- %s seconds ---" % (time.time() - start_time))
                #updateSqliteTable(lp_str)
if __name__ == '__main__':
    try:
        output_dir = 'lp_images/'
        lp_threshold = .5
        wpod_net_path = "./my-trained-model/my-trained-model1_final.json"
        wpod_net = load_model(wpod_net_path)
        ocr_threshold = .6
        ocr_weights = b'data/ocr/ocr-net.weights'
        ocr_netcfg = b'data/ocr/ocr-net.cfg'
        ocr_dataset = b'data/ocr/ocr-net.data'
        ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
        ocr_meta = dn.load_meta(ocr_dataset)
        t = threading.Thread(target=start_vlez(cam_izlez))
        t1 = threading.Thread(target=start_izlez(cam_vlez))
        t.start()
        t1.start()
    except:
        print("Error: unable to start thread")
`target=` in `Thread` expects the function's name without `()` and without arguments - the thread will add the `()` later, when it is started.

Your current code does not run the functions in threads; it effectively works like this:

result = start_vlez(cam_izlez)
result1 = start_izlez(cam_vlez)

t = threading.Thread(target=result)
t1 = threading.Thread(target=result1)

t.start()
t1.start()

So it runs the first function in the main thread and waits for it to finish, then runs the second function, also in the main thread, and waits for it to finish, and only after that does it try to create the `Thread` objects from whatever the two functions returned.
If you have arguments, then you need the function's name without `()` in `target=` and a tuple with the arguments in `args=`:

t = threading.Thread(target=start_vlez, args=(cam_izlez,))
t1 = threading.Thread(target=start_izlez, args=(cam_vlez,))

`args=` needs a tuple even for a single argument, which is why there is a `,` in `(cam_izlez,)` and `(cam_vlez,)`.
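To make the pattern concrete, here is a minimal, self-contained sketch using only the standard library. The `camera_loop` worker and the RTSP URLs are hypothetical placeholders standing in for `start_vlez`/`start_izlez` and the real streams, not the actual detection code:

import threading
import time

def camera_loop(name, url):
    # Hypothetical stand-in for start_vlez / start_izlez:
    # each thread runs its own independent loop.
    for i in range(3):
        print(f"[{name}] processing frame {i} from {url}")
        time.sleep(0.5)

if __name__ == '__main__':
    # Pass the function itself to target= and its arguments as a tuple to args=.
    t = threading.Thread(target=camera_loop, args=("vlez", "rtsp://cam1"))
    t1 = threading.Thread(target=camera_loop, args=("izlez", "rtsp://cam2"))
    t.start()
    t1.start()
    # Optionally wait for both threads before exiting the main thread.
    t.join()
    t1.join()

With this structure both loops run concurrently instead of the first one blocking the second.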