I want to implement a GUI for a facial recognition program that uses CLIs
I'm not very good at writing full GUIs.
Basically, what I want to do is create a GUI for my facial recognition program with tkinter, so that instead of the user typing command-line arguments at the command prompt by hand, the output looks something like this (the original image in a panel on the left, and the recognized image on the right).
The code below is the working version that has the user enter the arguments at the command prompt.
Thanks in advance.
import face_recognition
import argparse
import pickle
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", required=True,
    help="path to serialized db of facial encodings")
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
ap.add_argument("-d", "--detection-method", type=str, default="cnn",
    help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())

# load the known faces and embeddings
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())

# load the input image and convert it from BGR to RGB
image = cv2.imread(args["image"])
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# detect the (x, y)-coordinates of the bounding boxes corresponding
# to each face in the input image, then compute the facial embeddings
# for each face
print("[INFO] recognizing faces...")
boxes = face_recognition.face_locations(rgb,
    model=args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)

# initialize the list of names for each face detected
names = []

# loop over the facial embeddings
for encoding in encodings:
    # attempt to match each face in the input image to our known
    # encodings
    matches = face_recognition.compare_faces(data["encodings"],
        encoding)
    name = "Unknown"

    # check to see if we have found a match
    if True in matches:
        # find the indexes of all matched faces then initialize a
        # dictionary to count the total number of times each face
        # was matched
        matchedIdxs = [i for (i, b) in enumerate(matches) if b]
        counts = {}

        # loop over the matched indexes and maintain a count for
        # each recognized face
        for i in matchedIdxs:
            name = data["names"][i]
            counts[name] = counts.get(name, 0) + 1

        # determine the recognized face with the largest number of
        # votes (note: in the event of an unlikely tie Python will
        # select first entry in the dictionary)
        name = max(counts, key=counts.get)

    # update the list of names
    names.append(name)

# loop over the recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
    # draw the predicted face name on the image
    cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
    y = top - 15 if top - 15 > 15 else top + 15
    cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
        0.75, (0, 255, 0), 2)

# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
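For reference, it is run from the command prompt roughly like this (the script name and file paths here are just placeholders for whatever you actually use):

python recognize_faces.py --encodings encodings.pickle --image examples/example.jpg --detection-method hog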
This is what I tried:
import face_recognition
import argparse
import pickle
import cv2
from tkinter import *
from tkinter import filedialog

def select_image():
    # grab a reference to the image panels
    global panelA, panelB

    # open a file chooser dialog and allow the user to select an input
    # image
    imagepath = filedialog.askopenfilename()
    encodingspath = filedialog.askopenfilename()

    # ensuring a file path is selected
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--detection-method", type=str, default="cnn",
        help="face detection model to use: either `hog` or `cnn`")
    args = vars(ap.parse_args())

    if len(imagepath and encodingspath) > 0:
        data = pickle.loads(open(encodingspath, "rb").read())

        # load the input image and convert it from BGR to RGB
        initial_image = cv2.imread(imagepath)
        image = cv2.imread(imagepath)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # detect the (x, y)-coordinates of the bounding boxes corresponding
        # to each face in the input image, then compute the facial embeddings
        # for each face
        print("[INFO] recognizing faces...")
        boxes = face_recognition.face_locations(rgb, model=args["detection_method"])
        encodingspath = face_recognition.face_encodings(rgb, boxes)

        # initialize the list of names for each face detected
        names = []

        # loop over the facial embeddings
        for encoding in encodingspath:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data(encodingspath), encoding)
            name = "Unknown"

            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}

                # loop over the matched indexes and maintain a count for
                # each recognized face
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1

                # determine the recognized face with the largest number of
                # votes (note: in the event of an unlikely tie Python will
                # select first entry in the dictionary)
                name = max(counts, key=counts.get)

            # update the list of names
            names.append(name)

        # loop over the recognized faces
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # draw the predicted face name on the image
            cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)

        # show the output image
        #cv2.imshow("Image", image)
        #cv2.waitKey(0)

        # if the panels are None, initialize them
        if panelA is None or panelB is None:
            # the first panel will store our original image
            panelA = Label(image=initial_image)
            panelA.image = initial_image
            panelA.pack(side="left", padx=10, pady=10)

            # while the second panel will store the recognized image
            panelB = Label(image=image)
            panelB.image = image
            panelB.pack(side="right", padx=10, pady=10)

        # otherwise, update the image panels
        else:
            # update the panels
            panelA.configure(image=initial_image)
            panelB.configure(image=image)
            panelA.image = initial_image
            panelB.image = image

# initialize the window toolkit along with the two image panels
root = Tk()
panelA = None
panelB = None

# create a button that, when pressed, will trigger a file chooser
# dialog and allow the user to select an input image; then add the
# button to the GUI
btn = Button(root, text="Select an image", command=select_image)
btn.pack(side="bottom", fill="both", expand="yes", padx="10", pady="10")

# kick off the GUI
root.mainloop()
When I run my attempt, nothing shows up.
You should keep the command-line tool unchanged and have the GUI call it once the arguments have been selected. That way it can still be used from the command line if you want to.
To get you started, consider something like this (assuming your CLI tool is named face_recognition):
#! /usr/bin/env python3
import subprocess
import sys
import tkinter as tk
import tkinter.filedialog


class Application(tk.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.create_widgets()
        self.image = ""

    def create_widgets(self):
        tk.Button(self, text="Choose image...", command=self.get_image_filename).pack(fill=tk.X)
        # TODO: add a way to enter all the desired arguments...
        tk.Button(self, text="Run", command=self.run_face_recognition).pack(fill=tk.X)
        tk.Button(self, text="Quit", command=self.master.destroy).pack(fill=tk.X)

    def get_image_filename(self):
        self.image = tkinter.filedialog.askopenfilename()

    def run_face_recognition(self):
        subprocess.run([sys.executable, "/path/to/face_recognition.py", "--image", self.image, "args..."])


root = tk.Tk()
app = Application(master=root)
app.mainloop()
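If you do want the two side-by-side panels from the question rather than the cv2.imshow window, note that the likely reason the original attempt showed nothing is that a tkinter Label needs a PhotoImage, not a raw OpenCV numpy array, and the GUI also has to keep a reference to that PhotoImage so it isn't garbage-collected. Here is a minimal sketch of that conversion, assuming Pillow is installed and that you already have the original and annotated images as BGR arrays; the helper names are just for illustration:

import cv2
import tkinter as tk
from PIL import Image, ImageTk  # Pillow: pip install Pillow

def to_photoimage(bgr_array):
    # OpenCV stores images as BGR; convert to RGB, wrap in a PIL image,
    # then in a Tk-compatible PhotoImage
    rgb = cv2.cvtColor(bgr_array, cv2.COLOR_BGR2RGB)
    return ImageTk.PhotoImage(Image.fromarray(rgb))

def show_images_side_by_side(root, original_bgr, recognized_bgr):
    # original image in the left panel, recognized (annotated) image on the right
    for side, bgr in (("left", original_bgr), ("right", recognized_bgr)):
        photo = to_photoimage(bgr)
        panel = tk.Label(root, image=photo)
        panel.image = photo  # keep a reference so Tk doesn't garbage-collect it
        panel.pack(side=side, padx=10, pady=10)

With the subprocess approach above, one simple way to get the annotated image back into the GUI would be to give the CLI an extra option that writes its result to a file (instead of, or in addition to, calling cv2.imshow), then load that file with cv2.imread and pass it to something like show_images_side_by_side for the right-hand panel.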