Overview

The snippet below wraps a detector's predict loop to measure two things at once: GPU memory consumption (by sampling free memory with nvidia-smi before inference and again on the last image) and the average inference latency in milliseconds per image, from which FPS follows as 1000 / ms.
import json
import os
import time

# FLAGS, get_image_list, visualize and NpEncoder are defined elsewhere
# in the surrounding inference script.

def predict_image(detector):
    if FLAGS.run_benchmark:
        # Benchmark mode: warm up, then time repeated runs on one image.
        detector.predict(
            FLAGS.image_file,
            FLAGS.threshold,
            warmup=100,
            repeats=100,
            run_benchmark=True)
    else:
        imgs_lists = get_image_list(FLAGS.image_file)
        # Sample free GPU memory (MiB) before inference by parsing nvidia-smi.
        os.system('nvidia-smi -q -d Memory | grep -A4 GPU | grep Free > tmp')
        memory_gpu = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
        os.system('rm tmp')
        GPU_free1 = memory_gpu[FLAGS.GPU_id]
        GPU_free2 = 0
        result_save_path = os.path.join(FLAGS.output_dir, 'result.txt')
        with open(result_save_path, 'w') as f:
            time_start = time.time()
            for i, img in enumerate(imgs_lists):
                results = detector.predict(img, FLAGS.threshold)
                # Sample free memory again on the last image, once the model
                # and its activations are resident on the device.
                if i == len(imgs_lists) - 1:
                    os.system('nvidia-smi -q -d Memory | grep -A4 GPU | grep Free > tmp')
                    memory_gpu = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
                    os.system('rm tmp')
                    GPU_free2 = memory_gpu[FLAGS.GPU_id]
                # One JSON record per image: file name, classes, boxes.
                record = {}
                record['image_name'] = img.split('.jpg')[0].split('/')[-1] + '.jpg'
                classes = []
                bboxes = []
                for dt in results['boxes']:
                    # Each detection row is [class_id, score, x1, y1, x2, y2].
                    clsid, bbox, score = int(dt[0]), list(dt[2:]), float(dt[1])
                    classes.append(detector.config.labels[clsid])
                    bboxes.append(bbox)
                record['classes'] = classes
                record['bboxes'] = bboxes
                f.write(json.dumps(record, cls=NpEncoder) + '\n')
                visualize(
                    img,
                    results,
                    detector.config.labels,
                    mask_resolution=detector.config.mask_resolution,
                    output_dir=FLAGS.output_dir,
                    threshold=FLAGS.threshold)
        time_fn = time.time()
        print('-----------------------------------------------------------------')
        print('GPU:%d - Consumption: %d M' % (FLAGS.GPU_id, int(GPU_free1) - int(GPU_free2)))
        ms = (time_fn - time_start) * 1000.0 / len(imgs_lists)
        print('Inference: {} ms per image'.format(ms))
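Parsing nvidia-smi output through a temp file is fragile: the text layout can change between driver versions, and concurrent runs can clobber tmp. A sturdier alternative is to query NVML directly; here is a minimal sketch, assuming the pynvml package (pip install pynvml) is available, with gpu_free_mib being a hypothetical helper name:

import pynvml

def gpu_free_mib(gpu_id):
    # Query free memory on one GPU via NVML rather than shelling out
    # to nvidia-smi and parsing a temp file.
    pynvml.nvmlInit()
    try:
        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
        info = pynvml.nvmlDeviceGetMemoryInfo(handle)
        return info.free // (1024 * 1024)  # NVML reports bytes
    finally:
        pynvml.nvmlShutdown()

With this helper, GPU_free1 = gpu_free_mib(FLAGS.GPU_id) replaces both os.system blocks in the loop above.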