读取训练分类器的巨大图像数据

问题描述 投票:1回答:2

我是python和机器学习的新手。我有一个巨大的汽车图像数据集,拥有超过27000个图像和标签。我正在尝试创建一个数据集,以便我可以在我的训练分类器中使用它,但是处理这些数据对内存来说无疑是巨大的负担,而这就是我被困住的地方。起初我试图做这样的事情。

import os
import matplotlib.pyplot as plt
import matplotlib.image as mpg
import cv2
import gc
import numpy as np
from sklearn.preprocessing import normalize
import gc
import resource
import h5py

# Paths to the bounding-box label files (.txt) and the car images (.jpg).
bbox = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/labels"
imagepath = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/image"


training_data = []
training_labels = []
count = 0


# Walk the label tree; for every .txt label file, find the .jpg image with
# the same base name, crop it to the bounding box read from line 3 of the
# label file, resize to 300x300, normalize, and collect image + class label.
for root, _, files in os.walk(bbox):
    cdp = os.path.abspath(root)
    for rootImage, _, fileImage in os.walk(imagepath):
        # BUG FIX: the original called os.path.abspath(r) with an undefined
        # name `r` (NameError); the intended variable is rootImage.
        cdpimg = os.path.abspath(rootImage)
        for f in files:
            ct = 0
            name, ext = os.path.splitext(f)
            for fI in fileImage:
                n, e = os.path.splitext(fI)
                # Pair a .txt label with the .jpg image of the same name.
                if name == n and ext == ".txt" and e == ".jpg":
                    cip = os.path.join(cdp, f)
                    cipimg = os.path.join(cdpimg, fI)
                    # `with` guarantees the label file is closed even if an
                    # exception is raised (the original leaked the handle on
                    # errors raised before txt.close()).
                    with open(cip, "r") as txt:
                        for q in txt:
                            ct += 1
                            if ct == 3:
                                # Line 3 holds "x1 y1 x2 y2" of the bbox;
                                # split() tolerates repeated/odd whitespace
                                # where rsplit(' ') would produce '' tokens.
                                x1, y1, x2, y2 = (int(v) for v in q.split()[:4])
                                try:
                                    read_img = mpg.imread(cipimg).astype('float32')
                                    read_img_bbox = read_img[y1:y2, x1:x2, :]
                                    resize_img = cv2.resize(read_img_bbox, (300, 300))
                                    # In-place scale to [0, 1] — no extra copy.
                                    resize_img /= 255.0
                                    training_data.append(resize_img)
                                    # BUG FIX: the original appended the label
                                    # twice (once split on '\\', once on '/'),
                                    # so labels outnumbered images 2:1. Append
                                    # exactly once, splitting on the POSIX
                                    # separator these paths actually use.
                                    # NOTE(review): assumes the class id is the
                                    # 9th '/' component of cipimg — verify.
                                    training_labels.append(int(cipimg.split('/')[8]))
                                    print("len Of Training_data", len(training_data))
                                    print("len Of Training Labels", len(training_labels))
                                    del resize_img
                                    gc.collect()
                                except Exception as e:
                                    print("Error", str(e), cip)
                                count += 1
                                print(count)


np.save('/run/media/fdai5182/LAMA MADAN/Training_Data_4000Samples', training_data)
np.save('/run/media/fdai5182/LAMA MADAN/Training_Labels_4000Samples', training_labels)


print("DONE")

但即使在32gb RAM上读取图像,它也总是给我一个巨大的内存错误。

所以,为此,我想做一些其他步骤,这些步骤可能有用,减少内存并使其工作。我想要做的步骤如下。

  1. 预先分配一个形状为 (N, 150, 150, 3) 或 (N, 300, 300, 3)、dtype 为 float32 的 numpy 数组 X(在创建时直接指定 dtype,而不是事后用 astype 转换)
  2. 迭代图像并用150,150,3图像像素填充数组X的每一行
  3. 就地规范化:X / = 255
  4. 写入文件(.npy格式)

我到现在所做的是

import cv2
import matplotlib.pyplot as plt
# BUG FIX: module name was misspelled "matplotlib.iamge" (ImportError).
import matplotlib.image as mpg
import numpy as np
# BUG FIX: os.walk/os.path are used below but os was never imported.
import os

# Parallel directory trees: one .txt label per .jpg image, same base name.
bbox = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/labels"
imagepath = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/image"

for root, _, files in os.walk(bbox):
    cdp = os.path.abspath(root)
    for rootImage, _, fileImage in os.walk(imagepath):
        cdpimg = os.path.abspath(rootImage)
        for f in files:
            ct = 0
            name, ext = os.path.splitext(f)
            for fI in fileImage:
                n, e = os.path.splitext(fI)
                # Pair a .txt label with the .jpg image of the same name.
                if name == n and ext == ".txt" and e == ".jpg":
                    # BUG FIX: np.zeros((150,150,3)).view('float32')
                    # reinterprets the float64 buffer byte-wise and yields a
                    # (150, 150, 6) array; allocate as float32 directly.
                    nparrayX = np.zeros((150, 150, 3), dtype='float32')
                    cip = os.path.join(cdp, f)
                    cipImg = os.path.join(cdpimg, fI)
                    # BUG FIX: the original passed the label path (cip) to
                    # imread; the image path is cipImg.
                    read_image = mpg.imread(cipImg)
                    resize_image = cv2.resize(read_image, (150, 150))

我在正确的道路上吗?另外,如何用150,150,3图像像素填充图像格式的每一行。我不想再使用列表了,因为它们需要更多的内存和时间。请帮我解决这个问题。

此外,作为新成员,如果问题不符合StackOverflow的规则和规定,请告诉我,我会更多地编辑它。

谢谢,

python machine-learning label training-data
2个回答
0
投票

tensorflow / keras和pytorch都提供数据集/生成器类,您可以使用它们构建内存高效的数据加载器。

对于tensorflow / keras,有一个由Stanford's Shervine Amidi创建的优秀教程。

对于pytorch,你可以在project's man page上找到一个很好的教程。

我强烈建议您使用这些框架进行实现,因为它们可以避免编写样板代码,并使您的训练具有可扩展性。


0
投票

谢谢您的帮助 。但是我想手动检查一下如何在不使用其他发生器的情况下进行检查。以下是我的代码。

import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpg
import numpy as np
import os

N = 0
training_labels = []

bbox = "D:/Morethan4000samples/data/labels"
imagepath = "D:/Morethan4000samples/data/image/"

# First pass: count the .jpg files so imageX can be pre-allocated once,
# avoiding the memory blow-up of an ever-growing Python list of images.
for root, _, files in os.walk(imagepath):
        cdp = os.path.abspath(root)
        for f in files:
                name, ext = os.path.splitext(f)
                if ext == ".jpg":
                        cip = os.path.join(cdp, f)
                        N += 1

print(N)

# One float32 row per image: N x 227 x 227 x 3, allocated up front.
imageX = np.zeros((N, 227, 227, 3), dtype='float32')

i = 0

# Second pass: crop each image to its bounding box (line 3 of the matching
# label file), resize, blur, and store it in row i of the array.
for root, _, files in os.walk(imagepath):
        cdp = os.path.abspath(root)
        print(cdp)
        for f in files:
                ct = 0
                name, ext = os.path.splitext(f)
                if ext != ".jpg":
                        continue
                cip = os.path.join(cdp, f)
                read = mpg.imread(cip)
                # The label file lives in a parallel tree:
                # .../labels/<name>.txt mirrors .../image/<name>.jpg
                cipLabel = cip.replace('image', 'labels').replace('.jpg', '.txt')
                # `with` guarantees the label file is closed (the original
                # never closed `boxes`).
                with open(cipLabel, 'r') as boxes:
                        for q in boxes:
                                ct += 1
                                if ct == 3:
                                        # Line 3 holds "x1 y1 x2 y2".
                                        x1 = int(q.rsplit(' ')[0])
                                        y1 = int(q.rsplit(' ')[1])
                                        x2 = int(q.rsplit(' ')[2])
                                        y2 = int(q.rsplit(' ')[3])
                                        readimage = read[y1:y2, x1:x2]
                                        resize = cv2.resize(readimage, (227, 227))
                                        resize = cv2.GaussianBlur(resize, (5, 5), 0)
                                        # BUG FIX: the original mixed
                                        # indentation levels here (a syntax
                                        # error) and appended a label even
                                        # when no bbox line was found,
                                        # desynchronizing labels from images.
                                        # Store image + label and advance i
                                        # together, only when a bbox exists.
                                        imageX[i] = resize
                                        # NOTE(review): assumes the class id
                                        # is the 9th '/' path component —
                                        # verify for these D:/ paths.
                                        training_labels.append(int(cip.split('/')[8]))
                                        print(len(training_labels), len(imageX))
                                        i += 1
                                        print(i)

# Normalize in place so no second copy of the large array is created.
imageX /= 255.0
plt.imshow(imageX[10])
plt.show()

print(imageX.shape)
print(len(training_labels))

np.save("/run/media/fdai5182/LAMA MADAN/Morethan4000samples/227227/training_images", imageX)
np.save("/run/media/fdai5182/LAMA MADAN/Morethan4000samples/227227/trainin_labels", training_labels)

将每个图像保存在一行相同尺寸的矩阵中是最有效的方法。

© www.soinside.com 2019 - 2024. All rights reserved.