# batch_pca_lda-2.py

import os
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import cv2
def read_and_process_images(folder_path, target_shape):
    image_data = []
    image_labels = []
    image_names = []
    image_paths = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            file_lower = file.lower()
            if file_lower.endswith((".jpg", ".png", ".jpeg")):  # only process jpg/jpeg/png image files
                image_path = os.path.join(root, file)
                image = Image.open(image_path).convert('L')  # convert to grayscale
                resized_image = image.resize(target_shape)  # resize to a common shape
                image_array = np.array(resized_image).reshape(-1)  # flatten to a 1-D vector
                image_data.append(image_array)
                image_labels.append(os.path.basename(root))  # the folder name is used as the class label
                image_names.append(file)
                image_paths.append(image_path)
    image_data = np.array(image_data)
    image_labels = np.array(image_labels)
    image_names = np.array(image_names)
    image_paths = np.array(image_paths)
    return image_data, image_labels, image_names, image_paths
def save_to_csv(data, labels, names, paths, pca_projection, lda_projection, output_file):
    num_pca_components = pca_projection.shape[1]
    num_lda_components = lda_projection.shape[1]
    # CSV columns: 类别 (class label), 图片名 (image name), 图片路径 (image path), then the PCA and LDA components
    columns = ['类别', '图片名', '图片路径'] + [f'pca_{i}' for i in range(num_pca_components)] + [f'lda_{i}' for i in range(num_lda_components)]
    df = pd.DataFrame(columns=columns)
    for i in range(len(data)):
        row_data = np.concatenate(([labels[i]], [names[i]], [paths[i]], pca_projection[i], lda_projection[i]))
        df.loc[i] = row_data
    df.to_csv(output_file, index=False)
    print(f"Data saved to {output_file}")
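# Note: the row-by-row df.loc[i] assignment above is quadratic in the number of
# images. A vectorized construction of the same table is possible; a minimal
# sketch (`save_to_csv_fast` is a hypothetical alternative, not used by this script):
def save_to_csv_fast(labels, names, paths, pca_projection, lda_projection, output_file):
    meta = pd.DataFrame({'类别': labels, '图片名': names, '图片路径': paths})
    pca_df = pd.DataFrame(pca_projection, columns=[f'pca_{i}' for i in range(pca_projection.shape[1])])
    lda_df = pd.DataFrame(lda_projection, columns=[f'lda_{i}' for i in range(lda_projection.shape[1])])
    # concatenate metadata and projections column-wise and write once
    pd.concat([meta, pca_df, lda_df], axis=1).to_csv(output_file, index=False)
    print(f"Data saved to {output_file}")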
'''Convert OpenCV keypoints into a vector representation.'''
def keypoints_to_vector(keypoints):
    # collect one vector per keypoint
    vectors = []
    # iterate over all keypoints
    for kp in keypoints:
        # extract the keypoint attributes
        x = kp.pt[0]  # x coordinate
        y = kp.pt[1]  # y coordinate
        scale = kp.size  # scale
        orientation = kp.angle  # orientation
        # combine the attributes into a single vector
        vector = np.array([x, y, scale, orientation])
        # append it to the list of vectors
        vectors.append(vector)
    # convert the list to a numpy array
    vectors = np.array(vectors)
    return vectors
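# Note: the number of detected keypoints varies per image, so the flattened
# vectors returned by the ORB and SIFT feature functions below have different
# lengths. If a fixed-length feature is needed (e.g. to stack images into one
# matrix), padding or truncating is one option; a minimal sketch (`fix_length`
# is a hypothetical helper, not used by this script):
def fix_length(vector, length):
    out = np.zeros(length, dtype=np.float32)  # zero-padded target buffer
    n = min(length, vector.shape[0])
    out[:n] = vector[:n]  # copy as many entries as fit, truncating the rest
    return out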
from skimage.feature import graycomatrix, graycoprops
from skimage import io, color, img_as_ubyte
import matplotlib.pyplot as plt
def feature_glcm(input_img_path, blockPlt=True):
    gray_image = io.imread(input_img_path)
    # gray_image = io.imread('data/TRAIN/BTR70/HB04039.004.jpeg')
    gray_image = img_as_ubyte(gray_image)
    # GLCM parameters
    distances = [1, 2, 3, 4]  # neighbour distances
    angles = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]  # neighbour angles
    # compute the GLCM
    glcm = graycomatrix(gray_image, distances=distances, angles=angles, symmetric=True, normed=True)
    # compute the GLCM properties
    contrast = graycoprops(glcm, prop='contrast')
    dissimilarity = graycoprops(glcm, prop='dissimilarity')
    homogeneity = graycoprops(glcm, prop='homogeneity')
    energy = graycoprops(glcm, prop='energy')
    correlation = graycoprops(glcm, prop='correlation')
    # print the GLCM properties
    # print("Contrast:", contrast)
    # print("Dissimilarity:", dissimilarity)
    # print("Homogeneity:", homogeneity)
    # print("Energy:", energy)
    # print("Correlation:", correlation)
    # save the GLCM energy map, mirroring the input directory structure
    # note: os.path.join discards glcm_dir when input_img_path is absolute, so the
    # source image would be overwritten (the same applies to the other feature_* savers below)
    glcm_dir = 'glcm'
    glcm_img_path = os.path.join(glcm_dir, input_img_path)
    glcm_img_dir = os.path.dirname(glcm_img_path)
    os.makedirs(glcm_img_dir, exist_ok=True)
    # the energy matrix has shape (len(distances), len(angles)) with values in [0, 1];
    # scale to uint8 so cv2.imwrite stores it as a small visible image
    cv2.imwrite(glcm_img_path, (energy * 255).astype(np.uint8))
    fea = np.concatenate([contrast, dissimilarity, homogeneity, energy, correlation]).flatten()
    return fea
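# Note: graycomatrix expects a 2-D uint8 image. If io.imread returns an RGB
# array for some files, it would need to be collapsed to one channel first; a
# minimal sketch (`to_gray_ubyte` is a hypothetical helper, not part of the
# original pipeline):
def to_gray_ubyte(img):
    if img.ndim == 3:
        img = color.rgb2gray(img)  # collapse the RGB channels to a single channel
    return img_as_ubyte(img)  # convert to uint8 for graycomatrix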
from skimage.feature import hog
from skimage import exposure, io, color
def feature_hog(input_img_path, blockPlt=True):
    image = io.imread(input_img_path)
    # extract HOG features (the image is expected to be single-channel here)
    features, hog_image = hog(image, pixels_per_cell=(8, 8), cells_per_block=(2, 2), visualize=True)
    # # visualise the HOG image
    # fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
    # print(len(features))
    # print(hog_image.shape)
    # enhance the contrast of the HOG image
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
    # plt.imshow(hog_image_rescaled, cmap='gray')
    # save the HOG image, mirroring the input directory structure
    hog_dir = 'hog'
    hog_img_path = os.path.join(hog_dir, input_img_path)
    hog_img_dir = os.path.dirname(hog_img_path)
    os.makedirs(hog_img_dir, exist_ok=True)
    # rescale_intensity returns floats in [0, 1]; convert to uint8 so cv2.imwrite saves a visible image
    cv2.imwrite(hog_img_path, (hog_image_rescaled * 255).astype(np.uint8))
    return features
from skimage import feature
def feature_lbp(input_img_path, blockPlt=True):
    # compute the LBP code for one pixel from its eight neighbours
    def calculate_lbp_pixel(center, pixels):
        lbp_code = 0
        for i, pixel in enumerate(pixels):
            lbp_code |= (pixel >= center) << i
        return lbp_code
    # LBP feature extraction: per-pixel codes followed by a normalised histogram
    def extract_lbp_features(image, radius, num_points):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        height, width = gray.shape
        lbp_image = np.zeros((height, width), dtype=np.uint8)
        for y in range(radius, height - radius):
            for x in range(radius, width - radius):
                center = gray[y, x]
                pixels = [
                    gray[y - radius, x - radius],
                    gray[y - radius, x],
                    gray[y - radius, x + radius],
                    gray[y, x + radius],
                    gray[y + radius, x + radius],
                    gray[y + radius, x],
                    gray[y + radius, x - radius],
                    gray[y, x - radius]
                ]
                lbp_code = calculate_lbp_pixel(center, pixels)
                lbp_image[y, x] = lbp_code
        # note: the bin edges run from 0 to num_points + 1, so 8-bit codes above that value are not counted
        hist = np.histogram(lbp_image, bins=np.arange(0, num_points + 2), range=(0, num_points + 1))[0]
        hist = hist.astype("float")
        hist /= (hist.sum() + 1e-6)  # normalise
        return hist
    # load the image (BGR)
    image = cv2.imread(input_img_path)
    # load the image and convert it to grayscale
    # image = cv2.imread('./data/TRAIN/2S1/HB19404.jpg', cv2.IMREAD_GRAYSCALE)
    # image = io.imread('./data/TRAIN/2S1/HB19404.jpg')  # color.rgb2gray(image)
    # print(image.shape)
    # LBP radius and number of neighbourhood points
    radius = 1
    num_points = 8 * radius
    # extract the LBP histogram features
    lbp_features = extract_lbp_features(image, radius, num_points)
    # print the feature vector
    # print("LBP feature vector:")
    # print(lbp_features)
    # compute an LBP image with skimage (uniform patterns, assumes a single-channel image) for visualisation
    image = io.imread(input_img_path)
    radius = 1
    n_points = 8 * radius
    lbp_image = feature.local_binary_pattern(image, n_points, radius, method='uniform')
    # save the LBP image, mirroring the input directory structure
    lbp_dir = 'lbp'
    lbp_img_path = os.path.join(lbp_dir, input_img_path)
    lbp_img_dir = os.path.dirname(lbp_img_path)
    os.makedirs(lbp_img_dir, exist_ok=True)
    # uniform LBP codes range from 0 to n_points + 1; stretch to 0-255 and cast so the saved image is visible
    cv2.imwrite(lbp_img_path, (lbp_image * (255.0 / (n_points + 1))).astype(np.uint8))
    return lbp_features
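# Note: the nested-loop extractor above recomputes what
# skimage.feature.local_binary_pattern already provides. A compact alternative
# histogram built from the skimage output is sketched below (`lbp_histogram` is
# a hypothetical helper, not used by this script; its binning follows the
# uniform method and so is not identical to extract_lbp_features):
def lbp_histogram(gray_image, radius=1):
    n_points = 8 * radius
    codes = feature.local_binary_pattern(gray_image, n_points, radius, method='uniform')
    # 10 bins covering the uniform-pattern codes 0 .. n_points + 1
    hist, _ = np.histogram(codes, bins=np.arange(0, n_points + 3), density=True)
    return hist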
def feature_orb(input_img_path, blockPlt=True):
    image = io.imread(input_img_path)
    # create the ORB detector
    orb = cv2.ORB_create()
    # detect ORB keypoints and compute descriptors
    keypoints, descriptors = orb.detectAndCompute(image, None)
    # draw the keypoints
    image_with_keypoints = cv2.drawKeypoints(image, keypoints, None)
    # save the image with keypoints, mirroring the input directory structure
    orb_dir = 'orb'
    orb_img_path = os.path.join(orb_dir, input_img_path)
    orb_img_dir = os.path.dirname(orb_img_path)
    os.makedirs(orb_img_dir, exist_ok=True)
    cv2.imwrite(orb_img_path, image_with_keypoints)
    fea_keypoints = keypoints_to_vector(keypoints)
    fea_keypoints = fea_keypoints.flatten()
    return fea_keypoints
def feature_sift(input_img_path, blockPlt=True):
    image = io.imread(input_img_path)
    # create the SIFT detector
    sift = cv2.SIFT_create()
    # detect keypoints and compute descriptors
    keypoints, descriptors = sift.detectAndCompute(image, None)
    # print(keypoints)
    # print(descriptors)
    # draw the keypoints
    output_image = cv2.drawKeypoints(image, keypoints, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # save the image with keypoints, mirroring the input directory structure
    sift_dir = 'sift'
    sift_img_path = os.path.join(sift_dir, input_img_path)
    sift_img_dir = os.path.dirname(sift_img_path)
    os.makedirs(sift_img_dir, exist_ok=True)
    cv2.imwrite(sift_img_path, output_image)
    fea_keypoints = keypoints_to_vector(keypoints)
    fea_keypoints = fea_keypoints.flatten()
    return fea_keypoints
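# Note: both detectors above keep only the keypoint geometry (x, y, scale,
# angle) and discard the descriptor matrices. If the descriptors themselves are
# wanted as a fixed-length feature, one simple option is to average them; a
# minimal sketch (`mean_descriptor` is a hypothetical helper, not part of the
# original pipeline; 128 matches SIFT descriptors, ORB descriptors are 32-dimensional):
def mean_descriptor(descriptors, descriptor_dim=128):
    if descriptors is None or len(descriptors) == 0:
        return np.zeros(descriptor_dim, dtype=np.float32)  # no keypoints detected
    return descriptors.astype(np.float32).mean(axis=0)  # average descriptor over all keypoints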
def batch_pca_lda():
    # dataset folder
    # folder_path = 'data/TEST/'
    folder_path = 'data/TRAIN/'
    # target image size
    target_shape = (100, 100)
    # number of components for PCA and LDA
    pca_components = 128
    lda_components = 7  # LDA can keep at most n_classes - 1 components
    # output CSV file name
    # output_file = 'sar_test_pca_lda.csv'
    output_file = 'sar_train_pca_lda.csv'
    # load and preprocess the image data
    image_data, image_labels, image_names, image_paths = read_and_process_images(folder_path, target_shape)
    # reduce dimensionality with PCA
    pca = PCA(n_components=pca_components)
    pca_projection = pca.fit_transform(image_data)
    # reduce dimensionality with LDA (supervised, uses the class labels)
    lda = LDA(n_components=lda_components)
    lda_projection = lda.fit_transform(image_data, image_labels)
    # save everything to a CSV file
    save_to_csv(image_data, image_labels, image_names, image_paths, pca_projection, lda_projection, output_file)
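# A minimal sketch of reading the saved projections back for later use, assuming
# the CSV layout written by save_to_csv above (`load_pca_lda_csv` is a
# hypothetical helper, not part of the original script):
def load_pca_lda_csv(csv_file):
    df = pd.read_csv(csv_file)
    labels = df.iloc[:, 0].to_numpy()                    # first column holds the class label (类别)
    pca_features = df.filter(regex='^pca_').to_numpy()   # columns pca_0, pca_1, ...
    lda_features = df.filter(regex='^lda_').to_numpy()   # columns lda_0, lda_1, ...
    return labels, pca_features, lda_features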
# Given an input image path and the feature type to show, display the corresponding feature image.
def display_feature(input_img_path, fea_type):
    fea_dir = fea_type
    fea_img_path = os.path.join(fea_dir, input_img_path)
    if os.path.exists(fea_img_path):
        fea_img = io.imread(fea_img_path)
        plt.imshow(fea_img, cmap='gray')
        plt.show()
    else:
        print(f"Feature image not found at {fea_img_path}")
if __name__ == '__main__':
    # batch_pca_lda()
    # img_path = 'data/TRAIN/BTR70/HB04039.004.jpeg'
    # img_path = 'data/TEST/2S1/HB14931.JPG'
    # img_path = 'data/TEST/2S1/HB14938.JPG'
    # img_path = 'test_data\\TEST\\2S1\\HB14934.JPG'
    img_path = 'D:\\hiddz\\SAR\\test_data\\TEST\\2S1\\HB14934.JPG'
    fea = feature_glcm(img_path, False)
    fea = feature_hog(img_path, False)
    fea = feature_lbp(img_path, False)
    fea = feature_orb(img_path, False)
    fea = feature_sift(img_path, False)
    print(fea)
    display_feature(img_path, 'glcm')
    display_feature(img_path, 'hog')
    display_feature(img_path, 'lbp')
    display_feature(img_path, 'orb')
    display_feature(img_path, 'sift')