# batch_pca_lda.py

import os
import pandas as pd
from PIL import Image
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import cv2
import numpy as np
from skimage import feature
from skimage.feature import graycomatrix, graycoprops, hog
from skimage import io, color, img_as_ubyte, exposure
import matplotlib.pyplot as plt


def read_and_process_images(folder_path, target_shape):
    image_data = []
    image_labels = []
    image_names = []
    image_paths = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            fileLower = file.lower()
            if fileLower.endswith(".jpg") or fileLower.endswith(".png") or fileLower.endswith(".jpeg"):  # only process jpg/jpeg/png image files
                image_path = os.path.join(root, file)
                image = Image.open(image_path).convert('L')  # convert to grayscale
                resized_image = image.resize(target_shape)  # resize the image
                image_array = np.array(resized_image).reshape(-1)  # flatten to a 1-D array
                image_data.append(image_array)
                image_labels.append(os.path.basename(root))  # folder name is the class label
                image_names.append(file)
                image_paths.append(image_path)
    image_data = np.array(image_data)
    image_labels = np.array(image_labels)
    image_names = np.array(image_names)
    image_paths = np.array(image_paths)
    return image_data, image_labels, image_names, image_paths


def save_to_csv(data, labels, names, paths, pca_projection, lda_projection, output_file):
    num_pca_components = pca_projection.shape[1]
    num_lda_components = lda_projection.shape[1]
    columns = ['label', 'image_name', 'image_path'] + [f'pca_{i}' for i in range(num_pca_components)] + [f'lda_{i}' for i in range(num_lda_components)]
    df = pd.DataFrame(columns=columns)
    for i in range(len(data)):
        row_data = np.concatenate(([labels[i]], [names[i]], [paths[i]], pca_projection[i], lda_projection[i]))
        df.loc[i] = row_data
    df.to_csv(output_file, index=False)
    print(f"Data saved to {output_file}")
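

# --- Added sketch (not part of the original script) --------------------------
# save_to_csv() fills the DataFrame row by row with df.loc[i], which becomes
# slow for large datasets. A minimal alternative sketch, assuming the same
# inputs, that builds the frame in one step (the function name is an
# illustrative choice):
def save_to_csv_fast(labels, names, paths, pca_projection, lda_projection, output_file):
    df = pd.DataFrame({'label': labels, 'image_name': names, 'image_path': paths})
    for i in range(pca_projection.shape[1]):
        df[f'pca_{i}'] = pca_projection[:, i]
    for i in range(lda_projection.shape[1]):
        df[f'lda_{i}'] = lda_projection[:, i]
    df.to_csv(output_file, index=False)
    print(f"Data saved to {output_file}")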


def keypoints_to_vector(keypoints):
    """Convert OpenCV keypoints into a usable feature vector."""
    # empty list of per-keypoint vectors
    vectors = []
    # iterate over all keypoints
    for kp in keypoints:
        # extract the keypoint attributes
        x = kp.pt[0]  # x coordinate
        y = kp.pt[1]  # y coordinate
        scale = kp.size  # scale
        orientation = kp.angle  # orientation
        # combine the attributes into one vector
        vector = np.array([x, y, scale, orientation])
        # append to the list
        vectors.append(vector)
    # convert the list to a numpy array
    vectors = np.array(vectors)
    return vectors
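

# --- Added sketch (not part of the original script) --------------------------
# keypoints_to_vector() returns a vector whose length depends on how many
# keypoints the detector finds, so vectors from different images cannot be
# stacked directly for PCA/LDA. A minimal helper sketch, assuming a
# caller-chosen maximum keypoint count, that pads or truncates to a fixed
# length (the name and the 64-keypoint default are illustrative assumptions):
def keypoints_to_fixed_vector(keypoints, max_keypoints=64):
    vec = keypoints_to_vector(keypoints).reshape(-1)  # empty if no keypoints were found
    target_len = max_keypoints * 4  # 4 attributes per keypoint: x, y, size, angle
    fixed = np.zeros(target_len, dtype=np.float32)
    n = min(len(vec), target_len)
    fixed[:n] = vec[:n]  # truncate or zero-pad
    return fixed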


def feature_glcm(input_img_path, blockPlt=True):  # blockPlt is currently unused
    gray_image = io.imread(input_img_path)
    # gray_image = io.imread('data/TRAIN/BTR70/HB04039.004.jpeg')
    gray_image = img_as_ubyte(gray_image)
    # GLCM parameters
    distances = [1, 2, 3, 4]  # neighbour distances
    angles = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]  # neighbour angles
    # compute the GLCM
    glcm = graycomatrix(gray_image, distances=distances, angles=angles, symmetric=True, normed=True)
    # compute the GLCM properties (each has shape: 4 distances x 4 angles)
    contrast = graycoprops(glcm, prop='contrast')
    dissimilarity = graycoprops(glcm, prop='dissimilarity')
    homogeneity = graycoprops(glcm, prop='homogeneity')
    energy = graycoprops(glcm, prop='energy')
    correlation = graycoprops(glcm, prop='correlation')
    # (debug) print the GLCM properties
    # print("Contrast:", contrast)
    # print("Dissimilarity:", dissimilarity)
    # print("Homogeneity:", homogeneity)
    # print("Energy:", energy)
    # print("Correlation:", correlation)
    # save the GLCM feature map, mirroring the input directory structure
    glcm_dir = 'glcm'
    # strip any drive letter / leading separator so absolute input paths stay under 'glcm/'
    glcm_img_path = os.path.join(glcm_dir, os.path.splitdrive(input_img_path)[1].lstrip('\\/'))
    glcm_img_dir = os.path.dirname(glcm_img_path)
    os.makedirs(glcm_img_dir, exist_ok=True)
    # save the GLCM energy map (energy values lie in [0, 1], so convert to 8-bit first)
    io.imsave(glcm_img_path, img_as_ubyte(energy))
    # 5 properties x 4 distances x 4 angles = 80 features
    fea = np.concatenate([contrast, dissimilarity, homogeneity, energy, correlation]).flatten()
    return fea
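

# --- Added sketch (not part of the original script) --------------------------
# graycomatrix() on a full 256-level image builds 256x256 co-occurrence
# matrices for every distance/angle pair. A minimal variant sketch, assuming
# 32 grey levels are sufficient for SAR texture, that quantizes first to keep
# the matrices small (the function name and level count are assumptions):
def feature_glcm_quantized(input_img_path, levels=32):
    gray = img_as_ubyte(io.imread(input_img_path, as_gray=True))
    quantized = (gray // (256 // levels)).astype(np.uint8)  # map 0..255 -> 0..levels-1
    glcm = graycomatrix(quantized,
                        distances=[1, 2, 3, 4],
                        angles=[0, np.pi / 4, np.pi / 2, 3 * np.pi / 4],
                        levels=levels, symmetric=True, normed=True)
    props = ['contrast', 'dissimilarity', 'homogeneity', 'energy', 'correlation']
    return np.concatenate([graycoprops(glcm, p) for p in props]).flatten()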


def feature_hog(input_img_path, blockPlt=True):
    image = io.imread(input_img_path)
    if image.ndim == 3:
        image = color.rgb2gray(image)  # HOG below assumes a single-channel image
    # extract the HOG features
    features, hog_image = hog(image, pixels_per_cell=(8, 8), cells_per_block=(2, 2), visualize=True)
    # # visualise the HOG image
    # fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
    # print(len(features))
    # print(hog_image.shape)
    # enhance the contrast of the HOG image and map it to the 8-bit range
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10), out_range=(0, 255))
    # plt.imshow(hog_image_rescaled, cmap='gray')
    # save the HOG image, mirroring the input directory structure
    hog_dir = 'hog'
    # strip any drive letter / leading separator so absolute input paths stay under 'hog/'
    hog_img_path = os.path.join(hog_dir, os.path.splitdrive(input_img_path)[1].lstrip('\\/'))
    hog_img_dir = os.path.dirname(hog_img_path)
    os.makedirs(hog_img_dir, exist_ok=True)
    io.imsave(hog_img_path, hog_image_rescaled.astype(np.uint8))  # save as an 8-bit image
    return features
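

# --- Added sketch (not part of the original script) --------------------------
# The length of the HOG descriptor depends on the input image size, so images
# of different sizes produce vectors that cannot be stacked for PCA/LDA.
# A minimal variant sketch, assuming a 128x128 working size (the name and size
# are illustrative assumptions), that resizes before extracting HOG:
def feature_hog_fixed(input_img_path, target_shape=(128, 128)):
    image = io.imread(input_img_path, as_gray=True)
    resized = cv2.resize(image.astype(np.float32), target_shape)
    features = hog(resized, pixels_per_cell=(8, 8), cells_per_block=(2, 2), visualize=False)
    return features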


def feature_lbp(input_img_path, blockPlt=True):
    # per-pixel LBP code
    def calculate_lbp_pixel(center, pixels):
        lbp_code = 0
        for i, pixel in enumerate(pixels):
            lbp_code |= int(pixel >= center) << i  # cast to int so the shift works on numpy bools
        return lbp_code

    # LBP feature extraction (8-neighbour histogram)
    def extract_lbp_features(image, radius, num_points):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        height, width = gray.shape
        lbp_image = np.zeros((height, width), dtype=np.uint8)
        for y in range(radius, height - radius):
            for x in range(radius, width - radius):
                center = gray[y, x]
                pixels = [
                    gray[y - radius, x - radius],
                    gray[y - radius, x],
                    gray[y - radius, x + radius],
                    gray[y, x + radius],
                    gray[y + radius, x + radius],
                    gray[y + radius, x],
                    gray[y + radius, x - radius],
                    gray[y, x - radius]
                ]
                lbp_code = calculate_lbp_pixel(center, pixels)
                lbp_image[y, x] = lbp_code
        # note: with these bins only codes 0..num_points + 1 are counted; higher codes are ignored
        hist = np.histogram(lbp_image, bins=np.arange(0, num_points + 2), range=(0, num_points + 1))[0]
        hist = hist.astype("float")
        hist /= (hist.sum() + 1e-6)  # normalise
        return hist

    # load the image
    image = cv2.imread(input_img_path)
    # read the image and convert to grayscale
    # image = cv2.imread('./data/TRAIN/2S1/HB19404.jpg', cv2.IMREAD_GRAYSCALE)
    # image = io.imread('./data/TRAIN/2S1/HB19404.jpg')  # color.rgb2gray(image)
    # print(image.shape)
    # LBP radius and number of neighbourhood points
    radius = 1
    num_points = 8 * radius
    # extract the LBP features
    lbp_features = extract_lbp_features(image, radius, num_points)
    # print the feature vector
    # print("LBP feature vector:")
    # print(lbp_features)
    # compute an LBP image with skimage for visualisation
    image = img_as_ubyte(io.imread(input_img_path, as_gray=True))
    n_points = 8 * radius
    lbp_image = feature.local_binary_pattern(image, n_points, radius, method='uniform')
    # save the LBP feature map, mirroring the input directory structure
    lbp_dir = 'lbp'
    # strip any drive letter / leading separator so absolute input paths stay under 'lbp/'
    lbp_img_path = os.path.join(lbp_dir, os.path.splitdrive(input_img_path)[1].lstrip('\\/'))
    lbp_img_dir = os.path.dirname(lbp_img_path)
    os.makedirs(lbp_img_dir, exist_ok=True)
    # scale the LBP codes (0..n_points + 1) to 0..255 so cv2.imwrite gets an 8-bit image
    cv2.imwrite(lbp_img_path, np.uint8(255 * lbp_image / max(lbp_image.max(), 1)))
    return lbp_features
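

# --- Added sketch (not part of the original script) --------------------------
# feature_lbp() histograms a hand-written 8-neighbour LBP. The skimage LBP
# image it already computes for visualisation can yield a descriptor directly;
# a minimal sketch, assuming the 'uniform' method with radius 1 / 8 points
# (which produces codes 0..9), with an illustrative function name:
def feature_lbp_skimage(input_img_path, radius=1):
    n_points = 8 * radius
    image = img_as_ubyte(io.imread(input_img_path, as_gray=True))
    lbp = feature.local_binary_pattern(image, n_points, radius, method='uniform')
    hist, _ = np.histogram(lbp, bins=np.arange(0, n_points + 3))  # one bin per uniform code
    return hist.astype("float") / (hist.sum() + 1e-6)  # normalised 10-bin histogram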


def feature_orb(input_img_path, blockPlt=True):
    image = io.imread(input_img_path)
    # create the ORB detector
    orb = cv2.ORB_create()
    # detect ORB keypoints and compute descriptors
    keypoints, descriptors = orb.detectAndCompute(image, None)
    # draw the keypoints
    image_with_keypoints = cv2.drawKeypoints(image, keypoints, None)
    # save the image with keypoints, mirroring the input directory structure
    orb_dir = 'orb'
    # strip any drive letter / leading separator so absolute input paths stay under 'orb/'
    orb_img_path = os.path.join(orb_dir, os.path.splitdrive(input_img_path)[1].lstrip('\\/'))
    orb_img_dir = os.path.dirname(orb_img_path)
    os.makedirs(orb_img_dir, exist_ok=True)
    cv2.imwrite(orb_img_path, image_with_keypoints)
    fea_keypoints = keypoints_to_vector(keypoints)
    fea_keypoints = fea_keypoints.flatten()
    return fea_keypoints


def feature_shift(input_img_path, blockPlt=True):
    # note: "shift" in the name refers to SIFT features
    image = io.imread(input_img_path)
    # create the SIFT detector
    sift = cv2.SIFT_create()
    # detect keypoints and compute descriptors
    keypoints, descriptors = sift.detectAndCompute(image, None)
    # print(keypoints)
    # print(descriptors)
    # draw the keypoints
    output_image = cv2.drawKeypoints(image, keypoints, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # save the image with keypoints, mirroring the input directory structure
    sift_dir = 'sift'
    # strip any drive letter / leading separator so absolute input paths stay under 'sift/'
    sift_img_path = os.path.join(sift_dir, os.path.splitdrive(input_img_path)[1].lstrip('\\/'))
    sift_img_dir = os.path.dirname(sift_img_path)
    os.makedirs(sift_img_dir, exist_ok=True)
    cv2.imwrite(sift_img_path, output_image)
    fea_keypoints = keypoints_to_vector(keypoints)
    fea_keypoints = fea_keypoints.flatten()
    return fea_keypoints
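

# --- Added sketch (not part of the original script) --------------------------
# feature_shift()/feature_orb() describe an image by its keypoint coordinates,
# whose count varies per image. A minimal alternative sketch, assuming simple
# average pooling of the 128-d SIFT descriptors into one fixed-length vector
# (the function name is an illustrative choice):
def feature_sift_pooled(input_img_path):
    image = img_as_ubyte(io.imread(input_img_path, as_gray=True))  # SIFT expects an 8-bit image
    sift = cv2.SIFT_create()
    _, descriptors = sift.detectAndCompute(image, None)
    if descriptors is None:  # no keypoints detected
        return np.zeros(128, dtype=np.float32)
    return descriptors.mean(axis=0)  # average the (N, 128) descriptor matrix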


def batch_pca_lda():
    # folder path
    # folder_path = 'data/TEST/'
    folder_path = 'data/TRAIN/'
    # target image size
    target_shape = (100, 100)
    # number of PCA and LDA components
    pca_components = 128
    lda_components = 7  # the LDA dimensionality cannot exceed (number of classes - 1)
    # output CSV file name
    # output_file = 'sar_test_pca_lda.csv'
    output_file = 'sar_train_pca_lda.csv'

    # read and preprocess the image data
    image_data, image_labels, image_names, image_paths = read_and_process_images(folder_path, target_shape)

    # dimensionality reduction with PCA
    pca = PCA(n_components=pca_components)
    pca_projection = pca.fit_transform(image_data)

    # dimensionality reduction with LDA
    lda = LDA(n_components=lda_components)
    lda_projection = lda.fit_transform(image_data, image_labels)

    # save everything to a CSV file
    save_to_csv(image_data, image_labels, image_names, image_paths, pca_projection, lda_projection, output_file)
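

# --- Added sketch (not part of the original script) --------------------------
# batch_pca_lda() works on raw pixels. A minimal sketch of the analogous batch
# flow for one of the handcrafted features (GLCM here, because its 80-d vector
# has a fixed length); the folder path and component count are illustrative
# assumptions:
def batch_glcm_pca(folder_path='data/TRAIN/', pca_components=20):
    feature_rows, labels = [], []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.lower().endswith((".jpg", ".png", ".jpeg")):
                feature_rows.append(feature_glcm(os.path.join(root, file), False))
                labels.append(os.path.basename(root))  # folder name is the class label
    features = np.array(feature_rows)
    pca = PCA(n_components=pca_components)
    return pca.fit_transform(features), np.array(labels)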


# Given an input image path and a feature type, display the saved feature image
def display_feature(input_img_path, fea_type):
    fea_dir = fea_type
    # mirror the same path layout used when the feature images were saved
    fea_img_path = os.path.join(fea_dir, os.path.splitdrive(input_img_path)[1].lstrip('\\/'))
    if os.path.exists(fea_img_path):
        fea_img = io.imread(fea_img_path)
        plt.imshow(fea_img, cmap='gray')
        plt.show()
    else:
        print(f"Feature image not found at {fea_img_path}")


if __name__ == '__main__':
    # batch_pca_lda()
    img_path = 'D:\\hiddz\\SAR\\test_data\\TEST\\2S1\\HB14936.JPG'
    fea = feature_glcm(img_path, False)
    fea = feature_hog(img_path, False)
    fea = feature_lbp(img_path, False)
    fea = feature_orb(img_path, False)
    fea = feature_shift(img_path, False)
    print(fea)  # only the last computed feature vector (SIFT keypoints) is printed
    display_feature(img_path, 'glcm')
    display_feature(img_path, 'hog')
    display_feature(img_path, 'lbp')
    display_feature(img_path, 'orb')
    display_feature(img_path, 'sift')