リモートセンシング意味分割切図予測後再接合
14637 ワード
リモートセンシング映像は意味分割モデルを利用して訓練を行う時、よく小図に切り取って訓練を行う.予測する時、全体のリモートセンシング映像を小図に切断して予測を行い、更につなぎ合わせる必要がある.コードは以下の通りである.
def total_predict(ori_image):
    """Predict semantic segmentation for a whole remote-sensing image.

    The image is cut into 256x256 tiles (the size the model was trained on),
    each tile is predicted through the TF session, and the per-tile argmax
    label maps are stitched back into a full-size label image. Edge tiles are
    anchored to the right/bottom borders and only their non-overlapping part
    is written back.

    Relies on module-level names defined elsewhere in this file:
    `sess` (tf.Session), `pred` (output tensor), `img` (input placeholder),
    `np` (numpy).

    Args:
        ori_image: H x W x C uint8-like array; assumes H >= 256 and
            W >= 256 — TODO confirm with callers.

    Returns:
        H x W float array of predicted class indices (dtype inherited from
        np.ones, kept for backward compatibility with existing callers).
    """
    tile = 256
    h_step = ori_image.shape[0] // tile
    w_step = ori_image.shape[1] // tile
    # Positive remainders; 0 means the dimension divides evenly and no
    # extra edge tile is needed (the original negated these, and a zero
    # remainder made the edge slices full-width and crash on assignment).
    h_rest = ori_image.shape[0] - tile * h_step
    w_rest = ori_image.shape[1] - tile * w_step

    image_list = []
    # Interior grid of full tiles, row-major; the stitching loop below must
    # consume predictions in exactly this order.
    for h in range(h_step):
        for w in range(w_step):
            image_list.append(
                ori_image[h * tile:(h + 1) * tile, w * tile:(w + 1) * tile, :])
        if w_rest:
            # Right-edge tile of this row, anchored to the right border.
            image_list.append(ori_image[h * tile:(h + 1) * tile, -tile:, :])
    if h_rest:
        # Bottom strip, anchored to the bottom border. NOTE: the original
        # iterated range(w_step - 1) here, which left one full cell of the
        # bottom strip unstitched; all w_step cells are covered now.
        for w in range(w_step):
            image_list.append(ori_image[-tile:, w * tile:(w + 1) * tile, :])
        if w_rest:
            # Bottom-right corner tile.
            image_list.append(ori_image[-tile:, -tile:, :])

    # Run the model tile by tile; each prediction is a 256x256 label map.
    predict_list = []
    for image in image_list:
        x_batch = np.expand_dims(image / 255.0, axis=0)
        pred1 = sess.run(pred, feed_dict={img: x_batch})
        predict = np.squeeze(np.argmax(pred1, axis=3)).astype(np.uint8)
        predict_list.append(predict)

    # Stitch predictions back in the same order they were produced.
    tmp = np.ones([ori_image.shape[0], ori_image.shape[1]])
    idx = 0
    for h in range(h_step):
        for w in range(w_step):
            tmp[h * tile:(h + 1) * tile,
                w * tile:(w + 1) * tile] = predict_list[idx]
            idx += 1
        if w_rest:
            # Only the right-most w_rest columns of this edge tile are new;
            # the rest overlaps the last full tile of the row.
            tmp[h * tile:(h + 1) * tile, -w_rest:] = \
                predict_list[idx][:, -w_rest:]
            idx += 1
    if h_rest:
        for w in range(w_step):
            tmp[-h_rest:, w * tile:(w + 1) * tile] = \
                predict_list[idx][-h_rest:, :]
            idx += 1
        if w_rest:
            # Bottom-right corner: only the overhang region is new. The
            # original wrote tmp[-257:-1, -257:-1], shifting the patch by one
            # pixel and leaving the last row/column at the ones-fill value.
            tmp[-h_rest:, -w_rest:] = predict_list[idx][-h_rest:, -w_rest:]
            idx += 1
    return tmp