AOD-NET去雾网络与ACE自适应对比度增强结合代码示例
下面是一个使用AOD-NET去雾网络和ACE自适应对比度增强结合的代码示例:
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.models import vgg16
# AOD-NET去雾网络定义
class AODNet(nn.Module):
    """Simplified dehazing CNN used as a stand-in for AOD-Net.

    NOTE(review): the published AOD-Net uses five multi-scale convolutions
    with feature concatenation, estimates a K(x) map, and reconstructs the
    clean image as J = K*I - K + 1.  This version is a plain stack of ten
    3x3 convolutions and shares only the name -- confirm which weights
    'aod_net_weights.pth' actually contains before deployment.

    The original final layer produced a single channel, which broke the
    downstream BGR->Lab conversion in ``ace_contrast_enhancement``; it now
    outputs 3 channels so the network returns a colour image.
    """

    def __init__(self):
        super(AODNet, self).__init__()
        # Nine identical conv+ReLU stages, 3 channels throughout.
        self.conv1 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu3 = nn.ReLU(inplace=True)
        self.conv4 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu4 = nn.ReLU(inplace=True)
        self.conv5 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu5 = nn.ReLU(inplace=True)
        self.conv6 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu6 = nn.ReLU(inplace=True)
        self.conv7 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu7 = nn.ReLU(inplace=True)
        self.conv8 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu8 = nn.ReLU(inplace=True)
        self.conv9 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.relu9 = nn.ReLU(inplace=True)
        # Final projection: 3 output channels (was 1) so the result is a
        # full colour image the rest of the pipeline can consume.
        self.conv10 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        """Run the conv/ReLU stack.

        Args:
            x: input tensor of shape (N, 3, H, W).

        Returns:
            Tensor of shape (N, 3, H, W) -- the dehazed image estimate
            (no final activation, so values are unbounded).
        """
        out = self.relu1(self.conv1(x))
        out = self.relu2(self.conv2(out))
        out = self.relu3(self.conv3(out))
        out = self.relu4(self.conv4(out))
        out = self.relu5(self.conv5(out))
        out = self.relu6(self.conv6(out))
        out = self.relu7(self.conv7(out))
        out = self.relu8(self.conv8(out))
        out = self.relu9(self.conv9(out))
        return self.conv10(out)
# ACE自适应对比度增强
def ace_contrast_enhancement(image, gain=16.0):
    """Adaptive contrast enhancement of the lightness channel in Lab space.

    Stretches the L channel so its standard deviation becomes ``gain``
    while keeping the mean lightness and the original colour (a/b)
    channels unchanged.

    Args:
        image: H x W x 3 uint8 BGR image (as returned by cv2.imread).
        gain: target standard deviation of the enhanced L channel.
            Defaults to 16, matching the previously hard-coded factor.

    Returns:
        H x W x 3 uint8 BGR image with stretched lightness contrast.
    """
    lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    # Work in float to avoid uint8 wrap-around during the stretch.
    l_channel = lab_image[:, :, 0].astype(np.float32)
    l_mean = float(np.mean(l_channel))
    l_std = float(np.std(l_channel))
    # Guard against division by zero on flat (constant-lightness) images,
    # which produced NaN/inf in the original code.
    if l_std < 1e-6:
        return image.copy()
    enhanced_l = (l_channel - l_mean) / l_std * gain + l_mean
    lab_image[:, :, 0] = np.clip(enhanced_l, 0, 255).astype(np.uint8)
    # Convert back to BGR with the untouched a/b channels.
    return cv2.cvtColor(lab_image, cv2.COLOR_Lab2BGR)
# --- Model setup: AOD-NET weights and a VGG16 feature extractor ---
# Prefer GPU when available; the checkpoint is remapped onto this device
# so weights saved on CUDA still load on a CPU-only machine.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
aod_net = AODNet().to(device)
# map_location fixes a crash when the .pth was serialized on a GPU but
# this host has none (the original call omitted it).
aod_net.load_state_dict(torch.load('aod_net_weights.pth', map_location=device))
aod_net.eval()
# First 16 feature layers of VGG16.  NOTE(review): this model is never
# used anywhere below -- presumably intended for a perceptual loss during
# training; confirm before removing.
vgg16_model = vgg16(pretrained=True).features[:16].to(device).eval()
def dehaze_and_enhance(image):
    """Dehaze a BGR image with AOD-NET, then apply ACE contrast enhancement.

    Args:
        image: H x W x 3 uint8 BGR image in the 0-255 range.

    Returns:
        H x W x 3 uint8 BGR image, dehazed and contrast-enhanced.
    """
    # Normalize to [0, 1] float and reshape HWC -> NCHW for the network.
    tensor = (
        torch.from_numpy(image.astype(np.float32).transpose((2, 0, 1)) / 255.0)
        .unsqueeze(0)
        .to(device)
    )
    # no_grad: pure inference -- the original built an autograd graph
    # for nothing.
    with torch.no_grad():
        dehazed = aod_net(tensor)
    # Back to HWC layout.  The original code never transposed the CHW
    # output, so the array fed to the colour conversion was malformed.
    dehazed = dehazed.squeeze(0).cpu().numpy().transpose((1, 2, 0))
    # Rescale to uint8 BEFORE enhancement: ace_contrast_enhancement
    # assumes 0-255 uint8 input.  The original multiplied by 255 *after*
    # enhancement, overflowing the already-0-255 result.
    dehazed_u8 = np.clip(dehazed * 255.0, 0, 255).astype(np.uint8)
    return ace_contrast_enhancement(dehazed_u8)
# --- Demo: load an image, run the pipeline, display input and output ---
input_image = cv2.imread('input_image.jpg')
if input_image is None:
    # cv2.imread returns None (no exception) when the file is missing or
    # unreadable; fail loudly instead of crashing deep inside the pipeline.
    raise FileNotFoundError("could not read 'input_image.jpg'")
output_image = dehaze_and_enhance(input_image)
cv2.imshow('Input Image', input_image)
cv2.imshow('Output Image', output_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
请注意,在运行此代码之前,您需要在同一目录中准备aod_net_weights.pth文件,其中包含已训练的AOD-NET模型的权重。此外,您还需要将input_image.jpg替换为您自己的输入图像文件。
原文地址: https://www.cveoy.top/t/topic/pmzm 著作权归作者所有。请勿转载和采集!