
Image Inpainting: Repairing Scratches in Old Photos with the GLCIC Deep Neural Network

Step 1: Introduction to GLCIC

        GLCIC-PyTorch is an open-source PyTorch project that implements the "Globally and Locally Consistent Image Completion" (GLCIC) method proposed by Iizuka et al. The method targets image-inpainting tasks and can effectively restore regions of an image that are occluded or damaged. The project is written in Python and built on the PyTorch deep-learning framework.

Step 2: GLCIC Network Architecture

        The core function of the project is image inpainting, realized by training a generative Completion Network together with a Context Discriminator. The completion network performs the actual repair, while the discriminator evaluates its output to raise the quality, ensuring that the repaired image stays consistent with the original both globally and locally. The main features are listed below (a sketch of the corresponding training losses follows the list):

        Image inpainting: the completion network fills in the missing regions of an image.
        Global and local consistency: the repaired result matches the original image as a whole while staying coherent in local detail.
        Discriminator assistance: the context discriminator evaluates generated images to drive up inpainting quality.
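        The division of labor between the two networks is easiest to see in their training objectives. The code below is a minimal, illustrative training step, not the project's actual training script: the classes are assumed to live in a models.py matching the code in Step 3, the 160/96 input sizes and the weighting factor alpha are assumptions, and crop_local is a hypothetical stand-in for cropping the patch around the hole.

import torch
import torch.nn as nn
from models import CompletionNetwork, ContextDiscriminator  # classes from Step 3

torch.manual_seed(0)
batch, img_size, local_size = 2, 160, 96  # sizes are illustrative, not mandated

model_cn = CompletionNetwork()
model_cd = ContextDiscriminator(
    local_input_shape=(3, local_size, local_size),
    global_input_shape=(3, img_size, img_size),
)

# Stand-ins for a real batch: x are ground-truth photos, mask marks the
# damaged pixels with 1 (here a square hole, purely for illustration).
x = torch.rand(batch, 3, img_size, img_size)
mask = torch.zeros(batch, 1, img_size, img_size)
mask[:, :, 48:96, 48:96] = 1.0

def crop_local(img, top=32, left=32, size=local_size):
    # Hypothetical helper: crop the local patch around the hole.
    return img[:, :, top:top + size, left:left + size]

bce, mse = nn.BCELoss(), nn.MSELoss()
alpha = 4e-4  # weight between reconstruction and adversarial terms (assumed)

# Completion-network step: inpaint, paste the prediction into the hole,
# then try to convince the discriminator the result is real.
out_cn = model_cn(torch.cat((x * (1 - mask), mask), dim=1))
completed = out_cn * mask + x * (1 - mask)
fake = model_cd((crop_local(completed), completed))
loss_cn = mse(out_cn * mask, x * mask) + alpha * bce(fake, torch.ones_like(fake))

# Discriminator step: real (local, global) pair vs. completed pair.
real = model_cd((crop_local(x), x))
fake = model_cd((crop_local(completed.detach()), completed.detach()))
loss_cd = bce(real, torch.ones_like(real)) + bce(fake, torch.zeros_like(fake))
print(loss_cn.item(), loss_cd.item())

        The key design choice visible here is that the discriminator receives a pair of views, a local crop around the repaired region and the full image, which is what enforces consistency at both scales.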

Step 3: Model Code

import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import Flatten, Concatenate


class CompletionNetwork(nn.Module):
    def __init__(self):
        super(CompletionNetwork, self).__init__()
        # input_shape: (None, 4, img_h, img_w)
        self.conv1 = nn.Conv2d(4, 64, kernel_size=5, stride=1, padding=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.act1 = nn.ReLU()
        # input_shape: (None, 64, img_h, img_w)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.act2 = nn.ReLU()
        # input_shape: (None, 128, img_h//2, img_w//2)
        self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.act3 = nn.ReLU()
        # input_shape: (None, 128, img_h//2, img_w//2)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.act4 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv5 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.bn5 = nn.BatchNorm2d(256)
        self.act5 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv6 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.bn6 = nn.BatchNorm2d(256)
        self.act6 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv7 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=2, padding=2)
        self.bn7 = nn.BatchNorm2d(256)
        self.act7 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv8 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=4, padding=4)
        self.bn8 = nn.BatchNorm2d(256)
        self.act8 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv9 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=8, padding=8)
        self.bn9 = nn.BatchNorm2d(256)
        self.act9 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv10 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=16, padding=16)
        self.bn10 = nn.BatchNorm2d(256)
        self.act10 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv11 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.bn11 = nn.BatchNorm2d(256)
        self.act11 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.conv12 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.bn12 = nn.BatchNorm2d(256)
        self.act12 = nn.ReLU()
        # input_shape: (None, 256, img_h//4, img_w//4)
        self.deconv13 = nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1)
        self.bn13 = nn.BatchNorm2d(128)
        self.act13 = nn.ReLU()
        # input_shape: (None, 128, img_h//2, img_w//2)
        self.conv14 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.bn14 = nn.BatchNorm2d(128)
        self.act14 = nn.ReLU()
        # input_shape: (None, 128, img_h//2, img_w//2)
        self.deconv15 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        self.bn15 = nn.BatchNorm2d(64)
        self.act15 = nn.ReLU()
        # input_shape: (None, 64, img_h, img_w)
        self.conv16 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.bn16 = nn.BatchNorm2d(32)
        self.act16 = nn.ReLU()
        # input_shape: (None, 32, img_h, img_w)
        self.conv17 = nn.Conv2d(32, 3, kernel_size=3, stride=1, padding=1)
        self.act17 = nn.Sigmoid()
        # output_shape: (None, 3, img_h, img_w)

    def forward(self, x):
        x = self.bn1(self.act1(self.conv1(x)))
        x = self.bn2(self.act2(self.conv2(x)))
        x = self.bn3(self.act3(self.conv3(x)))
        x = self.bn4(self.act4(self.conv4(x)))
        x = self.bn5(self.act5(self.conv5(x)))
        x = self.bn6(self.act6(self.conv6(x)))
        x = self.bn7(self.act7(self.conv7(x)))
        x = self.bn8(self.act8(self.conv8(x)))
        x = self.bn9(self.act9(self.conv9(x)))
        x = self.bn10(self.act10(self.conv10(x)))
        x = self.bn11(self.act11(self.conv11(x)))
        x = self.bn12(self.act12(self.conv12(x)))
        x = self.bn13(self.act13(self.deconv13(x)))
        x = self.bn14(self.act14(self.conv14(x)))
        x = self.bn15(self.act15(self.deconv15(x)))
        x = self.bn16(self.act16(self.conv16(x)))
        x = self.act17(self.conv17(x))
        return x


class LocalDiscriminator(nn.Module):
    def __init__(self, input_shape):
        super(LocalDiscriminator, self).__init__()
        self.input_shape = input_shape
        self.output_shape = (1024,)
        self.img_c = input_shape[0]
        self.img_h = input_shape[1]
        self.img_w = input_shape[2]
        # input_shape: (None, img_c, img_h, img_w)
        self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.act1 = nn.ReLU()
        # input_shape: (None, 64, img_h//2, img_w//2)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
        self.bn2 = nn.BatchNorm2d(128)
        self.act2 = nn.ReLU()
        # input_shape: (None, 128, img_h//4, img_w//4)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
        self.bn3 = nn.BatchNorm2d(256)
        self.act3 = nn.ReLU()
        # input_shape: (None, 256, img_h//8, img_w//8)
        self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
        self.bn4 = nn.BatchNorm2d(512)
        self.act4 = nn.ReLU()
        # input_shape: (None, 512, img_h//16, img_w//16)
        self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
        self.bn5 = nn.BatchNorm2d(512)
        self.act5 = nn.ReLU()
        # input_shape: (None, 512, img_h//32, img_w//32)
        in_features = 512 * (self.img_h//32) * (self.img_w//32)
        self.flatten6 = Flatten()
        # input_shape: (None, 512 * img_h//32 * img_w//32)
        self.linear6 = nn.Linear(in_features, 1024)
        self.act6 = nn.ReLU()
        # output_shape: (None, 1024)

    def forward(self, x):
        x = self.bn1(self.act1(self.conv1(x)))
        x = self.bn2(self.act2(self.conv2(x)))
        x = self.bn3(self.act3(self.conv3(x)))
        x = self.bn4(self.act4(self.conv4(x)))
        x = self.bn5(self.act5(self.conv5(x)))
        x = self.act6(self.linear6(self.flatten6(x)))
        return x


class GlobalDiscriminator(nn.Module):
    def __init__(self, input_shape, arc='celeba'):
        super(GlobalDiscriminator, self).__init__()
        self.arc = arc
        self.input_shape = input_shape
        self.output_shape = (1024,)
        self.img_c = input_shape[0]
        self.img_h = input_shape[1]
        self.img_w = input_shape[2]
        # input_shape: (None, img_c, img_h, img_w)
        self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.act1 = nn.ReLU()
        # input_shape: (None, 64, img_h//2, img_w//2)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
        self.bn2 = nn.BatchNorm2d(128)
        self.act2 = nn.ReLU()
        # input_shape: (None, 128, img_h//4, img_w//4)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
        self.bn3 = nn.BatchNorm2d(256)
        self.act3 = nn.ReLU()
        # input_shape: (None, 256, img_h//8, img_w//8)
        self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
        self.bn4 = nn.BatchNorm2d(512)
        self.act4 = nn.ReLU()
        # input_shape: (None, 512, img_h//16, img_w//16)
        self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
        self.bn5 = nn.BatchNorm2d(512)
        self.act5 = nn.ReLU()
        # input_shape: (None, 512, img_h//32, img_w//32)
        if arc == 'celeba':
            in_features = 512 * (self.img_h//32) * (self.img_w//32)
            self.flatten6 = Flatten()
            self.linear6 = nn.Linear(in_features, 1024)
            self.act6 = nn.ReLU()
        elif arc == 'places2':
            self.conv6 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
            self.bn6 = nn.BatchNorm2d(512)
            self.act6 = nn.ReLU()
            # input_shape: (None, 512, img_h//64, img_w//64)
            in_features = 512 * (self.img_h//64) * (self.img_w//64)
            self.flatten7 = Flatten()
            self.linear7 = nn.Linear(in_features, 1024)
            self.act7 = nn.ReLU()
        else:
            raise ValueError('Unsupported architecture \'%s\'.' % self.arc)
        # output_shape: (None, 1024)

    def forward(self, x):
        x = self.bn1(self.act1(self.conv1(x)))
        x = self.bn2(self.act2(self.conv2(x)))
        x = self.bn3(self.act3(self.conv3(x)))
        x = self.bn4(self.act4(self.conv4(x)))
        x = self.bn5(self.act5(self.conv5(x)))
        if self.arc == 'celeba':
            x = self.act6(self.linear6(self.flatten6(x)))
        elif self.arc == 'places2':
            x = self.bn6(self.act6(self.conv6(x)))
            x = self.act7(self.linear7(self.flatten7(x)))
        return x


class ContextDiscriminator(nn.Module):
    def __init__(self, local_input_shape, global_input_shape, arc='celeba'):
        super(ContextDiscriminator, self).__init__()
        self.arc = arc
        self.input_shape = [local_input_shape, global_input_shape]
        self.output_shape = (1,)
        self.model_ld = LocalDiscriminator(local_input_shape)
        self.model_gd = GlobalDiscriminator(global_input_shape, arc=arc)
        # input_shape: [(None, 1024), (None, 1024)]
        in_features = self.model_ld.output_shape[-1] + self.model_gd.output_shape[-1]
        self.concat1 = Concatenate(dim=-1)
        # input_shape: (None, 2048)
        self.linear1 = nn.Linear(in_features, 1)
        self.act1 = nn.Sigmoid()
        # output_shape: (None, 1)

    def forward(self, x):
        x_ld, x_gd = x
        x_ld = self.model_ld(x_ld)
        x_gd = self.model_gd(x_gd)
        out = self.act1(self.linear1(self.concat1([x_ld, x_gd])))
        return out
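        As a quick sanity check of the shape comments above, a single forward pass with a random tensor can be run. This assumes the code above is saved as models.py next to the project's layers.py; the 160x160 size is illustrative.

import torch
from models import CompletionNetwork

net = CompletionNetwork().eval()  # eval mode so BatchNorm accepts batch size 1
dummy = torch.rand(1, 4, 160, 160)  # 3 RGB channels + 1 mask channel
with torch.no_grad():
    out = net(dummy)
print(out.shape)  # expected: torch.Size([1, 3, 160, 160])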

Step 4: Running the Interactive Code
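        The interactive script itself is not reproduced here; the following is a minimal inference sketch of what running the trained completion network on a scratched photo might look like. The checkpoint name model_cn, the file names old_photo.jpg and scratch_mask.png, the 160x160 working size, and the mean-pixel fill for the holes are all assumptions (the GLCIC-PyTorch project additionally applies Poisson blending), not the author's exact code.

import torch
from PIL import Image
from torchvision import transforms
from models import CompletionNetwork

# Load the trained completion network (checkpoint name is hypothetical).
model_cn = CompletionNetwork()
model_cn.load_state_dict(torch.load('model_cn', map_location='cpu'))
model_cn.eval()

to_tensor = transforms.ToTensor()
img = to_tensor(Image.open('old_photo.jpg').convert('RGB')
                .resize((160, 160))).unsqueeze(0)
mask = to_tensor(Image.open('scratch_mask.png').convert('L')
                 .resize((160, 160))).unsqueeze(0)
mask = (mask > 0.5).float()  # binary mask: 1 = scratched pixel

# Fill the holes with the image mean and append the mask as a 4th channel.
mpv = img.mean(dim=(2, 3), keepdim=True)
x = img * (1 - mask) + mpv * mask
with torch.no_grad():
    out = model_cn(torch.cat((x, mask), dim=1))

# Keep original pixels; paste the prediction only into the scratched area.
result = out * mask + img * (1 - mask)
transforms.ToPILImage()(result.squeeze(0)).save('restored.jpg')

        In practice the mask can come from an automatic scratch detector or be painted by hand; since the prediction is pasted back only where the mask is 1, the undamaged pixels pass through untouched.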

Step 5: Full Project Contents

 For the complete project files, see the download link provided in the description of the demo and walkthrough video: ➷➷➷

图像修复:深度学习GLCIC神经网络实现老照片划痕修复_哔哩哔哩_bilibili​

