Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
*$py.class
build
*.egg-info
checkpoints

sample_data/*.tif
Empty file added deepwatermap/__init__.py
Empty file.
148 changes: 74 additions & 74 deletions deepwatermap.py → deepwatermap/deepwatermap.py
Original file line number Diff line number Diff line change
@@ -1,75 +1,75 @@
''' Implementation of DeepWaterMapV2.
The model architecture is explained in:
L.F. Isikdogan, A.C. Bovik, and P. Passalacqua,
"Seeing Through the Clouds with DeepWaterMap," IEEE GRSL, 2019.
'''
import tensorflow as tf
def model(min_width=4):
inputs = tf.keras.layers.Input(shape=[None, None, 6])
def conv_block(x, num_filters, kernel_size, stride=1, use_relu=True):
x = tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=kernel_size,
kernel_initializer='he_uniform',
strides=stride,
padding='same',
use_bias=False)(x)
x = tf.keras.layers.BatchNormalization()(x)
if use_relu:
x = tf.keras.layers.Activation('relu')(x)
return x
def downscaling_unit(x):
num_filters = int(x.get_shape()[-1]) * 4
x_1 = conv_block(x, num_filters, kernel_size=5, stride=2)
x_2 = conv_block(x_1, num_filters, kernel_size=3, stride=1)
x = tf.keras.layers.Add()([x_1, x_2])
return x
def upscaling_unit(x):
num_filters = int(x.get_shape()[-1]) // 4
x = tf.keras.layers.Lambda(lambda x: tf.nn.depth_to_space(x, 2))(x)
x_1 = conv_block(x, num_filters, kernel_size=3)
x_2 = conv_block(x_1, num_filters, kernel_size=3)
x = tf.keras.layers.Add()([x_1, x_2])
return x
def bottleneck_unit(x):
num_filters = int(x.get_shape()[-1])
x_1 = conv_block(x, num_filters, kernel_size=3)
x_2 = conv_block(x_1, num_filters, kernel_size=3)
x = tf.keras.layers.Add()([x_1, x_2])
return x
# model flow
skip_connections = []
num_filters = min_width
# first layer
x = conv_block(inputs, num_filters, kernel_size=1, use_relu=False)
skip_connections.append(x)
# encoder
for i in range(4):
x = downscaling_unit(x)
skip_connections.append(x)
# bottleneck
x = bottleneck_unit(x)
# decoder
for i in range(4):
x = tf.keras.layers.Add()([x, skip_connections.pop()])
x = upscaling_unit(x)
# last layer
x = tf.keras.layers.Add()([x, skip_connections.pop()])
x = conv_block(x, 1, kernel_size=1, use_relu=False)
x = tf.keras.layers.Activation('sigmoid')(x)
model = tf.keras.Model(inputs=inputs, outputs=x)
''' Implementation of DeepWaterMapV2.

The model architecture is explained in:
L.F. Isikdogan, A.C. Bovik, and P. Passalacqua,
"Seeing Through the Clouds with DeepWaterMap," IEEE GRSL, 2019.
'''

import tensorflow as tf

def model(min_width=4):
    """Build the DeepWaterMapV2 Keras model.

    An encoder-decoder network with additive skip connections that maps a
    6-band image of arbitrary spatial size to a single-channel sigmoid
    water-probability map.

    Args:
        min_width: filter count of the first (and last) convolution; the
            widths of the inner layers scale up from this value.

    Returns:
        An uncompiled tf.keras.Model.
    """
    inputs = tf.keras.layers.Input(shape=[None, None, 6])

    def conv_bn(tensor, filters, kernel_size, stride=1, use_relu=True):
        # Conv -> BatchNorm (-> optional ReLU); bias is dropped since BN adds one.
        tensor = tf.keras.layers.Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            kernel_initializer='he_uniform',
            strides=stride,
            padding='same',
            use_bias=False)(tensor)
        tensor = tf.keras.layers.BatchNormalization()(tensor)
        if use_relu:
            tensor = tf.keras.layers.Activation('relu')(tensor)
        return tensor

    def down_unit(tensor):
        # Halve spatial size, quadruple channels; residual pair of convs.
        widened = int(tensor.get_shape()[-1]) * 4
        first = conv_bn(tensor, widened, kernel_size=5, stride=2)
        second = conv_bn(first, widened, kernel_size=3, stride=1)
        return tf.keras.layers.Add()([first, second])

    def up_unit(tensor):
        # Double spatial size via depth_to_space, quarter channels; residual convs.
        narrowed = int(tensor.get_shape()[-1]) // 4
        tensor = tf.keras.layers.Lambda(lambda t: tf.nn.depth_to_space(t, 2))(tensor)
        first = conv_bn(tensor, narrowed, kernel_size=3)
        second = conv_bn(first, narrowed, kernel_size=3)
        return tf.keras.layers.Add()([first, second])

    def bottleneck_unit(tensor):
        # Width-preserving residual pair of convs at the lowest resolution.
        width = int(tensor.get_shape()[-1])
        first = conv_bn(tensor, width, kernel_size=3)
        second = conv_bn(first, width, kernel_size=3)
        return tf.keras.layers.Add()([first, second])

    skips = []

    # stem: project the 6 input bands to min_width channels
    x = conv_bn(inputs, min_width, kernel_size=1, use_relu=False)
    skips.append(x)

    # encoder: four downscaling stages, stashing a skip after each
    for _ in range(4):
        x = down_unit(x)
        skips.append(x)

    x = bottleneck_unit(x)

    # decoder: merge the matching skip, then upscale, four times
    for _ in range(4):
        x = tf.keras.layers.Add()([x, skips.pop()])
        x = up_unit(x)

    # head: merge the stem skip and squash to a single probability channel
    x = tf.keras.layers.Add()([x, skips.pop()])
    x = conv_bn(x, 1, kernel_size=1, use_relu=False)
    x = tf.keras.layers.Activation('sigmoid')(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
144 changes: 74 additions & 70 deletions inference.py → deepwatermap/inference.py
Original file line number Diff line number Diff line change
@@ -1,70 +1,74 @@
''' Runs inference on a given GeoTIFF image.

example:
$ python inference.py --checkpoint_path checkpoints/cp.135.ckpt \
--image_path sample_data/sentinel2_example.tif --save_path water_map.png
'''

# Uncomment this to run inference on CPU if your GPU runs out of memory
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

import argparse
import deepwatermap
import tifffile as tiff
import numpy as np
import cv2

def find_padding(v, divisor=32):
v_divisible = max(divisor, int(divisor * np.ceil( v / divisor )))
total_pad = v_divisible - v
pad_1 = total_pad // 2
pad_2 = total_pad - pad_1
return pad_1, pad_2

def main(checkpoint_path, image_path, save_path):
# load the model
model = deepwatermap.model()
model.load_weights(checkpoint_path)

# load and preprocess the input image
image = tiff.imread(image_path)
pad_r = find_padding(image.shape[0])
pad_c = find_padding(image.shape[1])
image = np.pad(image, ((pad_r[0], pad_r[1]), (pad_c[0], pad_c[1]), (0, 0)), 'reflect')

# solve no-pad index issue after inference
if pad_r[1] == 0:
pad_r = (pad_r[0], 1)
if pad_c[1] == 0:
pad_c = (pad_c[0], 1)

image = image.astype(np.float32)

# remove nans (and infinity) - replace with 0s
image = np.nan_to_num(image, copy=False, nan=0.0, posinf=0.0, neginf=0.0)

image = image - np.min(image)
image = image / np.maximum(np.max(image), 1)

# run inference
image = np.expand_dims(image, axis=0)
dwm = model.predict(image)
dwm = np.squeeze(dwm)
dwm = dwm[pad_r[0]:-pad_r[1], pad_c[0]:-pad_c[1]]

# soft threshold
dwm = 1./(1+np.exp(-(16*(dwm-0.5))))
dwm = np.clip(dwm, 0, 1)

# save the output water map
cv2.imwrite(save_path, dwm * 255)

if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str,
help="Path to the dir where the checkpoints are stored")
parser.add_argument('--image_path', type=str, help="Path to the input GeoTIFF image")
parser.add_argument('--save_path', type=str, help="Path where the output map will be saved")
args = parser.parse_args()
main(args.checkpoint_path, args.image_path, args.save_path)
''' Runs inference on a given GeoTIFF image.

example:
$ python inference.py --checkpoint_path checkpoints/cp.135.ckpt --image_path sample_data/image2.tif --save_path water_map.png
'''

# Uncomment this to run inference on CPU if your GPU runs out of memory
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

import argparse
from deepwatermap import deepwatermap
import tifffile as tiff
import numpy as np
import cv2

def find_padding(v, divisor=32):
    """Return (before, after) padding that rounds ``v`` up to a multiple of ``divisor``.

    The padded size is never smaller than ``divisor`` itself, and the total
    padding is split as evenly as possible, with any odd pixel going to
    ``after``.
    """
    padded_size = max(divisor, int(divisor * np.ceil(v / divisor)))
    total = padded_size - v
    before = total // 2
    return before, total - before

def main():
    """Command-line entry point: produce a water map from a GeoTIFF image.

    Parses --checkpoint_path, --image_path and --save_path, runs the
    DeepWaterMap model on the padded and min-max-normalized image, and
    writes the resulting water map to disk as an 8-bit image.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str,
                        help="Path to the dir where the checkpoints are stored")
    parser.add_argument('--image_path', type=str, help="Path to the input GeoTIFF image")
    parser.add_argument('--save_path', type=str, help="Path where the output map will be saved")
    args = parser.parse_args()

    # load the model and restore the trained weights
    model = deepwatermap.model()
    model.load_weights(args.checkpoint_path)

    # load the image and reflect-pad it so both dimensions divide evenly by 32
    image = tiff.imread(args.image_path)
    row_pad = find_padding(image.shape[0])
    col_pad = find_padding(image.shape[1])
    image = np.pad(image, (row_pad, col_pad, (0, 0)), 'reflect')

    # a zero trailing pad would make the [start:-0] crop below empty; use 1 instead
    if row_pad[1] == 0:
        row_pad = (row_pad[0], 1)
    if col_pad[1] == 0:
        col_pad = (col_pad[0], 1)

    # replace NaN/inf with zeros before normalizing
    image = np.nan_to_num(image.astype(np.float32), copy=False,
                          nan=0.0, posinf=0.0, neginf=0.0)

    # min-max normalize to [0, 1] (guarding against an all-zero image)
    image -= np.min(image)
    image /= np.maximum(np.max(image), 1)

    # run inference on a single-image batch, then strip the padding
    probs = model.predict(image[np.newaxis, ...])
    water_map = np.squeeze(probs)
    water_map = water_map[row_pad[0]:-row_pad[1], col_pad[0]:-col_pad[1]]

    # sharpen the probabilities with a logistic soft threshold
    water_map = 1. / (1 + np.exp(-(16 * (water_map - 0.5))))
    water_map = np.clip(water_map, 0, 1)

    # save the output water map
    cv2.imwrite(args.save_path, water_map * 255)

if __name__ == '__main__':
    main()
80 changes: 40 additions & 40 deletions metrics.py → deepwatermap/metrics.py
Original file line number Diff line number Diff line change
@@ -1,41 +1,41 @@
''' This script defines custom metrics and loss functions.
The Adaptive Max-Pool Loss acts as a weighting function that multiplies a
loss value with the maximum loss values within an NxN neighborhood.
An earlier version of this loss function described in:
F. Isikdogan, A.C. Bovik, and P. Passalacqua,
"Learning a River Network Extractor using an Adaptive Loss Function,"
IEEE Geoscience and Remote Sensing Letters, 2018.
'''
import tensorflow as tf
from tensorflow.keras import backend as K
import numpy as np
def running_recall(y_true, y_pred):
TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
TP_FN = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = TP / (TP_FN + K.epsilon())
return recall
def running_precision(y_true, y_pred):
TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
TP_FP = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = TP / (TP_FP + K.epsilon())
return precision
def running_f1(y_true, y_pred):
precision = running_precision(y_true, y_pred)
recall = running_recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
def adaptive_maxpool_loss(y_true, y_pred, alpha=0.25):
y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())
positive = -y_true * K.log(y_pred) * alpha
negative = -(1. - y_true) * K.log(1. - y_pred) * (1-alpha)
pointwise_loss = positive + negative
max_loss = tf.keras.layers.MaxPool2D(pool_size=8, strides=1, padding='same')(pointwise_loss)
x = pointwise_loss * max_loss
x = K.mean(x, axis=-1)
''' This script defines custom metrics and loss functions.

The Adaptive Max-Pool Loss acts as a weighting function that multiplies a
loss value with the maximum loss values within an NxN neighborhood.
An earlier version of this loss function described in:

F. Isikdogan, A.C. Bovik, and P. Passalacqua,
"Learning a River Network Extractor using an Adaptive Loss Function,"
IEEE Geoscience and Remote Sensing Letters, 2018.
'''

import tensorflow as tf
from tensorflow.keras import backend as K
import numpy as np

def running_recall(y_true, y_pred):
    """Recall over the batch: TP / (TP + FN), epsilon-guarded against /0."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (actual_positives + K.epsilon())

def running_precision(y_true, y_pred):
    """Precision over the batch: TP / (TP + FP), epsilon-guarded against /0."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def running_f1(y_true, y_pred):
    """F1 score: harmonic mean of running precision and recall."""
    p = running_precision(y_true, y_pred)
    r = running_recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))

def adaptive_maxpool_loss(y_true, y_pred, alpha=0.25):
    """Alpha-balanced cross-entropy reweighted by the max loss in an 8x8 window.

    Each pixel's loss is multiplied by the largest loss value within its
    8x8 neighborhood, emphasizing regions that are hard to classify.
    """
    y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())
    # alpha-balanced binary cross-entropy, per pixel
    positive_term = -y_true * K.log(y_pred) * alpha
    negative_term = -(1. - y_true) * K.log(1. - y_pred) * (1 - alpha)
    pixel_loss = positive_term + negative_term
    # weight each pixel by the maximum loss in its neighborhood
    neighborhood_max = tf.keras.layers.MaxPool2D(
        pool_size=8, strides=1, padding='same')(pixel_loss)
    weighted = pixel_loss * neighborhood_max
    return K.mean(weighted, axis=-1)
Loading