This example shows a super-resolution convolutional model trained on a single image. The model is based on the article "Image Super-Resolution Using Deep Convolutional Networks" by Chao Dong, Chen Change Loy, Kaiming He, and Xiaoou Tang. It works with a recent version of Chrome and should work in other modern browsers (not verified). It is recommended to run Chrome with the "--allow-file-access-from-files" flag so that local image files can be loaded.
Copy the following code into an HTML file and open it with Chrome. Use the browser's Developer tools for debugging. The first variant uses bilinear interpolation, the second one bicubic.
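Both listings feed the network grayscale tensors scaled to roughly [-1, 1): GetScreenTensor computes p/128 - 1 for every 8-bit pixel p, and ShowTensor inverts this with t/2 + 0.5 before drawing, which lands in the [0, 1] range expected by tf.browser.toPixels. A minimal sketch of this round trip (an aside, not part of the files below; it assumes tf is already loaded, e.g. in the browser console of one of the pages):
var P = tf.tensor([0, 128, 255]); // raw 8-bit pixel values
var T = P.div(128).sub(1); // forward scaling used by GetScreenTensor: [-1, 0, ~0.992]
var Back = T.div(2).add(0.5); // inverse used by ShowTensor: [0, 0.5, ~0.996], i.e. p/256
Back.print();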
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>TensorFlow.js Example</title>
<!-- Import TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@3.1.0/dist/tf.min.js"></script>
<!-- Import tfjs-vis -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-vis@1.0.2/dist/tfjs-vis.umd.min.js"></script>
<script>
var CountImage=0;
function OnLoadImage(Event)
{
CountImage++;
}
async function AddImage(Name, ID)
{
var Img = document.createElement("img");
Img.src=Name;
Img.id=ID;
Img.addEventListener("load", OnLoadImage);
document.body.appendChild(Img);
}
async function Idle()
{
if (CountImage==1)
{
CountImage++;
Run("A"); // start training when image loaded
}
}
async function Start()
{
AddImage("Lena.bmp","A");
setInterval(Idle,50);
}
document.addEventListener("DOMContentLoaded", Start);
async function Run(To)
{
var From,Model,Bin=4;
To=GetScreenTensor(To);
From=To.resizeBilinear([To.shape[0]/Bin, To.shape[1]/Bin]); // downscale by factor Bin=4...
From=From.resizeBilinear([To.shape[0], To.shape[1]]); // ...then upscale back to the original size
ShowTensor(From,"B");
Model=GetTensorModel();
tfvis.show.modelSummary({name: "Architecture", tab: "Model"}, Model);
CompileTensor(Model);
await TrainTensor(Model, [From], [To], 1000);
}
function GetTensorLayer(Prev, Kernel, Filters, Activation) // Get one convolution layer
{
var Layer;
Layer = tf.layers.conv2d
({
kernelSize: Kernel,
filters: Filters,
activation : Activation?"relu":undefined,
padding: "same",
useBias: false,
});
Prev = Layer.apply(Prev);
return Prev;
}
function GetTensorModel()
{
var Kernel=9,Filters=32;
var Inp = tf.input({shape:[null,null,1]}),Prev,Sum; // height and width are left unknown
Prev=GetTensorLayer(Inp, Kernel, Filters, 1); // Model includes 3 layers:
Prev=GetTensorLayer(Prev, 1, Filters/2, 1); // the first and second are non-linear,
Prev=GetTensorLayer(Prev, 3, 1); // the third is a linear averaging one
Sum=tf.layers.add(); // Residual layer
Prev=Sum.apply([Inp, Prev]);
return tf.model({inputs:Inp, outputs:Prev});
}
async function TrainTensor(Model, Froms, Tos, Count)
{
var Callbacks=tfvis.show.fitCallbacks
(
{ name: "Training Performance " },
["mse"],
{ height: 200, callbacks: ["onBatchEnd"]}
);
Callbacks.onEpochEnd = function(epoch, logs) // Apply the model and show the result after each epoch
{
tf.tidy(() => // to clear memory
{
var From = PredictTensor(Model, Froms[0]);
ShowTensor(From, "Result");
});
};
await Model.fit(tf.stack(Froms), tf.stack(Tos), { epochs : Count,callbacks: Callbacks} );
}
function CompileTensor(Model)
{
Model.compile
({
optimizer: tf.train.adam(),
loss: tf.losses.meanSquaredError,
metrics: ["mse"],
});
}
function PredictTensor(Model, From)
{
return Model.predict(From.expandDims(0)).squeeze(0);
}
function GetScreenTensor(ID)
{
var Res=tf.browser.fromPixels(document.getElementById(ID)).div(128).sub(1); // scale 8-bit pixels to roughly [-1, 1)
return Res.mean(2).expandDims(2); // average the RGB channels into a single grayscale channel
}
async function ShowTensor(Img, ID)
{
Img=Img.div(2).add(0.5); // map the [-1, 1] output back to [0, 1]
Img=tf.keep(tf.clipByValue(Img, 0, 1).squeeze());
var Canvas=GetTensorElem("canvas",document.body,ID);
await tf.browser.toPixels(Img, Canvas); // wait for the asynchronous draw before disposing the tensor
tf.dispose(Img);
}
function GetTensorElem(Type, Parent, ID)
{
var Elem = document.getElementById(ID);
if (!Elem)
{
Elem = document.createElement(Type);
Elem.id = ID;
Parent.appendChild(Elem);
}
return Elem;
}
</script>
</head>
<body></body>
</html>
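The second variant uses a bicubically upscaled input. TensorFlow.js itself offers no bicubic resize (only resizeBilinear and resizeNearestNeighbor), so the upscaled low-resolution image is prepared in advance and loaded as a second file, LenaBicubic.bmp, next to the original Lena.bmp. If you would rather build the upscaled input in the browser, a canvas resize is one option; this is only a sketch, the hypothetical UpscaleImage helper is not part of the listings, and imageSmoothingQuality is merely a hint, so the result is not guaranteed to be bicubic:
function UpscaleImage(Img, Factor)
{
  var Canvas = document.createElement("canvas");
  Canvas.width = Img.naturalWidth * Factor;
  Canvas.height = Img.naturalHeight * Factor;
  var Ctx = Canvas.getContext("2d");
  Ctx.imageSmoothingEnabled = true;
  Ctx.imageSmoothingQuality = "high"; // a hint only; the resampling kernel depends on the browser
  Ctx.drawImage(Img, 0, 0, Canvas.width, Canvas.height);
  return Canvas; // a canvas can be passed to tf.browser.fromPixels just like an <img>
}
The listing below sticks with the pre-resized file so that the input really is bicubic.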
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>TensorFlow.js Example</title>
<!-- Import TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@3.1.0/dist/tf.min.js"></script>
<!-- Import tfjs-vis -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-vis@1.0.2/dist/tfjs-vis.umd.min.js"></script>
<script>
var CountImage=0;
function OnLoadImage(Event)
{
CountImage++;
}
async function AddImage(Name, ID)
{
var Img = document.createElement("img");
Img.src=Name;
Img.id=ID;
Img.addEventListener("load", OnLoadImage);
document.body.appendChild(Img);
}
async function Idle()
{
if (CountImage==2)
{
CountImage++;
Run("A","B"); // start training when image loaded
}
}
async function Start()
{
AddImage("LenaBicubic.bmp","A");
AddImage("Lena.bmp","B");
setInterval(Idle,50);
}
document.addEventListener("DOMContentLoaded", Start);
async function Run(From,To)
{
var Model;
To=GetScreenTensor(To);
From=GetScreenTensor(From);
Model=GetTensorModel();
tfvis.show.modelSummary({name: "Architecture", tab: "Model"}, Model);
GetTensorElem("div",document.body,"LN")
CompileTensor(Model);
await TrainTensor(Model, [From], [To], 1000);
}
function GetTensorLayer(Prev, Kernel, Filters, Activation) // Get one convolution layer
{
var Layer;
Layer = tf.layers.conv2d
({
kernelSize: Kernel,
filters: Filters,
activation : Activation?"relu":undefined,
padding: "same",
useBias: false,
});
Prev = Layer.apply(Prev);
return Prev;
}
function GetTensorModel()
{
var Kernel=9,Filters=32;
var Inp = tf.input({shape:[null,null,1]}),Prev,Sum; // height and width are left unknown
Prev=GetTensorLayer(Inp, Kernel, Filters, 1); // Model includes 3 layers:
Prev=GetTensorLayer(Prev, 1, Filters/2, 1); // the first and second are non-linear,
Prev=GetTensorLayer(Prev, 3, 1); // the third is a linear averaging one
Sum=tf.layers.add(); // Residual layer
Prev=Sum.apply([Inp, Prev]);
return tf.model({inputs:Inp, outputs:Prev});
}
async function TrainTensor(Model, Froms, Tos, Count)
{
var Callbacks=tfvis.show.fitCallbacks
(
{ name: "Training Performance " },
["mse"],
{ height: 200, callbacks: ["onBatchEnd"]}
);
Callbacks.onEpochEnd = function(epoch, logs) // Apply the model and show the result after each epoch
{
tf.tidy(() => // to clear memory
{
var From = PredictTensor(Model, Froms[0]);
ShowTensor(From, "Result");
});
};
await Model.fit(tf.stack(Froms), tf.stack(Tos), { epochs : Count,callbacks: Callbacks} );
}
function CompileTensor(Model)
{
Model.compile
({
optimizer: tf.train.adam(),
loss: tf.losses.meanSquaredError,
metrics: ["mse"],
});
}
function PredictTensor(Model, From)
{
return Model.predict(From.expandDims(0)).squeeze(0);
}
function GetScreenTensor(ID)
{
var Res=tf.browser.fromPixels(document.getElementById(ID)).div(128).sub(1); // scale 8-bit pixels to roughly [-1, 1)
return Res.mean(2).expandDims(2); // average the RGB channels into a single grayscale channel
}
async function ShowTensor(Img, ID, Zoom)
{
Img=Img.div(2).add(0.5); // map the [-1, 1] output back to [0, 1]
Img=tf.keep(tf.clipByValue(Img, 0, 1).squeeze());
var Canvas=GetTensorElem("canvas",document.body,ID);
await tf.browser.toPixels(Img, Canvas); // wait for the asynchronous draw before reading the shape and disposing the tensor
if (Zoom)
{
Canvas.style.width=Img.shape[1]*Zoom+"px";
Canvas.style.height=Img.shape[0]*Zoom+"px";
}
tf.dispose(Img);
}
function GetTensorElem(Type, Parent, ID)
{
var Elem = document.getElementById(ID);
if (!Elem)
{
Elem = document.createElement(Type);
Elem.id = ID;
Parent.appendChild(Elem);
}
return Elem;
}
</script>
</head>
<body></body>
</html>
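Once training finishes (or from the epoch callback), the learned weights can be kept with the standard LayersModel.save API, for example through the downloads:// handler. A minimal sketch; the file name is arbitrary:
async function SaveModel(Model)
{
  await Model.save("downloads://srcnn-single-image"); // downloads the model topology (.json) and the binary weights
}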