Function definitions in a script must appear at the end of the file. Move all statements after the "preprocessMiniBatch" function definition to before the first local function definition.

형구 on 14 Dec 2023
Answered: lazymatlab on 14 Dec 2023
To change the output image size in my GAN code, I ran the script below starting from the imds line. At the minibatchqueue call

mbq = minibatchqueue(augimds, ...
    MiniBatchSize=miniBatchSize, ...
    PartialMiniBatch="discard", ...
    MiniBatchFcn=@preprocessMiniBatch, ...
    MiniBatchFormat="SSCB");

the following error occurs:

[Function definitions in a script must appear at the end of the file. Move all statements after the "preprocessMiniBatch" function definition to before the first local function definition.]

Is there a way to fix this? The full script follows.
imds = imageDatastore("C:\Users\COMPUTER\Documents\MATLAB\lololo", IncludeSubfolders=true);
augmenter = imageDataAugmenter('RandXReflection', true);
augimds = augmentedImageDatastore([128 128], imds, 'DataAugmentation', augmenter);
numLatentInputs = 100;
projectionSize = [8 8 512]; % adjusted value
filterSize = 5;
numFilters = 64;
layersGenerator = [
    featureInputLayer(numLatentInputs) % featureInputLayer takes only the number of features; it has no 'InputSize' option
    projectAndReshapeLayer(projectionSize)
    transposedConv2dLayer(filterSize, 4*numFilters, 'Name', 'tconv1')
    batchNormalizationLayer('Name', 'bn1')
    reluLayer('Name', 'relu1')
    transposedConv2dLayer(filterSize, 2*numFilters, 'Stride', 2, 'Cropping', 'same', 'Name', 'tconv2')
    batchNormalizationLayer('Name', 'bn2')
    reluLayer('Name', 'relu2')
    transposedConv2dLayer(filterSize, numFilters, 'Stride', 2, 'Cropping', 'same', 'Name', 'tconv3')
    batchNormalizationLayer('Name', 'bn3')
    reluLayer('Name', 'relu3')
    transposedConv2dLayer(filterSize, 3, 'Stride', 2, 'Cropping', 'same', 'Name', 'tconv4')
    tanhLayer('Name', 'tanh')];
netG = dlnetwork(layersGenerator);
dropoutProb = 0.5;
numFilters = 64;
scale = 0.2;
inputSize = [128 128 3]; % changed image size
filterSize = 5;
layersDiscriminator = [
    imageInputLayer(inputSize, Normalization="none")
    dropoutLayer(dropoutProb)
    convolution2dLayer(filterSize, numFilters, Stride=2, Padding="same")
    leakyReluLayer(scale)
    convolution2dLayer(filterSize, 2*numFilters, Stride=2, Padding="same")
    batchNormalizationLayer
    leakyReluLayer(scale)
    convolution2dLayer(filterSize, 4*numFilters, Stride=2, Padding="same")
    batchNormalizationLayer
    leakyReluLayer(scale)
    convolution2dLayer(filterSize, 8*numFilters, Stride=2, Padding="same")
    batchNormalizationLayer
    leakyReluLayer(scale)
    convolution2dLayer(4, 1)
    sigmoidLayer];
netD = dlnetwork(layersDiscriminator);
dropoutProb = 0.5;
numFilters = 64;
scale = 0.2;
inputSize = [512 512 3];
filterSize = 5;
layersDiscriminator = [
    imageInputLayer(inputSize,Normalization="none")
    dropoutLayer(dropoutProb)
    convolution2dLayer(filterSize,numFilters,Stride=2,Padding="same")
    leakyReluLayer(scale)
    convolution2dLayer(filterSize,2*numFilters,Stride=2,Padding="same")
    batchNormalizationLayer
    leakyReluLayer(scale)
    convolution2dLayer(filterSize,4*numFilters,Stride=2,Padding="same")
    batchNormalizationLayer
    leakyReluLayer(scale)
    convolution2dLayer(filterSize,8*numFilters,Stride=2,Padding="same")
    batchNormalizationLayer
    leakyReluLayer(scale)
    convolution2dLayer(4,1)
    sigmoidLayer];
netD = dlnetwork(layersDiscriminator);
numEpochs = 100;
miniBatchSize = 128;
learnRate = 0.0002;
gradientDecayFactor = 0.5;
squaredGradientDecayFactor = 0.999;
flipProb = 0.35;
validationFrequency = 100;
augimds.MiniBatchSize = miniBatchSize;
function X = preprocessMiniBatch(data)
    % Concatenate mini-batch
    X = cat(4, data{:});
    % Rescale the images in the range [-1 1].
    X = rescale(X, -1, 1, 'InputMin', 0, 'InputMax', 255);
end
mbq = minibatchqueue(augimds, ...
    MiniBatchSize=miniBatchSize, ...
    PartialMiniBatch="discard", ...
    MiniBatchFcn=@preprocessMiniBatch, ...
    MiniBatchFormat="SSCB");
trailingAvgG = [];
trailingAvgSqG = [];
trailingAvg = [];
trailingAvgSqD = [];
numValidationImages = 5;
ZValidation = randn(numLatentInputs,numValidationImages,"single");
ZValidation = dlarray(ZValidation,"CB");
if canUseGPU
    ZValidation = gpuArray(ZValidation);
end
f = figure;
f.Position(3) = 2*f.Position(3);
imageAxes = subplot(1,2,1);
scoreAxes = subplot(1,2,2);
C = colororder;
lineScoreG = animatedline(scoreAxes,Color=C(1,:));
lineScoreD = animatedline(scoreAxes,Color=C(2,:));
legend("Generator","Discriminator");
ylim([0 1])
xlabel("Iteration")
ylabel("Score")
grid on
iteration = 0;
start = tic;
% Loop over epochs.
for epoch = 1:numEpochs
    % Reset and shuffle datastore.
    shuffle(mbq);
    % Loop over mini-batches.
    while hasdata(mbq)
        iteration = iteration + 1;
        % Read mini-batch of data.
        X = next(mbq);
        % Generate latent inputs for the generator network. Convert to
        % dlarray and specify the format "CB" (channel, batch). If a GPU is
        % available, then convert latent inputs to gpuArray.
        Z = randn(numLatentInputs, miniBatchSize, 'single');
        Z = dlarray(Z, 'CB');
        if canUseGPU
            Z = gpuArray(Z);
        end
        % Evaluate the gradients of the loss with respect to the learnable
        % parameters, the generator state, and the network scores using
        % dlfeval and the modelLoss function.
        [lossG, lossD, gradientsG, gradientsD, stateG, scoreG, scoreD] = ...
            dlfeval(@modelLoss, netG, netD, X, Z, flipProb);
        netG.State = stateG;
        % Update the discriminator network parameters.
        [netD, trailingAvg, trailingAvgSqD] = adamupdate(netD, gradientsD, ...
            trailingAvg, trailingAvgSqD, iteration, ...
            learnRate, gradientDecayFactor, squaredGradientDecayFactor);
        % Update the generator network parameters.
        [netG, trailingAvgG, trailingAvgSqG] = adamupdate(netG, gradientsG, ...
            trailingAvgG, trailingAvgSqG, iteration, ...
            learnRate, gradientDecayFactor, squaredGradientDecayFactor);
        % Every validationFrequency iterations, display batch of generated
        % images using the held-out generator input.
        if mod(iteration, validationFrequency) == 0 || iteration == 1
            % Generate images using the held-out generator input.
            XGeneratedValidation = predict(netG, ZValidation);
            % Tile and rescale the images in the range [0 1].
            I = imtile(extractdata(XGeneratedValidation));
            I = rescale(I);
            % Display the images.
            subplot(1, 2, 1);
            image(imageAxes, I)
            xticklabels([]);
            yticklabels([]);
            title("Generated Images");
        end
        % Update the scores plot.
        subplot(1, 2, 2)
        lossG = double(extractdata(lossG));
        addpoints(lineScoreG, iteration, lossG);
        lossD = double(extractdata(lossD));
        addpoints(lineScoreD, iteration, lossD);
        % Update the title with training progress information.
        D = duration(0, 0, toc(start), 'Format', 'hh:mm:ss');
        title(...
            "Epoch: " + epoch + ", " + ...
            "Iteration: " + iteration + ", " + ...
            "Elapsed: " + string(D))
        drawnow
    end
end
% Remaining code...
numObservations = 5;
ZNew = randn(numLatentInputs, numObservations, "single");
ZNew = dlarray(ZNew, "CB");
if canUseGPU
    ZNew = gpuArray(ZNew);
end
XGeneratedNew = predict(netG, ZNew);
I = imtile(extractdata(XGeneratedNew));
I = rescale(I);
figure
image(I)
axis off
title("Generated Images")
numImagesToSave = 1; % number of images to save
outputFolder = 'C:\Users\COMPUTER\Documents\MATLAB\pixgan\rpgan'; % folder to save the images in
% Create the output folder if it does not exist
if ~exist(outputFolder, 'dir')
    mkdir(outputFolder);
end
% Save the generated images individually
for i = 1:numImagesToSave
    generatedImage = extractdata(XGeneratedNew(:, :, :, i));
    % Rescale the image and convert to uint8
    generatedImage = uint8(255 * mat2gray(generatedImage));
    imwrite(generatedImage, fullfile(outputFolder, strcat('generated_image_', num2str(i), '.jpg')));
end
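
(For reference, the script also calls modelLoss through dlfeval and uses the custom projectAndReshapeLayer; neither is shown above. Like preprocessMiniBatch, a local modelLoss would also have to sit at the end of the script file. The sketch below follows the MathWorks "Train Generative Adversarial Network (GAN)" example and is only an assumption about what the original modelLoss looks like.)

function [lossG, lossD, gradientsG, gradientsD, stateG, scoreG, scoreD] = ...
    modelLoss(netG, netD, X, Z, flipProb)
    % Discriminator predictions on real images.
    YReal = forward(netD, X);
    % Generate images and get discriminator predictions on them.
    [XGenerated, stateG] = forward(netG, Z);
    YGenerated = forward(netD, XGenerated);
    % Scores used for monitoring training.
    scoreD = (mean(YReal) + mean(1 - YGenerated)) / 2;
    scoreG = mean(YGenerated);
    % Randomly flip a fraction of the real labels.
    numObservations = size(YReal, 4);
    idx = rand(1, numObservations) < flipProb;
    YReal(:,:,:,idx) = 1 - YReal(:,:,:,idx);
    % Non-saturating GAN losses.
    lossD = -mean(log(YReal)) - mean(log(1 - YGenerated));
    lossG = -mean(log(YGenerated));
    % Gradients with respect to the learnable parameters.
    gradientsG = dlgradient(lossG, netG.Learnables, RetainData=true);
    gradientsD = dlgradient(lossD, netD.Learnables);
end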

Accepted Answer

lazymatlab on 14 Dec 2023
Functions cannot be defined in the middle of a script. Move the part of the code below to the very end of the script and run it again.
function X = preprocessMiniBatch(data)
    % Concatenate mini-batch
    X = cat(4, data{:});
    % Rescale the images in the range [-1 1].
    X = rescale(X, -1, 1, 'InputMin', 0, 'InputMax', 255);
end
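
A script must keep all executable statements first and collect every local function at the very end of the file. Here is a minimal, self-contained illustration of the rule (the random data is only for the demo):

% demoScript.m - statements first, local functions at the end
data = {255*rand(64, 64, 3, 'single'), 255*rand(64, 64, 3, 'single')};
X = preprocessMiniBatch(data);   % calling the local function anywhere is fine
disp(size(X))                    % prints 64 64 3 2

% Local function definitions must come after all other statements.
function X = preprocessMiniBatch(data)
    % Concatenate the mini-batch along the 4th (batch) dimension.
    X = cat(4, data{:});
    % Rescale the images to the range [-1 1].
    X = rescale(X, -1, 1, 'InputMin', 0, 'InputMax', 255);
end

In the script from the question, that means the function X = preprocessMiniBatch(data) ... end block goes after the imwrite loop, while the mbq = minibatchqueue(...) call stays where it is; the same applies to any other local functions the script defines.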
