Classification of LPI Radar Signals
Introduction
LPI (Low Probability of Intercept) radar systems are designed to evade detection by using special modulation and transmission schemes over a wide frequency range at low power levels. In modern LPI radars, these conditions are usually achieved by transmitting continuous-wave (CW) signals. Detecting and classifying LPI signals is a challenging task. Recent advances show that deep learning techniques can be applied successfully to radar signal classification.
This example demonstrates how to classify LPI radar signals with a neural network. Before the data is fed to the network, it is preprocessed: from the complex signal we compute a time-frequency representation, and then, using image processing, we obtain a binary spectrogram and a feature vector extracted with HOG (Histogram of Oriented Gradients).
Read more about the data preprocessing below.
The neural network itself consists of two branches. The first branch is a two-dimensional convolutional network whose input is the binary spectrogram. The second branch is a one-dimensional convolutional network whose input is the feature vector extracted from the binary spectrogram with HOG. The features from the outputs of the two branches are concatenated and fed into a head of two fully connected layers that produces logits. The predicted class label is the argmax of the logits, and the cross-entropy loss on those logits is used for training.
Preparation
Import the required packages.
include("installPackages.jl");
using Pkg
Pkg.instantiate()
using SignalAnalysis
using DSP
using MAT
using Plots
using Images, ColorSchemes, Random
using ImageBinarization, ImageFiltering, ImageMorphology, ImageFeatures
using Flux, CUDA, cuDNN
using OneHotArrays
using StatisticalMeasures
include("show_sample.jl");
include("utils.jl");
CM = StatisticalMeasures.ConfusionMatrices;
Let us set the parameters that will be used throughout this work.
n = 64                    # STFT window length, samples
hop = 48                  # STFT hop between adjacent windows, samples
Fs = 20e6                 # sampling frequency, Hz
nfft = 256;               # FFT size
threshold = 210 / 255     # binarization threshold for the spectrogram image
datadir = "data";         # root folder of the dataset
n_classes = 11;           # number of signal classes
lr = 1e-3                 # learning rate
CLASSES = 1:n_classes;    # class labels
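For intuition, these STFT settings correspond to the following time and frequency resolutions (a small illustrative computation, not part of the pipeline):
win_duration = n / Fs          # 64 / 20e6 = 3.2 µs analysis window
hop_duration = hop / Fs        # 48 / 20e6 = 2.4 µs stride between adjacent windows
freq_resolution = Fs / nfft    # 20e6 / 256 = 78.125 kHz per FFT bin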
Data overview
Let us walk through the preprocessing on one selected signal.
First, we load and read a signal from a file. The data is a signal in complex form.
testpath = joinpath(datadir, "Frank")
files = readdir(testpath)
n_files = length(files)
ind = Random.rand(1:n_files)
filepath = joinpath(testpath, files[ind])
data = matread(filepath)["x"][:, 1];
The show_sample function computes the spectrogram of the signal using the STFT parameters set above.
z, fig = show_sample(data, n, hop, Fs, nfft);
fig
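show_sample is defined in show_sample.jl and is not listed here. As a rough sketch, the spectrogram step it performs could look like the following, assuming DSP.stft with a Hann window (two-sided, since the signal is complex):
# Hypothetical sketch of the STFT inside show_sample:
z_sketch = DSP.stft(data, n, n - hop; nfft=nfft, fs=Fs, window=DSP.hanning(n), onesided=false)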
The getImage function produces an RGB image from the computed spectrogram. Then getBinary computes a binary image of the resulting spectrogram at the given threshold. The result is shown below.
img = getImage(z)
bnr_img = getBinary(img, threshold)
[img bnr_img]
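getImage and getBinary are defined in utils.jl, which is not listed here. A minimal sketch of what such helpers could do, assuming the spectrogram power is normalized and mapped through a color scheme:
# Hypothetical sketches of the image helpers (the real ones live in utils.jl):
function getImage_sketch(z)
    p = DSP.pow2db.(abs2.(z))                            # power spectrogram in dB
    p = (p .- minimum(p)) ./ (maximum(p) - minimum(p))   # normalize to [0, 1]
    return get(ColorSchemes.viridis, p)                  # map values to RGB colors
end
getBinary_sketch(img, t) = Gray.(Gray.(img) .> t)        # fixed-threshold binarization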
Next, we feed the resulting binary spectrogram to HOG for feature extraction. HOG computes histograms of gradient orientations (directions of brightness change). At the output of this method we obtain a feature vector that can be fed to the input of the one-dimensional convolutional network.
This vector can also be visualized. A tall bar means that edges in the corresponding cell are strongly expressed at the given angle; a zero or near-zero value means the cell contains no gradients in that direction.
featureHoG = calcHoG(bnr_img)
plot(featureHoG, legend=false, xticks=false, yticks=false)
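calcHoG is also defined in utils.jl. A minimal sketch of a comparable helper, assuming the HOG descriptor from ImageFeatures with standard cell and block settings (the real parameters may differ):
# Hypothetical sketch of a calcHoG-style helper:
function calcHoG_sketch(bnr_img)
    hog_params = HOG(orientations=9, cell_size=8, block_size=2)   # assumed settings
    return create_descriptor(Gray.(bnr_img), hog_params)
end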
Next, we will create a dataset structure that lazily runs the entire pipeline above for each signal. We also split the data into three sets: training, validation, and test, in standard proportions. Finally, we create data loaders with a batch size of 128.
subfolders = readdir(datadir)
classes = subfolders
paths = String[]
labels = Int[]
for (cls_idx, sub) in enumerate(classes)
    folder = joinpath(datadir, sub)
    for fname in readdir(folder)
        push!(paths, joinpath(folder, fname))
        push!(labels, cls_idx)
    end
end
N = length(paths)
perm = randperm(N)
paths = paths[perm]
labels = labels[perm]
train_len = floor(Int, 0.7 * N)
val_len = floor(Int, 0.15 * N)
test_len = N - train_len - val_len
train_idx = 1:train_len
val_idx = (train_len+1):(train_len+val_len)
test_idx = (train_len+val_len+1):N
train_paths, train_labels = paths[train_idx], labels[train_idx]
val_paths, val_labels = paths[val_idx], labels[val_idx]
test_paths, test_labels = paths[test_idx], labels[test_idx]
struct LazyRadarDataset
    paths::Vector{String}
    labels::Vector{Int}
    n; hop; Fs; nfft; threshold
end
# Fetch a single sample: read the .mat file and run the preprocessing pipeline.
Base.getindex(ds::LazyRadarDataset, i::Int) = begin
    data = matread(ds.paths[i])["x"][:, 1]
    b, f = prepareSample(data, ds.n, ds.hop, ds.Fs, ds.nfft, ds.threshold)
    return Float16.(b), Float16.(f), ds.labels[i]
end
# Fetch a batch of samples and stack them into arrays of the shape Flux expects.
function Base.getindex(ds::LazyRadarDataset, idxs::AbstractVector{Int})
    B = length(idxs)
    b0, f0, _ = ds[idxs[1]]
    nb, Tb = size(b0)
    Tf = length(f0)
    batch_b = Array{Float16}(undef, nb, Tb, 1, B)
    batch_f = Array{Float16}(undef, Tf, 1, B)
    batch_labels = Vector{Int}(undef, B)
    for (j, i) in enumerate(idxs)
        bj, fj, lbl = ds[i]
        batch_b[:, :, :, j] = bj
        batch_f[:, :, j] = fj
        batch_labels[j] = lbl
    end
    return batch_b, batch_f, batch_labels
end
Base.length(ds::LazyRadarDataset) = length(ds.paths)
train_ds = LazyRadarDataset(train_paths, train_labels, n, hop, Fs, nfft, threshold)
val_ds = LazyRadarDataset(val_paths, val_labels, n, hop, Fs, nfft, threshold)
test_ds = LazyRadarDataset(test_paths, test_labels, n, hop, Fs, nfft, threshold);
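As a quick sanity check, we can fetch a single sample from the lazy dataset and inspect the shapes of the binary spectrogram and the HOG feature vector:
b1, f1, lbl1 = train_ds[1]
println("spectrogram: ", size(b1), ", features: ", size(f1), ", label: ", lbl1)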
# Wrap the datasets in DataLoaders: Flux will fetch the required elements by index.
batch_size = 128
train_loader = Flux.DataLoader(train_ds;
    batchsize = batch_size,
    shuffle = true,
    partial = true,
    parallel = true,
)
val_loader = Flux.DataLoader(val_ds; batchsize=batch_size, shuffle=true)
test_loader = Flux.DataLoader(test_ds; batchsize=batch_size, shuffle=true)
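Similarly, we can pull one batch from the loader to confirm the batched layouts the network will receive (an illustrative check):
bb, fb, yb = first(train_loader)
println("batch spectrograms: ", size(bb), ", batch features: ", size(fb), ", labels: ", length(yb))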
Let us define the device on which the network will be trained.
device = CUDA.functional() ? gpu : cpu
Let us define the architecture of the neural network, consisting of two branches and a head.
# 2-D convolutional branch: five Conv+ReLU stages with max pooling and dropout.
function makeBranch2dCnn(; p = 0.5)
    Chain(
        Conv((5, 5), 1 => 64, relu, pad=1),
        MaxPool((2, 2)),
        Conv((5, 5), 64 => 128, relu, pad=1),
        MaxPool((2, 2)),
        Conv((5, 5), 128 => 128, relu, pad=1),
        MaxPool((2, 2)),
        Conv((5, 5), 128 => 256, relu, pad=1),
        MaxPool((2, 2)),
        Conv((5, 5), 256 => 512, relu, pad=1),
        Dropout(p),
        MaxPool((2, 2)),
    )
end
# 1-D convolutional branch for the HOG feature vector.
function makeBranch1dCnn()
    Chain(
        Conv((5,), 1 => 8, relu, pad=1),
        MaxPool((2,)),
        Conv((5,), 8 => 64, relu, pad=1),
        MaxPool((2,)),
    )
end
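Before assembling the full model, each branch can be smoke-tested on a dummy input; the spatial sizes below are assumptions for illustration only. Note that such assumed sizes will not, in general, match the 432576-wide input of the head's first Dense layer, which is sized for the real spectrogram dimensions.
out2d_test = makeBranch2dCnn()(rand(Float32, 256, 256, 1, 1))   # (H, W, channels, batch); assumed size
out1d_test = makeBranch1dCnn()(rand(Float32, 512, 1, 1))        # (length, channels, batch); assumed size
println(size(out2d_test), " ", size(out1d_test))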
function makeHead()
    Chain(
        # Flatten the outputs of both branches and concatenate them per sample.
        t -> begin
            x1, x2 = t
            v1 = reshape(x2, :, size(x2, 3))   # 1-D branch features: (length, batch)
            v2 = reshape(x1, :, size(x1, 4))   # 2-D branch features: (length, batch)
            return vcat(v1, v2)
        end,
        Dense(432576, 128, relu),
        Dense(128, n_classes),
    )
end
struct Net
    branch1::Chain   # 2-D CNN over the binary spectrogram
    branch2::Chain   # 1-D CNN over the HOG feature vector
    head::Chain      # fully connected head producing logits
end
function Net()
    cnn2d = makeBranch2dCnn()
    cnn1d = makeBranch1dCnn()
    head = makeHead()
    Net(cnn2d, cnn1d, head)
end
# Forward pass: run both branches, then fuse their outputs in the head.
function (m::Net)(imgs, feat)
    out2d = m.branch1(imgs)
    out1d = m.branch2(feat)
    return m.head((out2d, out1d))
end
Flux.@functor Net
Initialize an instance of the network and move it to the appropriate device.
model = Net()
model = model |> device;
Initialize the optimizer with the learning rate specified above.
optimizer = Flux.Adam(lr, (0.9, 0.99));
Let us define the functions for training and validation within a single epoch.
lossSnet(imgs, features, y) = Flux.Losses.logitcrossentropy(model(imgs, features), y);
function train_one_epoch(model, Loader, correct_sample, TSamples, Tloss, Optimizer, device)
    for (imgs, feat, y) in Loader
        TSamples += length(y)
        imgs = device(imgs)
        feat = device(feat)
        y = device(y)
        wg = Flux.withgradient(Flux.params(model)) do
            ŷ = model(imgs, feat)
            l = Flux.Losses.logitcrossentropy(ŷ, onehotbatch(y, CLASSES))
            return l, ŷ   # return two values: the loss and the logits
        end
        batch_loss, y_pred = wg.val
        Flux.update!(Optimizer, Flux.params(model), wg.grad)
        Tloss += batch_loss
        correct_sample += sum(onecold(y_pred, CLASSES) .== y)
    end
    return Tloss, TSamples, correct_sample
end;
function validate(model, val_loader, task, device, correct_sample, TSamples, running_loss)
    preds = nothing
    targets = Int[]   # an empty Int vector
    for (imgs, feat, y) in val_loader
        TSamples += length(y)
        imgs = device(imgs)
        feat = device(feat)
        y = device(y)
        ŷ = model(imgs, feat)
        running_loss += Flux.Losses.logitcrossentropy(ŷ, onehotbatch(y, CLASSES))
        correct_sample += sum(onecold(ŷ, CLASSES) .== y)
    end
    epoch_acc = 100.0 * (correct_sample / TSamples)
    epoch_loss = running_loss / TSamples
    return epoch_loss, preds, targets, epoch_acc
end
Let us start the training loop.
for epoch in 1:10
    total_loss = 0.0
    train_running_correct = 0
    total_samples = 0
    correct_sample_val = 0
    TSamples_val = 0
    running_loss_val = 0
    @info "Epoch $epoch"
    @info "Training"
    Tloss, TSamples, correct_sample = train_one_epoch(model, train_loader, train_running_correct,
        total_samples, total_loss, optimizer, device)
    @info "Validation"
    avg_loss, preds, targets, epoch_acc_valid = validate(model, val_loader, "val", device, correct_sample_val, TSamples_val, running_loss_val)
    epoch_loss = Tloss / TSamples
    epoch_acc = 100.0 * (correct_sample / TSamples)
    println("loss: $epoch_loss, accuracy: $epoch_acc, val_loss: $avg_loss, val_acc: $epoch_acc_valid")
end
Let us evaluate the model.
function evaluate_model_accuracy(loader, model, classes)
    total_loss, correct_predictions, total_samples = 0.0, 0, 0
    all_preds = Int[]
    true_labels = Int[]
    for (imgs, feat, y) in loader
        imgs = device(imgs)
        feat = device(feat)
        y = device(y)
        y_pred = model(imgs, feat)
        total_loss += Flux.Losses.logitcrossentropy(y_pred, onehotbatch(y, classes))
        preds = onecold(y_pred, classes)
        correct_predictions += sum(preds .== y)
        # move predictions and labels back to the CPU before collecting them
        append!(all_preds, cpu(preds))
        append!(true_labels, cpu(y))
        total_samples += length(y)
    end
    # compute the accuracy
    accuracy = 100.0 * correct_predictions / total_samples
    return accuracy, all_preds, true_labels
end;
Let us evaluate our model by computing the accuracy on the test dataset.
accuracy_score, all_preds, true_labels = evaluate_model_accuracy(test_loader, model, CLASSES);
println("Accuracy of the trained model: ", accuracy_score, "%")
Now let us build a confusion matrix for a visual evaluation of the model.
function plot_confusion_matrix(C)
    # create a heatmap with large fonts and contrasting colors
    heatmap(
        C,
        title = "Confusion Matrix",
        xlabel = "Predicted",
        ylabel = "True",
        xticks = (1:length(classes), classes),
        yticks = (1:length(classes), classes),
        c = :viridis,   # high-contrast color scheme
        colorbar_title = "Count",
        size = (600, 400)
    )
    # annotate the cells with their values for better readability
    for i in 1:size(C, 1)
        for j in 1:size(C, 2)
            annotate!(j, i, text(C[i, j], :white, 12, :bold))
        end
    end
    # display the figure explicitly
    display(current())
end;
conf_matrix = CM.confmat(all_preds, true_labels)
conf_matrix = CM.matrix(conf_matrix)
plot_confusion_matrix(conf_matrix)
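Beyond the visual matrix, per-class recall can be read off the confusion matrix directly. A small illustrative computation, assuming the convention that rows of conf_matrix are predictions and columns are true labels (check the CM.confmat documentation for the exact orientation):
using LinearAlgebra
# per-class recall = correct predictions of a class / total true samples of that class
per_class_recall = diag(conf_matrix) ./ vec(sum(conf_matrix, dims=1))
println(round.(per_class_recall; digits=3))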
Further improvement of the model comes down to a better choice of architecture and a search for optimal hyperparameters.
Conclusion
This example showed a workflow for LPI radar signal classification using machine learning and deep learning techniques. Although the example uses synthetic data for training and testing, it can easily be extended to real radar measurements.