Radar Signal Classification Using Deep Learning
Introduction
This example shows how to classify radar signals using deep learning.
Modulation classification is an important function of an intelligent receiver, with many applications in, for example, cognitive radar and software-defined radio systems. Typically, identifying these signals and classifying them by modulation type requires extracting salient features and feeding them to a classifier. This example shows how a deep learning network can automatically extract time-frequency features from the signals and classify them.
The first part of this example simulates a radar signal classification system that synthesizes three pulsed waveforms and classifies them. The radar waveforms are:
* Rectangular
* Linear frequency modulation (LFM)
* Barker code
Setup
Install the required packages
include("PackagesHelper.jl")
using Pkg
Pkg.instantiate()
using CategoricalArrays
using OneHotArrays
using BSON: @save, @load
using Random, Statistics, FFTW, DSP, Images, ImageIO, ImageTransformations, FileIO
using Flux, Metalhead, MLUtils, CUDA, StatsBase, StatisticalMeasures
include("utils.jl");
We set several constants: the device the model will be trained on, the image resolution, the data-generation parameters, and so on.
Random.seed!(42)
device = CUDA.functional() ? gpu : cpu           # train on the GPU when one is available
IMG_SZ = (224, 224)                              # spectrogram image resolution
OUT = "tfd_db"                                   # output folder for the generated dataset
radar_classes = ["Barker", "LFM", "Rect"]
CLASSES = 1:length(radar_classes)
N_PER_CLASS = 600
train_ratio, val_ratio, test_ratio = 0.8, 0.1, 0.1
DATA_MEAN = (0.485f0, 0.456f0, 0.406f0)          # ImageNet channel means
DATA_STD = (0.229f0, 0.224f0, 0.225f0)           # ImageNet channel standard deviations
We generate a dataset representing the three signal types. The function helperGenerateRadarWaveforms creates a synthetic dataset of radar signals with three modulation types: rectangular pulses, LFM chirps, and Barker codes. The parameters (carrier frequency, bandwidth, length, sweep direction, etc.) are chosen randomly for each signal, after which noise, frequency offsets, and distortions are added. The function returns a list of complex-valued sequences and a list of labels containing the modulation type of each signal.
data, truth = helperGenerateRadarWaveforms(Fs=1e8, nSignalsPerMod=3000, seed=0)
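For illustration, here is a minimal sketch of the three ideal (noise-free) pulse waveforms with hypothetical fixed parameters; helperGenerateRadarWaveforms randomizes the parameters and adds noise and distortions.
# Minimal sketch of the three ideal pulse waveforms (hypothetical fixed parameters;
# not the helperGenerateRadarWaveforms implementation).
fs = 1e8                                         # sample rate, Hz
n  = 1024
t  = (0:n-1) ./ fs                               # time vector, s
rect_pulse = ones(ComplexF64, n)                 # rectangular pulse: constant envelope
B = 5e6                                          # chirp bandwidth, Hz
lfm_pulse = exp.(im .* π .* (B / t[end]) .* t .^ 2)                   # LFM: linearly swept frequency
barker13 = [1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1]                # Barker-13 phase code
barker_pulse = ComplexF64.(repeat(barker13, inner=cld(n, 13))[1:n])   # phase-coded pulse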
Next, we convert the set of radar signals into images for subsequent training of the neural network. First, the function tfd_image_gray builds a spectrogram of the signal, converts it to a logarithmic scale, normalizes the values, and forms a grayscale image. Then the function save_dataset_as_tfd_images_splits takes the list of signals and their class labels, randomly splits them into training, validation, and test parts, creates the required folder structure, and saves the corresponding spectrogram of each signal as a PNG image.
save_dataset_as_tfd_images_splits(data, truth; Fs=1e8, outdir="tfd_db", img_sz=(224,224), ratios=(0.8,0.1,0.1), seed=0)
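The actual tfd_image_gray lives in utils.jl; the following is only a rough sketch of the transformation it is described as performing, where the window length and overlap are assumptions.
# Rough sketch of the spectrogram-to-grayscale-image step (assumed parameters;
# the real implementation is tfd_image_gray in utils.jl).
function spectrogram_image_sketch(sig::AbstractVector{<:Complex}; Fs=1e8, img_sz=IMG_SZ)
    S = DSP.spectrogram(sig, 128, 100; fs=Fs, window=DSP.hanning)     # time-frequency distribution
    P = 10 .* log10.(S.power .+ eps())                                # logarithmic (dB) scale
    P = (P .- minimum(P)) ./ (maximum(P) - minimum(P) + eps())        # normalize to [0, 1]
    Gray.(imresize(P, img_sz))                                        # grayscale image at the target resolution
end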
Let's visualize one signal of each type.
img1 = Images.load(raw"tfd_db/val/Rect/4283146180.png")
img2 = Images.load(raw"tfd_db/val/Barker/3375303598.png")
img3 = Images.load(raw"tfd_db/val/LFM/3736510008.png")
[img1 img2 img3]
Next, we define the functions that build the dataset used to train the model. The function Create_dataset takes the path to the image folders, loads the images into memory, and applies the transformations in Augment_func, such as resizing and permuting the axes into the order that Flux expects.
function Augment_func(img)
    resized = imresize(img, IMG_SZ)        # resize to the network input resolution
    rgb = RGB.(resized)                    # grayscale PNG -> 3-channel RGB
    ch = channelview(rgb)                  # (channels, height, width)
    x = permutedims(ch, (3, 2, 1))         # reorder axes to width, height, channels
    Float32.(x)
end
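Note that the DATA_MEAN and DATA_STD constants defined earlier (ImageNet statistics) are not applied by Augment_func; if that normalization is desired, it could be added along these lines (a sketch, not part of the original pipeline).
# Optional sketch: ImageNet-style channel normalization using DATA_MEAN / DATA_STD.
# Augment_func above does not apply it; this only shows how it could be done.
function normalize_channels(x::AbstractArray{Float32,3})
    μ = reshape(collect(Float32.(DATA_MEAN)), 1, 1, :)   # broadcast over width and height
    σ = reshape(collect(Float32.(DATA_STD)), 1, 1, :)
    (x .- μ) ./ σ
end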
function Create_dataset(path)
    img_train, img_test, img_valid = [], [], []
    label_train, label_test, label_valid = [], [], []
    train_path = joinpath(path, "train")
    test_path = joinpath(path, "test")
    valid_path = joinpath(path, "val")
    # Load every image in `directory`, preprocess it with Augment_func,
    # and record its label index.
    function process_directory(directory, img_array, label_array, label_idx)
        for file in readdir(directory)
            if endswith(file, ".jpg") || endswith(file, ".png")
                file_path = joinpath(directory, file)
                img = Images.load(file_path)
                img = Augment_func(img)
                push!(img_array, img)
                push!(label_array, label_idx)
            end
        end
    end
    for (idx, label) in enumerate(readdir(train_path))
        println("Processing label in train: ", label)
        label_dir = joinpath(train_path, label)
        process_directory(label_dir, img_train, label_train, idx)
    end
    for (idx, label) in enumerate(readdir(test_path))
        println("Processing label in test: ", label)
        label_dir = joinpath(test_path, label)
        process_directory(label_dir, img_test, label_test, idx)
    end
    for (idx, label) in enumerate(readdir(valid_path))
        println("Processing label in valid: ", label)
        label_dir = joinpath(valid_path, label)
        process_directory(label_dir, img_valid, label_valid, idx)
    end
    return img_train, img_test, img_valid, label_train, label_test, label_valid
end;
Create the dataset
img_train, img_test, img_valid, label_train, label_test, label_valid = Create_dataset("tfd_db");
Create the data loaders
train_loader = DataLoader((data=img_train, label=label_train), batchsize=64, shuffle=true, collate=true)
test_loader = DataLoader((data=img_test, label=label_test), batchsize=64, shuffle=false, collate=true)
valid_loader = DataLoader((data=img_valid, label=label_valid), batchsize=64, shuffle=false, collate=true)
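As an optional sanity check (assuming the batches collate into WHCN arrays), we can look at the shape of one training batch.
# Optional sanity check: the first collated batch should be a 4-D WHCN array
# (width, height, channels, batch) plus a vector of integer labels.
xb, yb = first(train_loader)
println(size(xb))   # expected: (224, 224, 3, 64)
println(size(yb))   # expected: (64,)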
Next, we define the functions used to train and validate the model.
function train!(model, train_loader, opt, loss_fn, device, epoch::Int, num_epochs::Int)
    Flux.trainmode!(model)
    running_loss = 0.0
    n_batches = 0
    for (data, label) in train_loader
        x = device(data)
        yoh = Flux.onehotbatch(label, CLASSES) |> device
        # Compute the loss and its gradient with respect to the model parameters.
        loss_val, gs = Flux.withgradient(Flux.params(model)) do
            ŷ = model(x)
            loss_fn(ŷ, yoh)
        end
        Flux.update!(opt, Flux.params(model), gs)
        running_loss += Float64(loss_val)
        n_batches += 1
    end
    return opt, running_loss / max(n_batches, 1)
end
function validate(model, val_loader, loss_fn, device)
    Flux.testmode!(model)
    running_loss = 0.0
    n_batches = 0
    for (data, label) in val_loader
        x = device(data)
        yoh = Flux.onehotbatch(label, CLASSES) |> device
        ŷ = model(x)
        loss_val = loss_fn(ŷ, yoh)
        running_loss += Float64(loss_val)
        n_batches += 1
    end
    Flux.trainmode!(model)
    return running_loss / max(n_batches, 1)
end
Initialize the SqueezeNet model
model = SqueezeNet(pretrain=false, nclasses=length(radar_classes))
model = device(model);
Let's set the loss function, the optimizer, and the learning rate.
lr = 0.0001
lossFunction(x, y) = Flux.Losses.logitcrossentropy(x, y);
opt = Flux.Adam(lr, (0.9, 0.99));
Classes = 1:length(radar_classes);
Let's start the model's training loop.
no_improve_epochs = 0
best_model = nothing
train_losses = [];
valid_losses = [];
best_val_loss = Inf;
num_epochs = 50
for epoch in 1:num_epochs
    println("-"^50 * "\n")
    println("EPOCH $(epoch):")
    opt, train_loss = train!(
        model, train_loader, opt,
        lossFunction, device, epoch, num_epochs
    )
    val_loss = validate(model, valid_loader, lossFunction, device)
    # Keep a copy of the weights with the best validation loss so far.
    if val_loss < best_val_loss
        best_val_loss = val_loss
        best_model = deepcopy(model)
    end
    println("Epoch $epoch/$num_epochs | train $(round(train_loss, digits=4)) | val $(round(val_loss, digits=4))")
    push!(train_losses, train_loss)
    push!(valid_losses, val_loss)
end
Save the trained model
model = cpu(best_model)   # keep the weights with the best validation loss
mkpath("$(@__DIR__)/models")
@save "$(@__DIR__)/models/modelCLSRadarSignal.bson" model
Testing the model
Let's run inference and build a confusion matrix.
@load "$(@__DIR__)/models/modelCLSRadarSignal.bson" model
model = model |> device;
Let's write a function that evaluates the trained model and computes the accuracy metric.
function evaluate_model_accuracy(test_loader, model, classes)
    Flux.testmode!(model)
    correct_predictions, total_samples = 0, 0
    all_preds = []
    true_labels = []
    for (data, label) in test_loader
        x = device(data)
        ŷ = model(x)
        preds = Flux.onecold(cpu(ŷ), classes)   # predicted class indices
        append!(all_preds, preds)
        append!(true_labels, label)
        correct_predictions += sum(preds .== label)
        total_samples += length(label)
    end
    accuracy = 100.0 * correct_predictions / total_samples
    return accuracy, all_preds, true_labels
end
accuracy_score, all_preds, true_labels = evaluate_model_accuracy(test_loader, model, CLASSES);
println("Accuracy of the trained model: ", accuracy_score, "%")
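Since StatisticalMeasures is already loaded, the confusion matrix mentioned above could be built from the collected predictions roughly as follows (a sketch; it assumes the confmat measure accepts the categorical label vectors directly).
# Sketch: confusion matrix over the collected predictions, using the confmat
# measure from StatisticalMeasures (categorical() makes the class levels explicit).
ŷ_cat = categorical(radar_classes[Int.(all_preds)])
y_cat = categorical(radar_classes[Int.(true_labels)])
cm = confmat(ŷ_cat, y_cat)
println(cm)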
We will also test the model on an individual sample.
classes = readdir("tfd_db/train")
cls = rand(classes)
files = readdir(joinpath("tfd_db/train", cls))
f = rand(files)
path = joinpath("tfd_db/train", cls, f)
img = Images.load(path)
img = Augment_func(img)
img = reshape(img, size(img)..., 1)        # add a batch dimension
ŷ = model(device(img))
probs = Flux.softmax(ŷ)
pred_idx = argmax(vec(Array(probs)))       # index of the most probable class
pred_class = radar_classes[pred_idx]
println("True class: ", cls)
println("Predicted class: ", pred_class)
Conclusion
In this demonstration example, a convolutional neural network was trained to classify radar signals.
The training achieved good metric values.