
Merge pull request #1269 from Luap99/v1.15

release v1.15.2
openshift-merge-bot[bot], committed by GitHub on 2025-06-04 14:34:52 +00:00
4 changed files with 59 additions and 54 deletions

Cargo.lock (generated)

@@ -487,9 +487,9 @@ checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
 [[package]]
 name = "dhcproto"
-version = "0.9.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcee045385d5f7819022821f41209b9945d17550760b0b2349aaef4ecfa14bc3"
+checksum = "f6794294f2c4665aae452e950c2803a1e487c5672dc8448f0bfa3f52ff67e270"
 dependencies = [
  "dhcproto-macros",
  "hex",
@@ -1221,7 +1221,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "43b627935a2f5d654613bea2bcd677cc760b03ecf224ced0f1970c0d174813b9"
 dependencies = [
  "lazy_static",
- "nix 0.29.0",
+ "nix",
  "regex",
 ]
@@ -1371,9 +1371,9 @@ dependencies = [
 [[package]]
 name = "mozim"
-version = "0.2.5"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "610e34113d007c3588631f879854c14fbf291f3ab7853b833220f7cbf6ece8ad"
+checksum = "8232b853f83a0c76331d934627aeec172e9d5f2c82d1f9e7f86caa0df72cb304"
 dependencies = [
  "byteorder",
  "dhcproto",
@@ -1382,7 +1382,7 @@ dependencies = [
  "libc",
  "log",
  "nispor",
- "nix 0.27.1",
+ "nix",
  "rand 0.8.5",
 ]
@@ -1414,7 +1414,7 @@ checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084"
 [[package]]
 name = "netavark"
-version = "1.15.1"
+version = "1.15.2"
 dependencies = [
  "anyhow",
  "chrono",
@@ -1435,7 +1435,7 @@ dependencies = [
  "netlink-sys",
  "nftables",
  "nispor",
- "nix 0.29.0",
+ "nix",
  "once_cell",
  "prost",
  "rand 0.9.1",
@@ -1562,17 +1562,6 @@ dependencies = [
  "wl-nl80211",
 ]
-[[package]]
-name = "nix"
-version = "0.27.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
-dependencies = [
- "bitflags",
- "cfg-if",
- "libc",
-]
 [[package]]
 name = "nix"
 version = "0.29.0"
@@ -1926,7 +1915,7 @@ dependencies = [
  "netlink-packet-utils",
  "netlink-proto",
  "netlink-sys",
- "nix 0.29.0",
+ "nix",
  "thiserror 1.0.69",
  "tokio",
 ]
@@ -2927,7 +2916,7 @@ dependencies = [
  "futures-core",
  "futures-lite",
  "hex",
- "nix 0.29.0",
+ "nix",
  "ordered-stream",
  "serde",
  "serde_repr",

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "netavark"
-version = "1.15.1"
+version = "1.15.2"
 edition = "2021"
 authors = ["github.com/containers"]
 license = "Apache-2.0"
@@ -49,7 +49,7 @@ fs2 = "0.4.3"
 tokio = { version = "1.45.0", features = ["rt", "rt-multi-thread", "signal", "fs"] }
 tokio-stream = { version = "0.1.17", features = ["net"] }
 tonic = "0.13.1"
-mozim = "0.2.5"
+mozim = "0.2.6"
 prost = "0.13.5"
 futures-channel = "0.3.31"
 futures-core = "0.3.31"

RELEASE_NOTES.md

@@ -1,5 +1,10 @@
 # Release Notes
+## v1.15.2
+* Fixed a bug that caused a thread leak in the dhcp-proxy for each started container. ([#811](https://github.com/containers/netavark/issues/811))
+* Fixed a bug which printed bogus errors when the dhcp-proxy was run with an activity timeout of 0. ([#1262](https://github.com/containers/netavark/issues/1262))
 ## v1.15.1
 * Fixed a regression that caused container name lookups to get the wrong ip address when the host's search domain responded for the same name. ([containers/podman#26198](https://github.com/containers/podman/issues/26198))
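The first note refers to the per-container thread leak in the dhcp-proxy (#811). The proxy keeps each spawned DHCP task in a map keyed by the container MAC so it can be stopped on teardown (see the task_map field in the source diff below). As a rough illustration of that bookkeeping pattern only, not the netavark code or the actual fix, the following sketch uses invented names (TaskTracker, spawn_for, teardown) and a stand-in renewal loop:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::task::AbortHandle;

// Hypothetical bookkeeping for long-running per-container tasks,
// keyed by container MAC (mirrors the shape of the task_map field below).
#[derive(Default, Clone)]
struct TaskTracker {
    tasks: Arc<Mutex<HashMap<String, AbortHandle>>>,
}

impl TaskTracker {
    // Spawn a stand-in renewal loop and remember its AbortHandle.
    fn spawn_for(&self, mac: &str) {
        let handle = tokio::spawn(async {
            loop {
                tokio::time::sleep(std::time::Duration::from_secs(60)).await;
                // a real task would renew the DHCP lease here
            }
        });
        self.tasks
            .lock()
            .unwrap()
            .insert(mac.to_string(), handle.abort_handle());
    }

    // On container teardown, abort the task so it does not outlive the lease.
    fn teardown(&self, mac: &str) {
        if let Some(handle) = self.tasks.lock().unwrap().remove(mac) {
            handle.abort();
        }
    }
}

#[tokio::main]
async fn main() {
    let tracker = TaskTracker::default();
    tracker.spawn_for("aa:bb:cc:dd:ee:ff");
    tracker.teardown("aa:bb:cc:dd:ee:ff");
}
```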

dhcp-proxy server source

@@ -56,7 +56,7 @@ struct NetavarkProxyService<W: Write + Clear> {
     // the timeout for the dora operation
     dora_timeout: u32,
     // channel send-side for resetting the inactivity timeout
-    timeout_sender: Arc<Mutex<Sender<i32>>>,
+    timeout_sender: Option<Arc<Mutex<Sender<i32>>>>,
     // All dhcp poll will be spawned on a new task, keep track of it so
     // we can remove it on teardown. The key is the container mac.
     task_map: Arc<Mutex<HashMap<String, AbortHandle>>>,
@@ -64,17 +64,19 @@ struct NetavarkProxyService<W: Write + Clear> {
 impl<W: Write + Clear> NetavarkProxyService<W> {
     fn reset_inactivity_timeout(&self) {
-        let sender = self.timeout_sender.clone();
-        let locked_sender = match sender.lock() {
-            Ok(v) => v,
-            Err(e) => {
-                log::error!("{}", e);
-                return;
-            }
-        };
-        match locked_sender.try_send(1) {
-            Ok(..) => {}
-            Err(e) => log::error!("{}", e),
-        }
+        if let Some(sender) = &self.timeout_sender {
+            let sender_clone = sender.clone();
+            let locked_sender = match sender_clone.lock() {
+                Ok(v) => v,
+                Err(e) => {
+                    log::error!("{}", e);
+                    return;
+                }
+            };
+            match locked_sender.try_send(1) {
+                Ok(..) => {}
+                Err(e) => log::error!("{}", e),
+            }
+        }
     }
 }
@@ -285,11 +287,18 @@ pub async fn serve(opts: Opts) -> NetavarkResult<()> {
     // Create send and receive channels for activity timeout. If anything is
     // sent by the tx side, the inactivity timeout is reset
-    let (activity_timeout_tx, activity_timeout_rx) = mpsc::channel(5);
+    let (activity_timeout_tx, activity_timeout_rx) = if inactivity_timeout.as_secs() > 0 {
+        let (tx, rx) = mpsc::channel(5);
+        (Some(tx), Some(rx))
+    } else {
+        (None, None)
+    };
     let netavark_proxy_service = NetavarkProxyService {
         cache: cache.clone(),
         dora_timeout,
-        timeout_sender: Arc::new(Mutex::new(activity_timeout_tx.clone())),
+        timeout_sender: activity_timeout_tx
+            .clone()
+            .map(|tx| Arc::new(Mutex::new(tx))),
         task_map: Arc::new(Mutex::new(HashMap::new())),
     };
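The hunks above carry the fix for #1262: when the proxy is started with an activity timeout of 0, the reset channel is never created, and resetting the timer becomes a no-op instead of logging errors against a channel that serves no purpose. A minimal standalone sketch of the same pattern, with hypothetical activity_channel and reset_timer helpers rather than netavark's own functions:

```rust
use std::time::Duration;
use tokio::sync::mpsc;

// Only build the reset channel when a non-zero inactivity timeout was
// requested; a timeout of 0 means "never exit on inactivity".
fn activity_channel(
    inactivity_timeout: Duration,
) -> (Option<mpsc::Sender<i32>>, Option<mpsc::Receiver<i32>>) {
    if inactivity_timeout.as_secs() > 0 {
        let (tx, rx) = mpsc::channel(5);
        (Some(tx), Some(rx))
    } else {
        (None, None)
    }
}

// With no channel present this is a no-op, so a disabled timer
// never produces spurious send errors.
fn reset_timer(tx: &Option<mpsc::Sender<i32>>) {
    if let Some(tx) = tx {
        if let Err(e) = tx.try_send(1) {
            eprintln!("failed to reset inactivity timer: {e}");
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, _rx) = activity_channel(Duration::from_secs(0));
    reset_timer(&tx); // does nothing: the timeout is disabled
}
```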
@@ -328,29 +337,31 @@ pub async fn serve(opts: Opts) -> NetavarkResult<()> {
 ///
 /// ```
 async fn handle_wakeup<W: Write + Clear>(
-    mut rx: mpsc::Receiver<i32>,
+    rx: Option<mpsc::Receiver<i32>>,
     timeout_duration: Duration,
     current_cache: Arc<Mutex<LeaseCache<W>>>,
 ) {
-    loop {
-        match timeout(timeout_duration, rx.recv()).await {
-            Ok(Some(_)) => {
-                debug!("timeout timer reset")
-            }
-            Ok(None) => {
-                println!("timeout channel closed");
-                break;
-            }
-            Err(_) => {
-                // only 'exit' if the timeout is met AND there are no leases
-                // if we do not exit, the activity_timeout is reset
-                if is_catch_empty(current_cache.clone()) {
-                    println!(
-                        "timeout met: exiting after {} secs of inactivity",
-                        timeout_duration.as_secs()
-                    );
-                    break;
-                }
-            }
-        }
-    }
+    if let Some(mut rx) = rx {
+        loop {
+            match timeout(timeout_duration, rx.recv()).await {
+                Ok(Some(_)) => {
+                    debug!("timeout timer reset")
+                }
+                Ok(None) => {
+                    println!("timeout channel closed");
+                    break;
+                }
+                Err(_) => {
+                    // only 'exit' if the timeout is met AND there are no leases
+                    // if we do not exit, the activity_timeout is reset
+                    if is_catch_empty(current_cache.clone()) {
+                        println!(
+                            "timeout met: exiting after {} secs of inactivity",
+                            timeout_duration.as_secs()
+                        );
+                        break;
+                    }
+                }
+            }
+        }
+    }
 }
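The rewritten handle_wakeup only runs the inactivity watchdog when it was actually handed a receiver. The general shape of that loop, tokio::time::timeout wrapped around recv() so that a received message means "activity, reset the timer" and an elapsed timeout means "consider exiting", can be reproduced in a small self-contained sketch; watchdog and cache_is_empty below are stand-ins, not netavark's functions:

```rust
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::timeout;

// Stand-in for the real lease-cache check: pretend the cache is empty.
fn cache_is_empty() -> bool {
    true
}

// Watchdog in the spirit of handle_wakeup: each received message resets the
// timer; if nothing arrives within `timeout_duration` and no leases remain,
// the loop exits. With rx == None the watchdog does nothing, which is how an
// activity timeout of 0 behaves.
async fn watchdog(rx: Option<mpsc::Receiver<i32>>, timeout_duration: Duration) {
    let Some(mut rx) = rx else { return; };
    loop {
        match timeout(timeout_duration, rx.recv()).await {
            Ok(Some(_)) => println!("activity seen, timer reset"),
            Ok(None) => {
                println!("all senders dropped, stopping watchdog");
                break;
            }
            Err(_) => {
                if cache_is_empty() {
                    println!("idle for {}s, exiting", timeout_duration.as_secs());
                    break;
                }
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(5);
    let task = tokio::spawn(watchdog(Some(rx), Duration::from_secs(1)));
    tx.send(1).await.unwrap(); // simulate one request resetting the timer
    task.await.unwrap(); // returns about 1s after the last activity
}
```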