File: listener.rs

package: rust-interprocess 2.2.3-1
file content:
//{
#[cfg(not(all(windows, feature = "tokio")))]
fn main() {
    eprintln!("This example is only available on Windows with the Tokio feature enabled.");
}
#[cfg(all(windows, feature = "tokio"))]
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    //}
    use {
        interprocess::os::windows::named_pipe::{pipe_mode, tokio::*, PipeListenerOptions},
        std::{io, path::Path},
        tokio::{
            io::{AsyncReadExt, AsyncWriteExt},
            try_join,
        },
    };

    // Describe the things we do when we've got a connection ready.
    async fn handle_conn(conn: DuplexPipeStream<pipe_mode::Bytes>) -> io::Result<()> {
        // Split the connection into two halves to process received and sent data concurrently.
        let (mut recver, mut sender) = conn.split();

        // Allocate a sizeable buffer for receiving. This size should be enough and is easy for
        // the allocator to find.
        let mut buffer = String::with_capacity(128);

        // Describe the send operation as first sending our whole message, and then shutting down
        // the send half to send an EOF to help the other side determine the end of the
        // transmission.
        let send = async {
            sender.write_all(b"Hello from server!").await?;
            sender.shutdown().await?;
            Ok(())
        };

        // Describe the receive operation as receiving into our big buffer.
        let recv = recver.read_to_string(&mut buffer);

        // Run both the send-and-invoke-EOF operation and the receive operation concurrently.
        try_join!(recv, send)?;

        // Dispose of our connection right now and not a moment later because I want to!
        drop((recver, sender));

        // Produce our output!
        println!("Client answered: {}", buffer.trim());
        Ok(())
    }

    static PIPE_NAME: &str = "Example";

    // Create our listener.
    let listener = PipeListenerOptions::new()
        .path(Path::new(PIPE_NAME))
        .create_tokio_duplex::<pipe_mode::Bytes>()?;

    // The synchronization between the server and client, if any is used, goes here.
    eprintln!(r"Server running at \\.\pipe\{PIPE_NAME}");

    // Set up our loop boilerplate that processes our incoming connections.
    loop {
        // Handle the case where establishing an incoming connection results in an error.
        let conn = match listener.accept().await {
            Ok(c) => c,
            Err(e) => {
                eprintln!("There was an error with an incoming connection: {e}");
                continue;
            }
        };

        // Spawn a new asynchronous task onto the Tokio runtime and hand the connection over to
        // it, so that multiple clients can be processed simultaneously in a lightweight fashion.
        tokio::spawn(async move {
            // The match above handles errors in accepting the connection; this if-let handles
            // errors that happen while the connection is being served.
            if let Err(e) = handle_conn(conn).await {
                eprintln!("error while handling connection: {e}");
            }
        });
    }
} //
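
For context, a matching client for this listener could look like the minimal sketch below. It mirrors the structure of the crate's own client example; the exact connect call (DuplexPipeStream::connect_by_path) and the \\.\pipe\Example path are assumptions drawn from that example rather than from this file, so check the interprocess docs before relying on them.

// Hypothetical companion client (client.rs), not part of listener.rs.
#[cfg(all(windows, feature = "tokio"))]
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    use {
        interprocess::os::windows::named_pipe::{pipe_mode, tokio::*},
        tokio::{
            io::{AsyncReadExt, AsyncWriteExt},
            try_join,
        },
    };

    // Connect to the pipe created by the listener. connect_by_path is assumed to exist,
    // as in the crate's own client example.
    let conn = DuplexPipeStream::<pipe_mode::Bytes>::connect_by_path(r"\\.\pipe\Example").await?;

    // Split the connection so that sending and receiving can proceed concurrently.
    let (mut recver, mut sender) = conn.split();

    // Buffer for the server's reply.
    let mut buffer = String::with_capacity(128);

    // Send our message, then shut down the send half so the server's read_to_string sees EOF.
    let send = async {
        sender.write_all(b"Hello from client!").await?;
        sender.shutdown().await?;
        Ok(())
    };

    // Receive the server's whole reply into the buffer.
    let recv = recver.read_to_string(&mut buffer);

    // Run both directions concurrently.
    try_join!(recv, send)?;

    println!("Server answered: {}", buffer.trim());
    Ok(())
}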