File: wrapping_middleware_example.cpp

package info (click to toggle)
glaze 6.4.0-1
  • links: PTS, VCS
  • area: main
  • in suites: forky
  • size: 7,312 kB
  • sloc: cpp: 109,539; sh: 99; ansic: 26; makefile: 13
file content (180 lines) | stat: -rw-r--r-- 7,190 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
// Example: Wrapping Middleware
// This demonstrates wrapping middleware that can execute code before AND after
// handlers complete, enabling timing, logging, and response transformation.

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

#include "glaze/glaze.hpp"
#include "glaze/net/http_server.hpp"

// Thread-safe metrics structure
// Thread-safe metrics structure: each counter is an individual atomic, so
// concurrent middleware updates are safe (a print_stats snapshot is not
// atomic as a whole, which is acceptable for informational output).
struct ServerMetrics
{
   std::atomic<uint64_t> total_requests{0};
   std::atomic<uint64_t> total_responses{0};
   std::atomic<double> response_time_sum{0.0};
   std::atomic<uint64_t> status_2xx{0};
   std::atomic<uint64_t> status_4xx{0};
   std::atomic<uint64_t> status_5xx{0};

   // Write a human-readable snapshot of all counters to stdout.
   void print_stats() const
   {
      const auto requests = total_requests.load();
      const auto responses = total_responses.load();
      const auto elapsed_sum = response_time_sum.load();

      std::cout << "\n=== Server Metrics ===\n"
                << "Total Requests:  " << requests << "\n"
                << "Total Responses: " << responses << "\n"
                << "Average Response Time: ";
      // Guard against division by zero when no response has completed yet.
      if (responses == 0) {
         std::cout << "N/A\n";
      }
      else {
         std::cout << (elapsed_sum / responses * 1000.0) << " ms\n";
      }
      std::cout << "Status 2xx: " << status_2xx.load() << "\n"
                << "Status 4xx: " << status_4xx.load() << "\n"
                << "Status 5xx: " << status_5xx.load() << "\n"
                << "===================\n\n";
   }
};

int main()
{
   // Shared across all worker threads started below; per-counter updates are
   // safe because ServerMetrics members are atomics.
   ServerMetrics metrics;

   glz::http_server<> server;

   // NOTE(review): the nesting order of wrapping middleware presumably follows
   // registration order (first wrap() = outermost) — confirm against the glaze
   // docs. If so, the error-handling middleware (#3) does NOT catch exceptions
   // thrown from the post-next() code of #1/#2, and the header middleware (#4)
   // is skipped entirely when a handler throws, so 500 responses produced by
   // #3 will lack the X-Powered-By / X-Response-Time headers.

   // Wrapping Middleware #1: Logging middleware
   // Logs before and after each request
   server.wrap([](const glz::request& req, glz::response&, const auto& next) {
      std::cout << "→ Request: " << glz::to_string(req.method) << " " << req.target << "\n";
      next(); // Call the next middleware or handler
      std::cout << "← Response sent\n";
   });

   // Wrapping Middleware #2: Timing and metrics middleware
   // Measures request timing naturally by wrapping
   server.wrap([&metrics](const glz::request&, glz::response& res, const auto& next) {
      auto start = std::chrono::steady_clock::now();

      // Count incoming request
      metrics.total_requests.fetch_add(1, std::memory_order_relaxed);

      // Execute the rest of the middleware chain and handler
      next();

      // Now we have the response - measure timing and collect metrics
      auto end = std::chrono::steady_clock::now();
      auto duration = std::chrono::duration<double>(end - start).count();

      // NOTE(review): std::atomic<double>::fetch_add requires C++20
      // (floating-point atomic arithmetic) — fine here since glaze itself
      // targets a newer standard, but not portable to C++17.
      metrics.response_time_sum.fetch_add(duration, std::memory_order_relaxed);
      metrics.total_responses.fetch_add(1, std::memory_order_relaxed);

      // Track status codes (3xx responses are intentionally not counted in
      // any bucket below)
      if (res.status_code >= 200 && res.status_code < 300) {
         metrics.status_2xx.fetch_add(1, std::memory_order_relaxed);
      }
      else if (res.status_code >= 400 && res.status_code < 500) {
         metrics.status_4xx.fetch_add(1, std::memory_order_relaxed);
      }
      else if (res.status_code >= 500) {
         metrics.status_5xx.fetch_add(1, std::memory_order_relaxed);
      }

      std::cout << "  ⏱  " << (duration * 1000.0) << " ms - Status " << res.status_code << "\n";
   });

   // Wrapping Middleware #3: Error handling middleware
   // Catches exceptions and converts them to 500 responses
   server.wrap([](const glz::request&, glz::response& res, const auto& next) {
      try {
         next();
      }
      catch (const std::exception& e) {
         std::cerr << "Error: " << e.what() << "\n";
         res.status(500).body("Internal Server Error");
      }
   });

   // Wrapping Middleware #4: Response transformation
   // Adds custom headers to all responses produced without an exception
   server.wrap([](const glz::request&, glz::response& res, const auto& next) {
      next();
      // After handler completes, we can modify the response
      res.header("X-Powered-By", "Glaze");
      res.header("X-Response-Time", std::to_string(std::chrono::steady_clock::now().time_since_epoch().count()));
   });

   // Register some example routes. The sleep_for calls simulate handler work
   // so the timing middleware has something measurable to report.
   server.get("/", [](const glz::request&, glz::response& res) { res.body("Hello, World!"); });

   server.get("/api/users", [](const glz::request&, glz::response& res) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
      res.json({{"users", std::vector<std::string>{"alice", "bob", "charlie"}}});
   });

   server.get("/api/users/:id", [](const glz::request& req, glz::response& res) {
      std::this_thread::sleep_for(std::chrono::milliseconds(5));
      // :id is a path parameter filled in by the router; at() is safe here
      // because the route only matches when the segment is present.
      auto id = req.params.at("id");
      res.json({{"id", id}, {"name", "User " + id}});
   });

   server.get("/slow", [](const glz::request&, glz::response& res) {
      std::this_thread::sleep_for(std::chrono::milliseconds(100));
      res.body("This was slow");
   });

   server.get("/error", [](const glz::request&, glz::response&) {
      throw std::runtime_error("Simulated error");
      // The error handling middleware will catch this
   });

   // Live metrics endpoint; reads each atomic independently, so the reported
   // values may be slightly inconsistent with each other under load.
   server.get("/metrics", [&metrics](const glz::request&, glz::response& res) {
      res.json({{"total_requests", metrics.total_requests.load()},
                {"total_responses", metrics.total_responses.load()},
                {"avg_response_time_ms", metrics.total_responses.load() > 0 ? (metrics.response_time_sum.load() /
                                                                               metrics.total_responses.load() * 1000.0)
                                                                            : 0.0},
                {"status_2xx", metrics.status_2xx.load()},
                {"status_4xx", metrics.status_4xx.load()},
                {"status_5xx", metrics.status_5xx.load()}});
   });

   std::cout << "Wrapping Middleware Example\n";
   std::cout << "============================\n\n";
   std::cout << "This example demonstrates wrapping middleware that can execute\n";
   std::cout << "code both before and after handlers.\n\n";
   std::cout << "Middleware wraps the next() handler, allowing code execution:\n";
   std::cout << "  1. BEFORE the handler (request processing)\n";
   std::cout << "  2. AFTER the handler (response processing)\n\n";
   std::cout << "This enables:\n";
   std::cout << "  ✓ Natural timing measurement\n";
   std::cout << "  ✓ Response transformation\n";
   std::cout << "  ✓ Error handling around handlers\n";
   std::cout << "  ✓ Logging with full context\n";
   std::cout << "  ✓ Any cross-cutting concerns\n\n";

   std::cout << "Server starting on http://localhost:8080\n";
   std::cout << "Try these endpoints:\n";
   std::cout << "  GET /              - Home page\n";
   std::cout << "  GET /api/users     - List users (10ms processing)\n";
   std::cout << "  GET /api/users/123 - Get user (5ms processing)\n";
   std::cout << "  GET /slow          - Slow endpoint (100ms processing)\n";
   std::cout << "  GET /error         - Error endpoint (triggers error handler)\n";
   std::cout << "  GET /metrics       - View current metrics\n\n";
   std::cout << "Press Ctrl+C to stop the server\n\n";

   // Listen on port 8080 with signal handling installed, using 4 worker
   // threads; start() returns and wait_for_signal() blocks until Ctrl+C.
   server.bind(8080).with_signals().start(4);

   server.wait_for_signal();

   // Print final metrics
   metrics.print_stats();

   return 0;
}