oslo.messaging-5.35.0/0000775000175100017510000000000013224676256014575 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/tools/0000775000175100017510000000000013224676256015735 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/tools/messages_length.yaml0000666000175100017510000006103713224676046021777 0ustar zuulzuul00000000000000# The numbers below present the length of the messages (in string equivalent) # that were sent through the MQ backend (RabbitMQ) during the # boot_and_delete_server Rally scenario run (50 times, concurrency equal to 3). # The information was gathered via adding log to the _send method of # AMQPDriverBase class after all lines related to the msg object modifications. # Message length was gathered to introduce real-like message generator for # simulator.py oslo.messaging tool, that could introduce traffic closer to the # real control plane load and estimate both message length and size (in bytes) # going through the MQ layer. test_data: string_lengths: 806, 992, 992, 1116, 1116, 1191, 1595, 1199, 1043, 1210, 1220, 1191, 1123, 1624, 2583, 1153, 4412, 1642, 1210, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 6386, 6368, 6386, 6368, 6386, 11292, 2136, 5407, 6368, 11292, 2136, 5407, 2116, 2116, 11292, 2136, 5398, 5407, 4357, 5431, 2116, 2116, 5398, 4407, 5431, 2116, 2116, 5398, 4457, 5431, 4387, 2627, 4387, 2094, 2038, 2627, 2094, 2038, 5438, 4387, 5438, 2310, 2310, 2627, 2094, 2496, 2038, 5451, 2310, 5438, 2496, 2496, 2240, 2099, 2240, 1500, 2099, 2626, 5451, 2240, 2626, 1555, 1555, 1702, 1500, 5451, 1702, 2450, 2450, 1570, 1155, 4539, 1570, 4539, 1641, 2099, 1641, 2626, 1555, 1702, 2450, 1570, 3518, 5710, 1641, 2226, 2643, 3382, 6671, 3518, 2531, 2226, 2643, 2124, 3382, 5500, 3518, 2531, 2226, 2643, 965, 2124, 3382, 5500, 6858, 2531, 1177, 965, 2124, 5687, 1177, 965, 1575, 1500, 1500, 2549, 7745, 1575, 5687, 7688, 2183, 1177, 2549, 965, 6574, 7688, 2183, 7270, 2128, 7270, 2128, 1575, 6535, 2549, 6574, 6480, 2643, 2584, 6535, 1220, 2644, 7688, 2183, 1500, 1676, 2611, 1500, 6480, 2611, 2643, 1624, 2241, 1153, 4696, 7270, 2128, 2584, 2644, 1590, 2611, 2611, 1555, 2241, 1555, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4480, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4504, 5431, 4434, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 2549, 6574, 7688, 2183, 1500, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1575, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4532, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4532, 5431, 4434, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 1500, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4532, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 2099, 2626, 5451, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 5687, 1177, 965, 1575, 2549, 6574, 7688, 2183, 7270, 1500, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 
1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 1500, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1500, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4582, 6536, 2298, 2608, 1855, 1880, 1500, 2175, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4582, 5431, 4484, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6516, 2300, 6516, 5839, 6156, 6512, 1597, 1500, 1026, 1676, 1500, 6516, 4505, 1220, 2300, 6516, 1624, 6535, 1153, 4668, 5839, 2228, 6156, 1590, 6480, 2643, 6512, 2228, 2584, 1611, 2644, 1102, 1701, 2611, 4354, 2449, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1575, 2582, 2398, 6386, 2226, 6368, 2093, 3420, 6576, 2142, 4452, 11292, 2136, 6536, 5407, 6386, 6368, 2298, 2116, 2116, 2608, 5398, 1855, 1880, 2175, 4526, 5431, 11292, 2136, 5407, 4456, 2627, 2094, 2038, 2116, 2310, 2496, 5438, 2116, 2240, 5398, 5451, 4604, 5431, 2099, 2626, 1555, 4506, 2627, 1702, 2094, 2038, 5438, 2310, 2450, 2496, 4539, 2240, 1641, 2099, 1500, 1570, 6386, 2626, 5451, 1555, 6368, 1500, 1702, 2450, 11292, 2136, 1570, 5407, 3518, 2116, 2116, 5398, 4539, 2226, 1641, 4604, 2643, 5431, 3382, 3518, 5500, 4506, 2531, 2627, 2094, 2038, 5438, 2226, 2310, 2124, 2643, 3382, 5451, 2496, 5500, 2240, 2531, 2099, 2626, 1555, 5687, 2124, 1177, 1702, 965, 2450, 1570, 4539, 1641, 1575, 3518, 2226, 2643, 3382, 5500, 1575, 5687, 2531, 1177, 965, 6574, 2549, 2124, 1500, 1500, 7688, 2183, 7270, 2128, 1575, 5687, 1177, 2549, 6574, 965, 6535, 7688, 2183, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1500, 1702, 1500, 2450, 1570, 3308, 2043, 3518, 7270, 2128, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 1575, 2549, 6574, 4604, 6535, 6536, 7688, 2183, 2298, 6480, 2643, 2608, 1855, 1880, 2175, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 7270, 2128, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4604, 5431, 2142, 4604, 6535, 6536, 4506, 2627, 2094, 2038, 2298, 6480, 2643, 2310, 5438, 2608, 2496, 1855, 1880, 2175, 2584, 2240, 2644, 2099, 2626, 5451, 2611, 1555, 2611, 2241, 1702, 2450, 1555, 1570, 1702, 2450, 1570, 3308, 2043, 3518, 4539, 1641, 3518, 2582, 2398, 6386, 2226, 6368, 2093, 3420, 6576, 2226, 2643, 3382, 5500, 2142, 4604, 11292, 2136, 6536, 5407, 2531, 2116, 2116, 2124, 5398, 2298, 2608, 1855, 1880, 2175, 4604, 5431, 5687, 1177, 4506, 965, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 1500, 4539, 1641, 1500, 1575, 2549, 6574, 3518, 7688, 2183, 2226, 2643, 3382, 5500, 2531, 2124, 7270, 2128, 6386, 6368, 11292, 2136, 
5407, 5687, 1177, 2116, 2116, 5398, 965, 4604, 6535, 5431, 6480, 2643, 4506, 2584, 2627, 2094, 2644, 2038, 5438, 2611, 2310, 2611, 5451, 2496, 2241, 2240, 1575, 1555, 1702, 2450, 2099, 1570, 2626, 3308, 1555, 2043, 3518, 1702, 4539, 1575, 2450, 1641, 1570, 2549, 1500, 6574, 1500, 1220, 2582, 2398, 2226, 2093, 7688, 2183, 3420, 1624, 6576, 1676, 3518, 1153, 4717, 2142, 1590, 4501, 2226, 6536, 1611, 2643, 7270, 2128, 1102, 1701, 3382, 5500, 2449, 2298, 2608, 1855, 2531, 1880, 2175, 2124, 6535, 6480, 2643, 2584, 5687, 2644, 1177, 2611, 965, 2611, 2241, 1555, 1702, 2450, 6386, 6368, 1570, 3308, 2043, 3518, 11292, 2136, 5407, 2116, 2582, 2116, 2398, 5398, 2226, 2093, 4551, 3420, 6576, 5431, 1575, 1500, 6574, 1500, 4481, 2549, 1575, 2627, 2142, 2094, 2038, 5438, 2310, 2496, 4579, 6536, 2240, 2099, 7688, 2183, 2626, 5451, 1555, 2298, 1702, 2450, 1570, 2608, 1855, 1880, 2175, 7270, 2128, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 5687, 2241, 1177, 965, 1555, 6386, 6368, 1702, 2450, 1570, 11292, 2136, 3308, 5407, 2043, 3518, 2116, 2116, 5398, 2582, 4579, 2398, 5431, 2226, 2093, 3420, 4481, 1500, 6576, 2627, 2094, 2038, 5438, 1500, 2142, 2310, 1575, 1575, 2496, 2240, 6574, 2099, 4579, 2626, 1555, 2549, 5451, 1702, 6536, 2450, 1570, 7688, 2183, 2298, 2608, 1855, 1880, 2175, 3518, 5710, 2226, 1641, 2643, 3382, 6671, 7270, 2128, 2531, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 2124, 4629, 5431, 6535, 4531, 2627, 2094, 2038, 2310, 6480, 2643, 2496, 5438, 6858, 2584, 1177, 2240, 965, 2644, 1500, 2611, 5451, 2611, 2241, 2099, 1500, 2626, 1555, 1555, 1702, 2450, 1702, 1575, 1570, 2450, 4539, 1570, 1641, 3308, 2043, 3518, 1575, 3518, 2549, 7745, 2582, 2398, 2226, 2643, 2226, 7688, 2093, 2183, 3382, 3420, 5500, 6576, 2531, 2124, 2142, 4629, 6536, 2298, 2608, 7270, 2128, 1855, 1880, 2175, 5687, 1177, 965, 6535, 6480, 2643, 2584, 2644, 6386, 6368, 2611, 2611, 2241, 11292, 2136, 5407, 1555, 1500, 1702, 2116, 2116, 1500, 5398, 2450, 1570, 3308, 4629, 2043, 5431, 3518, 1575, 4531, 2549, 2627, 2094, 2038, 5438, 6574, 2582, 2310, 2496, 2398, 5451, 2240, 7688, 2183, 2226, 1575, 2093, 3420, 2099, 2626, 1555, 6576, 1702, 2450, 2142, 1570, 4629, 6536, 4539, 1641, 2298, 2608, 1855, 1880, 2175, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 1500, 2531, 1500, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 5687, 6386, 1177, 1555, 6368, 965, 1702, 2450, 11292, 1570, 2136, 3308, 5407, 2043, 3518, 2116, 2116, 5398, 1575, 2582, 4679, 2398, 2226, 5431, 2093, 3420, 6576, 4581, 2627, 2094, 2038, 2310, 1575, 2496, 2549, 2142, 5438, 6574, 2240, 4679, 6536, 7688, 2183, 5451, 2099, 2626, 2298, 1555, 2608, 1855, 1880, 2175, 1702, 2450, 1570, 7270, 4539, 1500, 2128, 1641, 1500, 1597, 1066, 3518, 2226, 2643, 3382, 5500, 1220, 2531, 1624, 2124, 1153, 1676, 4818, 6386, 6535, 6368, 1624, 6480, 2643, 2584, 1611, 2644, 5687, 2611, 11292, 2136, 2611, 2241, 1177, 965, 1102, 1701, 5407, 2449, 1555, 1575, 1702, 2116, 2450, 2116, 1570, 5398, 3308, 2043, 3518, 4602, 5431, 2582, 2398, 4532, 2226, 2627, 2094, 2038, 2093, 5438, 2310, 3420, 2496, 6576, 1575, 2240, 5451, 2549, 2142, 6574, 4630, 6536, 2099, 2626, 1500, 7688, 2183, 1500, 4539, 1555, 2298, 1641, 2608, 1702, 1855, 1880, 2175, 2450, 1570, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 2531, 6386, 6368, 6535, 2124, 6480, 2643, 11292, 2136, 2584, 5407, 2644, 2611, 2611, 2241, 2116, 2116, 5687, 5398, 1177, 1555, 965, 1575, 1702, 2450, 4630, 1570, 3308, 5431, 2043, 3518, 4532, 2627, 2094, 2038, 5438, 2310, 2496, 2582, 2398, 2240, 5451, 
2226, 2093, 1500, 2099, 3420, 6576, 2626, 1500, 1555, 1575, 6574, 2549, 2142, 1702, 4630, 4539, 2450, 1641, 6536, 1570, 7688, 2183, 2298, 2608, 1855, 1880, 2175, 7270, 2128, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 5687, 2241, 1177, 965, 1555, 1702, 6386, 2450, 6368, 1570, 3308, 2043, 1575, 1500, 3518, 11292, 2136, 5407, 1500, 2582, 2116, 2398, 2116, 2226, 5398, 2093, 3420, 6576, 4680, 5431, 2142, 4680, 6536, 4582, 1575, 2627, 2094, 2038, 5438, 6574, 2549, 2310, 5451, 2496, 2298, 2240, 2608, 1855, 1880, 2175, 7688, 2183, 2099, 2626, 1555, 1702, 2450, 1570, 4539, 1641, 3518, 2226, 2643, 3382, 5500, 7270, 2128, 2531, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4680, 5431, 4582, 1500, 2627, 2094, 2038, 2310, 2124, 2496, 5438, 1500, 2240, 5451, 6535, 2099, 2626, 1555, 5687, 1177, 1702, 965, 6480, 2643, 2450, 2584, 1570, 2644, 2611, 1575, 4539, 2611, 1641, 2241, 1555, 1702, 3518, 2450, 1570, 3308, 1575, 2043, 3518, 2226, 2549, 2643, 6574, 3382, 5500, 2531, 7688, 2183, 2582, 2398, 2124, 2226, 2093, 3420, 6576, 2142, 4680, 6536, 5687, 1177, 2298, 965, 2608, 1855, 1880, 2175, 7270, 2128, 1500, 1500, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4680, 5431, 4582, 1575, 2627, 2094, 2038, 5438, 2549, 6574, 2310, 2496, 5451, 6535, 1575, 2240, 6480, 2643, 2099, 2626, 7688, 2183, 2584, 1555, 2644, 1702, 2611, 2611, 2450, 1570, 2241, 4539, 1641, 1555, 7270, 2128, 1712, 1702, 1154, 2450, 1570, 3308, 2043, 1500, 3518, 3518, 1500, 2582, 2398, 1220, 2226, 2226, 2643, 2093, 1624, 3420, 6576, 3382, 1153, 5500, 6535, 2531, 2124, 4768, 1624, 2142, 1676, 4552, 6480, 6536, 2643, 2584, 2644, 2611, 2298, 2611, 2608, 1855, 1880, 2241, 2175, 5687, 1177, 965, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4552, 1575, 1575, 6536, 6386, 2549, 6368, 6574, 1500, 2298, 1500, 7688, 2183, 2608, 11292, 1855, 1880, 2175, 2136, 5407, 2116, 2116, 5398, 4552, 5431, 7270, 4482, 2128, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 6386, 6368, 6535, 4539, 1641, 11292, 2136, 5407, 6480, 2643, 1575, 2584, 3518, 2644, 2611, 2611, 2116, 2116, 2241, 5398, 2226, 2643, 1555, 1702, 3382, 5500, 4580, 2450, 1570, 5431, 3308, 2043, 2531, 3518, 4482, 2124, 2627, 2094, 2038, 2310, 2496, 5438, 2582, 5451, 2240, 2398, 2226, 5687, 2093, 2099, 3420, 2626, 1177, 1555, 6576, 965, 1702, 2450, 1570, 2142, 4580, 4539, 6536, 1641, 1500, 2298, 1500, 2608, 1855, 1880, 2175, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 2549, 6574, 5687, 7688, 2183, 1177, 965, 7270, 2128, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4630, 1575, 5431, 1500, 1575, 4532, 1500, 2627, 2094, 2038, 5438, 2310, 2496, 2549, 6574, 6535, 2240, 7688, 2183, 2099, 2626, 5451, 6480, 2643, 1555, 2584, 2644, 1702, 2611, 2450, 1570, 2611, 7270, 2241, 2128, 1555, 1702, 4539, 1641, 2450, 1570, 3308, 2043, 3518, 3518, 6535, 6480, 2643, 2582, 2226, 2398, 2226, 2584, 2644, 2643, 2611, 2093, 2611, 3382, 3420, 2241, 5500, 6576, 1500, 1500, 2531, 1555, 2142, 4630, 6536, 2124, 1702, 2450, 1570, 2298, 5687, 2608, 1855, 1880, 2175, 3308, 2043, 1177, 965, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4630, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1575, 6386, 6368, 2549, 6574, 11292, 2136, 7688, 2183, 5407, 2116, 2116, 5398, 4630, 5431, 4532, 2627, 2094, 2038, 2310, 5438, 7270, 2496, 2128, 1500, 1500, 2240, 2099, 5451, 2626, 1555, 6386, 6368, 1702, 2450, 1570, 11292, 1575, 2136, 5407, 4539, 2116, 1641, 2116, 5398, 6535, 3518, 6480, 2643, 4630, 5431, 2226, 2643, 2584, 2644, 
2611, 3382, 2611, 2241, 5500, 1555, 4532, 2627, 2094, 2038, 2531, 1702, 2310, 2450, 1570, 2496, 2124, 3308, 5438, 2240, 2043, 3518, 2099, 5451, 2626, 1555, 1702, 2582, 2398, 5687, 2450, 2226, 1570, 1177, 965, 2093, 3420, 6576, 2142, 4630, 4539, 6536, 1641, 1500, 3518, 1500, 2298, 2608, 1855, 1880, 2175, 2226, 2643, 1220, 3382, 5500, 1575, 1676, 2531, 2549, 6574, 1624, 2124, 7688, 2183, 1153, 4741, 1590, 1611, 5687, 1102, 1701, 1177, 965, 2449, 1597, 1066, 7270, 2128, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 4525, 5431, 4455, 2627, 2094, 2038, 5438, 2310, 2496, 1500, 2240, 5451, 1500, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 4539, 1641, 2549, 6574, 6535, 3518, 7688, 2183, 6480, 2643, 2584, 2644, 2226, 2611, 2643, 2611, 3382, 2241, 5500, 1555, 2531, 7270, 2124, 2128, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4553, 6536, 1500, 1500, 2298, 2608, 1855, 1880, 2175, 6535, 5687, 1177, 965, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 1575, 6574, 6386, 2549, 2142, 6368, 4553, 11292, 2136, 6536, 5407, 7688, 2183, 2116, 2298, 2116, 5398, 2608, 1855, 1880, 2175, 1500, 1500, 7270, 2128, 4553, 5431, 4455, 6386, 6368, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 6535, 5451, 11292, 2136, 6480, 2643, 5407, 2584, 2099, 2116, 2626, 2644, 1555, 2116, 2611, 5398, 1702, 2611, 1575, 2450, 2241, 4539, 4553, 1570, 1555, 1641, 5431, 1702, 2450, 4455, 1570, 2627, 2094, 2038, 3308, 5438, 2310, 2043, 2496, 3518, 2240, 3518, 5451, 2099, 2626, 2226, 2643, 2582, 2398, 3382, 1555, 5500, 2226, 1702, 2093, 2531, 2450, 3420, 1570, 6576, 2124, 4539, 1641, 2142, 4553, 6536, 2298, 3518, 1500, 2608, 1855, 1880, 2175, 1500, 2226, 2643, 3382, 5500, 5687, 2531, 1177, 965, 2124, 6386, 6368, 11292, 2136, 5407, 1575, 5687, 2549, 6574, 1177, 2116, 965, 2116, 7688, 2183, 5398, 4553, 5431, 1575, 4455, 2627, 2094, 2038, 5438, 2310, 2496, 1500, 7270, 1500, 2128, 2240, 5451, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 2549, 4539, 6574, 1641, 6535, 3518, 7688, 2183, 6480, 2643, 2584, 2226, 2644, 2643, 2611, 3382, 2611, 5500, 2241, 1555, 2531, 1702, 2450, 2124, 1570, 7270, 2128, 3308, 2043, 3518, 2582, 1500, 2398, 2226, 1500, 2093, 5687, 3420, 1177, 6576, 2142, 4553, 965, 6536, 6535, 2298, 2608, 6480, 1855, 2643, 1880, 2175, 2584, 2644, 2611, 1220, 2611, 2241, 1555, 1702, 2450, 1570, 1676, 3308, 2043, 3518, 1575, 2582, 2398, 1624, 2226, 2549, 6574, 2093, 3420, 1153, 6386, 6576, 7688, 6368, 2183, 1575, 4767, 1624, 11292, 2136, 5407, 2142, 4551, 1611, 7270, 2128, 1102, 1701, 1500, 2449, 1500, 6536, 2116, 2116, 5398, 2298, 2608, 1855, 1880, 2175, 4551, 5431, 4481, 2627, 2094, 2038, 5438, 2310, 2496, 5451, 6535, 2240, 2099, 6480, 2643, 2626, 1555, 2584, 2644, 1702, 4539, 2611, 6386, 1641, 2450, 2611, 6368, 1570, 2241, 1555, 1575, 1702, 11292, 2450, 1570, 2136, 5407, 3308, 2043, 3518, 2116, 3518, 2116, 5398, 4579, 2582, 2226, 5431, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 4481, 6576, 2627, 2094, 2038, 5438, 2531, 2310, 2496, 5451, 2142, 2124, 4579, 2240, 6536, 2099, 2626, 1555, 2298, 2608, 1702, 1855, 1880, 2175, 2450, 1570, 4539, 1641, 5687, 1500, 1177, 965, 1500, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1575, 2549, 6574, 7688, 2183, 5687, 1177, 965, 6386, 6368, 11292, 2136, 1575, 5407, 2116, 2116, 5398, 1500, 1500, 4579, 7270, 2128, 5431, 4481, 1575, 2627, 2094, 2038, 5438, 2549, 2310, 6574, 2496, 6535, 5451, 2240, 7688, 2183, 2099, 6457, 2643, 2626, 1555, 2584, 4539, 2644, 2611, 1641, 1702, 7270, 2128, 2611, 2450, 
2241, 1570, 1555, 1500, 1500, 1702, 2450, 1570, 3308, 2043, 3518, 3518, 6535, 2582, 2398, 2226, 2643, 6480, 2643, 3382, 2226, 5500, 2584, 2644, 2093, 3420, 2611, 6553, 2531, 2611, 2124, 2241, 2142, 4579, 1555, 6513, 1702, 2298, 2450, 1570, 2608, 1855, 1880, 2175, 3308, 2043, 3518, 5687, 1177, 965, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4579, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1575, 2549, 6574, 6386, 6368, 7688, 2183, 11292, 2136, 5407, 1500, 2116, 1500, 2116, 5398, 4579, 5431, 4481, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 5451, 2099, 2626, 7270, 2128, 1555, 1575, 1702, 2450, 4539, 1570, 6386, 1641, 6368, 11292, 2136, 6535, 5407, 6480, 2643, 2116, 2116, 3518, 2584, 5398, 2644, 2611, 2226, 2643, 4629, 2611, 5431, 3382, 2241, 5500, 4531, 1555, 2531, 2627, 2094, 2038, 1702, 2310, 5438, 2450, 2496, 2124, 1570, 3308, 2240, 2043, 3518, 5451, 2099, 1500, 2626, 1500, 1555, 5687, 1702, 1177, 2450, 2582, 965, 1570, 2398, 2226, 2093, 3420, 6576, 4539, 1641, 2142, 4629, 6536, 3518, 2298, 2608, 1855, 1880, 2175, 2226, 2643, 3382, 5500, 1575, 1220, 2531, 1676, 2549, 6574, 2124, 1624, 7688, 2183, 1153, 4769, 1624, 1611, 1102, 1701, 5687, 2449, 1177, 1597, 965, 1066, 7270, 2128, 1500, 6386, 1500, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 1575, 4553, 5431, 4483, 2627, 2094, 2038, 5438, 2310, 2496, 2240, 2099, 2626, 1555, 1702, 2450, 1570, 1575, 5451, 6535, 6574, 2549, 6480, 2643, 3518, 2584, 2644, 7688, 2183, 2226, 2611, 2643, 2611, 5710, 2241, 3382, 1641, 1555, 6671, 1702, 2450, 1570, 3308, 2531, 2043, 3518, 2124, 1500, 2582, 1500, 2398, 2226, 2093, 3420, 7270, 2128, 6576, 2142, 6858, 4581, 1177, 6536, 2298, 965, 2608, 6535, 1855, 1880, 2175, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1575, 2142, 4581, 2549, 7745, 6536, 1575, 2298, 2608, 7688, 2183, 1855, 1880, 2175, 6386, 6368, 1500, 1500, 11292, 2136, 5407, 7270, 2128, 2116, 2116, 5398, 4631, 6386, 6368, 5431, 11292, 2136, 5407, 4533, 2627, 2094, 2038, 2310, 2496, 2116, 5438, 2116, 5398, 2240, 2099, 6535, 2626, 6480, 2643, 5451, 2584, 2644, 4631, 1555, 5431, 2611, 4533, 2627, 2094, 2038, 1702, 2310, 2496, 2611, 2241, 2450, 1570, 2240, 5438, 2099, 2626, 1555, 5451, 1555, 1702, 4539, 1641, 1702, 2450, 2450, 1570, 1570, 3518, 3308, 2043, 3518, 2226, 1575, 2643, 4539, 3382, 5500, 2582, 2398, 3518, 2226, 1641, 2226, 2093, 3420, 2643, 6576, 2531, 3382, 2124, 5500, 2142, 4631, 6536, 2531, 2298, 2608, 1855, 1880, 2175, 2124, 5687, 1177, 965, 1500, 1500, 1575, 5687, 1177, 2549, 965, 6574, 7688, 2183, 7270, 2128, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 1575, 4631, 1500, 2549, 1500, 5431, 6574, 6535, 4533, 2627, 2094, 2038, 7688, 2183, 2310, 6480, 2643, 2496, 5438, 2240, 2584, 2099, 2626, 2644, 2611, 5451, 1555, 2611, 1702, 2241, 2450, 1570, 1555, 1702, 2450, 1570, 7270, 3308, 2128, 4539, 2043, 3518, 1641, 3518, 2582, 2226, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 6576, 2531, 2142, 4631, 2124, 6536, 6535, 2298, 2608, 6480, 1855, 2643, 1880, 2175, 2584, 2644, 2611, 2611, 2241, 5687, 1177, 1555, 965, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4631, 6536, 2298, 1575, 2608, 1855, 1880, 2175, 6574, 1575, 1676, 7688, 2183, 1220, 2549, 1624, 1153, 4691, 6386, 6368, 1590, 1611, 7270, 2128, 1102, 1701, 11292, 2136, 2449, 5407, 1500, 1500, 2116, 2116, 5398, 4549, 5431, 6535, 6386, 6480, 6368, 2643, 4479, 2627, 2094, 2038, 2584, 2644, 5438, 1575, 2310, 5451, 2496, 2611, 2240, 2099, 2611, 2241, 2626, 11292, 2136, 1555, 5407, 
1702, 2450, 1555, 1702, 2116, 1570, 2116, 2450, 5398, 4539, 1570, 1641, 4577, 3308, 5431, 2043, 3518, 3518, 4479, 2226, 2627, 2094, 2038, 5438, 2643, 2310, 3382, 5500, 2496, 2582, 5451, 2240, 2398, 2099, 2531, 2626, 1555, 2226, 2093, 1702, 2124, 3420, 2450, 1570, 6576, 2142, 4577, 6536, 4539, 1641, 2298, 5687, 2608, 1855, 1880, 2175, 1177, 965, 3518, 2226, 2643, 3382, 5500, 2531, 2124, 1500, 1500, 1575, 5687, 2549, 1177, 6574, 965, 7688, 2183, 6386, 6368, 1575, 7270, 2128, 11292, 2136, 5407, 2116, 2116, 5398, 4577, 5431, 4479, 1575, 2627, 2094, 2038, 5438, 2549, 2310, 6574, 2496, 6535, 1500, 5451, 1500, 2240, 6480, 2643, 7688, 2183, 2584, 2099, 2644, 2626, 1555, 2611, 2611, 1702, 4539, 2450, 2241, 1570, 1641, 1555, 1702, 2450, 1570, 3308, 7270, 2043, 2128, 3518, 3518, 2582, 2398, 2226, 2226, 2643, 2093, 3382, 3420, 5500, 6576, 2142, 2531, 4577, 6536, 6535, 6480, 2643, 2124, 2584, 2644, 2298, 2608, 2611, 1855, 1880, 2175, 2611, 2241, 1555, 1702, 2450, 1570, 5687, 3308, 1177, 2043, 965, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 1500, 2142, 1500, 4577, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 2549, 6574, 7688, 2183, 1575, 6386, 6368, 11292, 2136, 5407, 2116, 2116, 5398, 7270, 2128, 4627, 5431, 4529, 2627, 2094, 2038, 5438, 2310, 2496, 6386, 6368, 6535, 11292, 2136, 5407, 2240, 2099, 5451, 2626, 6480, 2643, 1555, 2584, 2116, 2644, 1702, 2611, 2116, 2450, 5398, 2611, 1570, 2241, 4539, 4627, 1641, 1555, 1500, 5431, 1500, 1702, 2450, 4529, 1570, 2627, 2094, 3518, 2038, 5438, 3308, 2310, 2043, 3518, 2226, 2496, 2643, 3382, 5451, 1575, 2240, 5500, 2582, 2398, 2226, 2099, 2626, 2093, 3420, 1555, 2531, 6576, 2124, 1702, 4539, 2450, 2142, 1570, 1641, 4627, 6536, 2298, 2608, 1855, 1880, 2175, 5687, 1177, 965, 3518, 2226, 2643, 3382, 5500, 2531, 1575, 2124, 2549, 6574, 6386, 7688, 2183, 6368, 1568, 5687, 1177, 11292, 965, 2136, 5407, 1500, 1500, 2116, 2116, 5398, 7270, 2128, 1712, 1575, 4627, 1154, 5431, 4529, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 5451, 1676, 2099, 2626, 1555, 1220, 1702, 2450, 1575, 1570, 2549, 6574, 6535, 1624, 4539, 7688, 2183, 1641, 1500, 1500, 6480, 2643, 3518, 1153, 2584, 2644, 2226, 4817, 2611, 2643, 2611, 1590, 3382, 2241, 5500, 1624, 1555, 2559, 2561, 2559, 2531, 1702, 2124, 7270, 2579, 2579, 2450, 1611, 1570, 2128, 3308, 1102, 1701, 2449, 2043, 3518, 1597, 1106, 2582, 5687, 2398, 2226, 1177, 2093, 3420, 6576, 965, 6535, 2142, 4601, 6536, 6480, 2643, 2584, 2644, 2298, 1500, 2608, 1500, 2611, 1855, 1880, 2175, 2611, 2241, 1555, 1702, 2450, 1570, 1575, 3308, 2043, 3518, 1575, 2549, 6574, 2582, 2398, 2226, 7688, 2093, 2183, 3420, 6576, 2142, 4601, 6536, 2298, 6386, 2608, 6368, 1855, 1880, 2175, 7270, 2128, 11292, 2136, 5407, 2116, 2116, 5398, 4601, 5431, 4531, 2627, 2094, 2038, 2310, 5438, 2496, 2240, 1500, 5451, 1500, 6535, 2099, 2626, 1555, 6480, 2643, 2584, 1702, 2644, 2450, 2611, 1570, 2611, 2241, 1555, 4539, 1641, 1702, 2450, 1570, 3308, 2043, 3518, 3518, 2582, 2226, 2398, 2643, 2226, 2093, 3382, 3420, 5500, 6576, 2531, 2142, 4629, 2124, 6536, 2298, 2608, 1855, 1880, 2175, 5687, 1177, 965, 1575, 1575, 2549, 6574, 7688, 2183, 7270, 2128, 6535, 6480, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 1500, 1500, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4629, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 7291, 2128, 6534, 6479, 2643, 2584, 2644, 2611, 2611, 2241, 1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 2093, 3420, 6576, 2142, 4629, 7291, 2128, 6536, 2298, 6534, 2608, 1855, 1880, 2175, 6479, 2643, 2584, 2644, 2611, 2611, 2241, 
1555, 1702, 2450, 1570, 3308, 2043, 3518, 2582, 2398, 2226, 1500, 2093, 3420, 1500, 6576, 2142, 4629, 6536, 2298, 2608, 1855, 1880, 2175, 1575, 1500, 1500, 1220, 1624, 1153, 4412, 1676, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1676, 1220, 1624, 1153, 4412, 1597, 908, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1676, 1220, 1624, 1153, 1500, 4412, 1500, 1590, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1597, 908, 1500, 1500, 1676, 1220, 1624, 1153, 4412, 1590, 1500, 1500, 1500, 1500, 1500, 1500oslo.messaging-5.35.0/tools/functions.sh0000666000175100017510000000061113224676046020276 0ustar zuulzuul00000000000000 wait_for_line () { while read line do echo "$line" | grep -q "$1" && break echo "$line" | grep "$2" && exit 1 done < "$3" # Read the fifo for ever otherwise process would block cat "$3" >/dev/null & } function clean_exit(){ local error_code="$?" for job in `jobs -p` do kill -9 $job done rm -rf "$1" return $error_code } oslo.messaging-5.35.0/tools/setup-test-env-amqp1.sh0000777000175100017510000001061013224676046022211 0ustar zuulzuul00000000000000#!/bin/bash # # Usage: setup-test-env-amqp.sh # where AMQP1_BACKEND is the AMQP 1.0 intermediary to use. Valid # values are "qdrouterd" for router and "qpidd" for broker. set -e # router requires qdrouterd, sasl2-bin/cyrus-sasl-plain+cyrus-sasl-lib # broker requires qpidd, qpid-tools sasl2-bin/cyrus-sasl-plain+cyrus-sasl-lib . tools/functions.sh DATADIR=$(mktemp -d /tmp/OSLOMSG-${AMQP1_BACKEND}.XXXXX) trap "clean_exit $DATADIR" EXIT function _setup_qdrouterd_user { echo secretqpid | saslpasswd2 -c -p -f ${DATADIR}/qdrouterd.sasldb stackqpid } function _setup_qpidd_user { echo secretqpid | saslpasswd2 -c -p -f ${DATADIR}/qpidd.sasldb -u QPID stackqpid } function _configure_qdrouterd { # create a stand alone router cat > ${DATADIR}/qdrouterd.conf <> ${DATADIR}/qdrouterd.conf <> ${DATADIR}/qdrouterd.conf <> ${DATADIR}/qdrouterd.conf < ${DATADIR}/sasl2/qdrouterd.conf </dev/null) if [[ ! -x "$QPIDD" ]]; then echo "FAILURE: qpidd broker not installed" exit 1 fi [ -f "/usr/lib/qpid/daemon/acl.so" ] && LIBACL="load-module=/usr/lib/qpid/daemon/acl.so" cat > ${DATADIR}/qpidd.conf <> ${DATADIR}/qpidd.conf <> ${DATADIR}/qpidd.conf <> ${DATADIR}/qpidd.conf < ${DATADIR}/qpidd.acl < ${DATADIR}/sasl2/qpidd.conf < ${SITEDIR}/dispatch.pth </dev/null) mkfifo ${DATADIR}/out $QDR --config ${DATADIR}/qdrouterd.conf & wait_for_line "Router .*started" "error" ${DATADIR}/out rm ${SITEDIR}/dispatch.pth } function _start_qpidd { chmod -R a+r ${DATADIR} QPIDD=$(which qpidd 2>/dev/null) mkfifo ${DATADIR}/out $QPIDD --log-enable trace+ --log-to-file ${DATADIR}/out --config ${DATADIR}/qpidd.conf & wait_for_line "Broker .*running" "error" ${DATADIR}/out } _configure_${AMQP1_BACKEND} _setup_${AMQP1_BACKEND}_user _start_${AMQP1_BACKEND} $* oslo.messaging-5.35.0/tools/test-setup.sh0000777000175100017510000000255213224676046020414 0ustar zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developer should setup their test systems in a similar way. # This setup for amqp1 needs to be run by a user that can run sudo. function is_fedora { [ -f /usr/bin/yum ] && cat /etc/*release | grep -q -e "Fedora" } # NOTE(sileht): we create the virtualenv only and use bindep directly # because tox doesn't have a quiet option... tox -ebindep --notest # TODO(ansmith) for now setup amqp1 dependencies for any profile. 
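# (Editor's illustration, not part of the package.) The string_lengths sample
# in tools/messages_length.yaml above is what simulator.py turns into a
# weighted message-length generator. A minimal sketch of loading the sample
# and bucketing it the way init_random_generator() in tools/simulator.py
# does -- the bucket size (500) and key names are taken from that module;
# the relative file path is an assumption about the working directory:
import collections

import yaml

BUCKET = 500  # DISTRIBUTION_BUCKET_SIZE in tools/simulator.py

with open('tools/messages_length.yaml') as f:
    content = yaml.safe_load(f)
lengths = [int(n) for n in
           content['test_data']['string_lengths'].split(', ')]

histogram = collections.defaultdict(int)
for length in lengths:
    # bucket starts are 1, 501, 1001, ... as in simulator.py
    histogram[(length // BUCKET) * BUCKET + 1] += 1
# histogram now maps each bucket start to its sample count, i.e. the
# empirical distribution the simulator draws message sizes from.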
# Fix this when test-setup is passed environment profile setting. # NOTE(sileht): bindep return 1 if some packages have to be installed PACKAGES="$(.tox/bindep/bin/bindep -b -f bindep.txt amqp1 || true)" [ -n "$PACKAGES" ] || exit 0 # inspired from project-config install-distro-packages.sh if apt-get -v >/dev/null 2>&1 ; then sudo add-apt-repository -y ppa:qpid/testing sudo apt-get -qq update sudo PATH=/usr/sbin:/sbin:$PATH DEBIAN_FRONTEND=noninteractive \ apt-get -q --option "Dpkg::Options::=--force-confold" \ --assume-yes install $PACKAGES elif emerge --version >/dev/null 2>&1 ; then sudo emerge -uDNq --jobs=4 @world sudo PATH=/usr/sbin:/sbin:$PATH emerge -q --jobs=4 $PACKAGES else is_fedora && YUM=dnf || YUM=yum sudo PATH=/usr/sbin:/sbin:$PATH $YUM install -y $PACKAGES fi oslo.messaging-5.35.0/tools/simulator.py0000777000175100017510000007162413224676046020342 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet eventlet.monkey_patch() import argparse import bisect import collections import functools import itertools import json import logging import os import random import signal import six import socket import string import sys import threading import time import yaml from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging import notify # noqa from oslo_messaging import rpc # noqa from oslo_utils import timeutils LOG = logging.getLogger() CURRENT_PID = None CURRENT_HOST = None CLIENTS = [] MESSAGES = [] IS_RUNNING = True SERVERS = [] TRANSPORT = None USAGE = """ Usage: ./simulator.py [-h] [--url URL] [-d DEBUG]\ {notify-server,notify-client,rpc-server,rpc-client} ... 
Usage example: python tools/simulator.py\ --url rabbit://stackrabbit:secretrabbit@localhost/ rpc-server python tools/simulator.py\ --url rabbit://stackrabbit:secretrabbit@localhost/ rpc-client\ --exit-wait 15000 -p 64 -m 64""" MESSAGES_LIMIT = 1000 DISTRIBUTION_BUCKET_SIZE = 500 def init_random_generator(): data = [] file_dir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(file_dir, 'messages_length.yaml')) as m_file: content = yaml.safe_load(m_file) data += [int(n) for n in content[ 'test_data']['string_lengths'].split(', ')] ranges = collections.defaultdict(int) for msg_length in data: range_start = ((msg_length // DISTRIBUTION_BUCKET_SIZE) * DISTRIBUTION_BUCKET_SIZE + 1) ranges[range_start] += 1 ranges_start = sorted(ranges.keys()) total_count = len(data) accumulated_distribution = [] running_total = 0 for range_start in ranges_start: norm = float(ranges[range_start]) / total_count running_total += norm accumulated_distribution.append(running_total) def weighted_random_choice(): r = random.random() * running_total start = ranges_start[bisect.bisect_right(accumulated_distribution, r)] return random.randrange(start, start + DISTRIBUTION_BUCKET_SIZE) return weighted_random_choice class LoggingNoParsingFilter(logging.Filter): def filter(self, record): msg = record.getMessage() for i in ['received {', 'MSG_ID is ']: if i in msg: return False return True Message = collections.namedtuple( 'Message', ['seq', 'cargo', 'client_ts', 'server_ts', 'return_ts']) def make_message(seq, cargo, client_ts=0, server_ts=0, return_ts=0): return Message(seq, cargo, client_ts, server_ts, return_ts) def update_message(message, **kwargs): return Message(*message)._replace(**kwargs) class MessageStatsCollector(object): def __init__(self, label): self.label = label self.buffer = [] # buffer to store messages during report interval self.series = [] # stats for every report interval now = time.time() diff = int(now) - now + 1 # align start to whole seconds threading.Timer(diff, self.monitor).start() # schedule in a second def monitor(self): global IS_RUNNING if IS_RUNNING: # NOTE(kbespalov): this way not properly works # because the monitor starting with range 1sec +-150 ms # due to high threading contention between rpc clients threading.Timer(1.0, self.monitor).start() now = time.time() count = len(self.buffer) size = 0 min_latency = sys.maxsize max_latency = 0 sum_latencies = 0 for i in six.moves.range(count): p = self.buffer[i] size += len(p.cargo) latency = None if p.return_ts: latency = p.return_ts - p.client_ts # round-trip elif p.server_ts: latency = p.server_ts - p.client_ts # client -> server if latency: sum_latencies += latency min_latency = min(min_latency, latency) max_latency = max(max_latency, latency) del self.buffer[:count] # trim processed items seq = len(self.series) stats = dict(seq=seq, timestamp=now, count=count, size=size) msg = ('%-14s: seq: %-4d count: %-6d bytes: %-10d' % (self.label, seq, count, size)) if sum_latencies: latency = sum_latencies / count stats.update(dict(latency=latency, min_latency=min_latency, max_latency=max_latency)) msg += (' latency: %-9.3f min: %-9.3f max: %-9.3f' % (latency, min_latency, max_latency)) self.series.append(stats) LOG.info(msg) def push(self, parsed_message): self.buffer.append(parsed_message) def get_series(self): return self.series @staticmethod def calc_stats(label, *collectors): count = 0 size = 0 min_latency = sys.maxsize max_latency = 0 sum_latencies = 0 start = sys.maxsize end = 0 for point in itertools.chain(*(c.get_series() 
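# (Editor's illustration of init_random_generator() above; not part of the
# original module.) The generator converts the per-bucket counts into a
# cumulative distribution and samples it with bisect. A standalone sketch
# with made-up bucket counts:
import bisect
import random

buckets = {1: 10, 501: 30, 1001: 60}  # bucket start -> count (hypothetical)
starts = sorted(buckets)
total = float(sum(buckets.values()))

cumulative = []
running = 0.0
for s in starts:
    running += buckets[s] / total
    cumulative.append(running)        # here: [0.1, 0.4, 1.0]

r = random.random() * running         # uniform point on the CDF
start = starts[bisect.bisect_right(cumulative, r)]
length = random.randrange(start, start + 500)
# A bucket is picked proportionally to its count, then the length is uniform
# within the 500-wide bucket -- e.g. r = 0.25 selects the 501..1000 bucket.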
for c in collectors)): count += point['count'] size += point['size'] if point['count']: # NOTE(kbespalov): # we except the start and end time as time of # first and last processed message, no reason # to set boundaries if server was idle before # running of clients and after. start = min(start, point['timestamp']) end = max(end, point['timestamp']) if 'latency' in point: sum_latencies += point['latency'] * point['count'] min_latency = min(min_latency, point['min_latency']) max_latency = max(max_latency, point['max_latency']) # start is the timestamp of the earliest block, which inclides samples # for the prior second start -= 1 duration = end - start if count else 0 stats = dict(count=count, size=size, duration=duration, count_p_s=0, size_p_s=0) if duration: stats.update(dict(start=start, end=end, count_p_s=count / duration, size_p_s=size / duration)) msg = ('%s: duration: %.2f count: %d (%.1f msg/sec) ' 'bytes: %d (%.0f bps)' % (label, duration, count, stats['count_p_s'], size, stats['size_p_s'])) if sum_latencies: latency = sum_latencies / count stats.update(dict(latency=latency, min_latency=min_latency, max_latency=max_latency)) msg += (' latency: %.3f min: %.3f max: %.3f' % (latency, min_latency, max_latency)) LOG.info(msg) return stats class NotifyEndpoint(object): def __init__(self, wait_before_answer, requeue): self.wait_before_answer = wait_before_answer self.requeue = requeue self.received_messages = MessageStatsCollector('server') self.cache = set() def info(self, ctxt, publisher_id, event_type, payload, metadata): LOG.debug("%s %s %s %s", ctxt, publisher_id, event_type, payload) server_ts = time.time() message = update_message(payload, server_ts=server_ts) self.received_messages.push(message) if self.requeue and message.seq not in self.cache: self.cache.add(message.seq) if self.wait_before_answer > 0: time.sleep(self.wait_before_answer) return messaging.NotificationResult.REQUEUE return messaging.NotificationResult.HANDLED def notify_server(transport, topic, wait_before_answer, duration, requeue): endpoints = [NotifyEndpoint(wait_before_answer, requeue)] target = messaging.Target(topic=topic) server = notify.get_notification_listener(transport, [target], endpoints, executor='eventlet') run_server(server, duration=duration) return endpoints[0] class BatchNotifyEndpoint(object): def __init__(self, wait_before_answer, requeue): self.wait_before_answer = wait_before_answer self.requeue = requeue self.received_messages = MessageStatsCollector('server') self.cache = set() def info(self, batch): LOG.debug('msg rcv') LOG.debug("%s", batch) server_ts = time.time() for item in batch: message = update_message(item['payload'], server_ts=server_ts) self.received_messages.push(message) return messaging.NotificationResult.HANDLED def batch_notify_server(transport, topic, wait_before_answer, duration, requeue): endpoints = [BatchNotifyEndpoint(wait_before_answer, requeue)] target = messaging.Target(topic=topic) server = notify.get_batch_notification_listener( transport, [target], endpoints, executor='eventlet', batch_size=1000, batch_timeout=5) run_server(server, duration=duration) return endpoints[0] class RpcEndpoint(object): def __init__(self, wait_before_answer): self.wait_before_answer = wait_before_answer self.received_messages = MessageStatsCollector('server') def info(self, ctxt, message): server_ts = time.time() LOG.debug("######## RCV: %s", message) reply = update_message(message, server_ts=server_ts) self.received_messages.push(reply) if self.wait_before_answer > 0: 
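# (Editor's worked example for MessageStatsCollector.calc_stats() above; the
# point values are hypothetical.) The summary throughput is plain totals over
# the per-second series, with the start pulled back one second because each
# point covers the second *before* its timestamp:
points = [
    {'timestamp': 100.0, 'count': 10, 'size': 25000},
    {'timestamp': 101.0, 'count': 20, 'size': 50000},
    {'timestamp': 102.0, 'count': 15, 'size': 37500},
]
count = sum(p['count'] for p in points)          # 45 messages
size = sum(p['size'] for p in points)            # 112500 bytes
start = min(p['timestamp'] for p in points) - 1  # 99.0
end = max(p['timestamp'] for p in points)        # 102.0
duration = end - start                           # 3.0 seconds
count_p_s = count / duration                     # 15.0 msg/sec
size_p_s = size / duration                       # 37500.0 bytes/sec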
time.sleep(self.wait_before_answer) return reply class ServerControlEndpoint(object): def __init__(self, controlled_server): self.connected_clients = set() self.controlled_server = controlled_server def sync_start(self, ctx, message): """Handle start reports from clients""" client_id = message['id'] LOG.info('The client %s started to send messages' % client_id) self.connected_clients.add(client_id) def sync_done(self, ctx, message): """Handle done reports from clients""" client_id = message['id'] LOG.info('The client %s finished msg sending.' % client_id) if client_id in self.connected_clients: self.connected_clients.remove(client_id) if not self.connected_clients: LOG.info( 'The clients sent all messages. Shutting down the server..') threading.Timer(1, self._stop_server_with_delay).start() def _stop_server_with_delay(self): self.controlled_server.stop() self.controlled_server.wait() class Client(object): def __init__(self, client_id, client, method, has_result, wait_after_msg): self.client_id = client_id self.client = client self.method = method self.wait_after_msg = wait_after_msg self.seq = 0 self.messages_count = len(MESSAGES) # Start sending the messages from a random position to avoid # memory re-usage and generate more realistic load on the library # and a message transport self.position = random.randint(0, self.messages_count - 1) self.sent_messages = MessageStatsCollector('client-%s' % client_id) self.errors = MessageStatsCollector('error-%s' % client_id) if has_result: self.round_trip_messages = MessageStatsCollector( 'round-trip-%s' % client_id) def host_based_id(self): _id = "%(client_id)s %(salt)s@%(hostname)s" return _id % {'hostname': CURRENT_HOST, 'salt': hex(id(self))[2:], 'client_id': self.client_id} def send_msg(self): msg = make_message(self.seq, MESSAGES[self.position], time.time()) self.sent_messages.push(msg) res = None try: res = self.method(self.client, msg) except Exception: self.errors.push(msg) else: LOG.debug("SENT: %s", msg) if res: return_ts = time.time() res = update_message(res, return_ts=return_ts) self.round_trip_messages.push(res) self.seq += 1 self.position = (self.position + 1) % self.messages_count if self.wait_after_msg > 0: time.sleep(self.wait_after_msg) class RPCClient(Client): def __init__(self, client_id, transport, target, timeout, is_cast, wait_after_msg, sync_mode=False): client = rpc.RPCClient(transport, target) method = _rpc_cast if is_cast else _rpc_call super(RPCClient, self).__init__(client_id, client.prepare(timeout=timeout), method, not is_cast, wait_after_msg) self.sync_mode = sync_mode self.is_sync = False # prepare the sync client if sync_mode: if sync_mode == 'call': self.sync_client = self.client else: self.sync_client = client.prepare(fanout=True, timeout=timeout) def send_msg(self): if self.sync_mode and not self.is_sync: self.is_sync = self.sync_start() super(RPCClient, self).send_msg() def sync_start(self): try: msg = {'id': self.host_based_id()} method = _rpc_call if self.sync_mode == 'call' else _rpc_cast method(self.sync_client, msg, 'sync_start') except Exception: LOG.error('The client: %s failed to sync with %s.' % (self.client_id, self.client.target)) return False LOG.info('The client: %s successfully sync with %s' % ( self.client_id, self.client.target)) return True def sync_done(self): try: msg = {'id': self.host_based_id()} method = _rpc_call if self.sync_mode == 'call' else _rpc_cast method(self.sync_client, msg, 'sync_done') except Exception: LOG.error('The client: %s failed finish the sync with %s.' 
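# (Editor's sketch of the --sync shutdown handshake implemented by
# ServerControlEndpoint and RPCClient above, reduced to plain Python so the
# control flow is visible; the class and client ids here are hypothetical.)
class _ControlSketch(object):
    def __init__(self):
        self.connected = set()
        self.stopped = False

    def sync_start(self, client_id):
        self.connected.add(client_id)      # client announces itself first

    def sync_done(self, client_id):
        self.connected.discard(client_id)  # client finished sending
        if not self.connected:
            # the real endpoint schedules threading.Timer(1, server.stop)
            self.stopped = True

ctl = _ControlSketch()
ctl.sync_start('0 a1b2@host')
ctl.sync_start('1 c3d4@host')
ctl.sync_done('0 a1b2@host')   # one client still sending -> keep serving
ctl.sync_done('1 c3d4@host')   # last client done -> ctl.stopped is True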
% (self.client_id, self.client.target)) return False LOG.info('The client: %s successfully finished sync with %s' % (self.client_id, self.client.target)) return True class NotifyClient(Client): def __init__(self, client_id, transport, topic, wait_after_msg): client = notify.Notifier(transport, driver='messaging', topics=topic) client = client.prepare(publisher_id='publisher-%d' % client_id) method = _notify super(NotifyClient, self).__init__(client_id, client, method, False, wait_after_msg) def generate_messages(messages_count): # Limit the messages amount. Clients will reiterate the array again # if an amount of messages to be sent is bigger than MESSAGES_LIMIT if messages_count > MESSAGES_LIMIT: messages_count = MESSAGES_LIMIT LOG.info("Generating %d random messages", messages_count) generator = init_random_generator() for i in six.moves.range(messages_count): length = generator() msg = ''.join(random.choice( string.ascii_lowercase) for x in six.moves.range(length)) MESSAGES.append(msg) LOG.info("Messages has been prepared") def wrap_sigexit(f): def inner(*args, **kwargs): try: return f(*args, **kwargs) except SignalExit as e: LOG.info('Signal %s is caught. Interrupting the execution', e.signo) for server in SERVERS: server.stop() server.wait() finally: if TRANSPORT: TRANSPORT.cleanup() return inner @wrap_sigexit def run_server(server, duration=None): global IS_RUNNING SERVERS.append(server) server.start() if duration: with timeutils.StopWatch(duration) as stop_watch: while not stop_watch.expired() and IS_RUNNING: time.sleep(1) server.stop() IS_RUNNING = False server.wait() LOG.info('The server is terminating') time.sleep(1) # wait for stats collector to process the last second def rpc_server(transport, target, wait_before_answer, executor, duration): endpoints = [RpcEndpoint(wait_before_answer)] server = rpc.get_rpc_server(transport, target, endpoints, executor) # make the rpc server controllable by rpc clients endpoints.append(ServerControlEndpoint(server)) LOG.debug("starting RPC server for target %s", target) run_server(server, duration=duration) return server.dispatcher.endpoints[0] @wrap_sigexit def spawn_rpc_clients(threads, transport, targets, wait_after_msg, timeout, is_cast, messages_count, duration, sync_mode): p = eventlet.GreenPool(size=threads) targets = itertools.cycle(targets) for i in six.moves.range(threads): target = next(targets) LOG.debug("starting RPC client for target %s", target) client_builder = functools.partial(RPCClient, i, transport, target, timeout, is_cast, wait_after_msg, sync_mode) p.spawn_n(send_messages, i, client_builder, messages_count, duration) p.waitall() @wrap_sigexit def spawn_notify_clients(threads, topic, transport, message_count, wait_after_msg, timeout, duration): p = eventlet.GreenPool(size=threads) for i in six.moves.range(threads): client_builder = functools.partial(NotifyClient, i, transport, topic, wait_after_msg) p.spawn_n(send_messages, i, client_builder, message_count, duration) p.waitall() def send_messages(client_id, client_builder, messages_count, duration): global IS_RUNNING client = client_builder() CLIENTS.append(client) # align message sending closer to whole seconds now = time.time() diff = int(now) - now + 1 time.sleep(diff) if duration: with timeutils.StopWatch(duration) as stop_watch: while not stop_watch.expired() and IS_RUNNING: client.send_msg() eventlet.sleep() IS_RUNNING = False else: LOG.debug("Sending %d messages using client %d", messages_count, client_id) for _ in six.moves.range(messages_count): client.send_msg() 
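# (Editor's note on the whole-second alignment used in send_messages() above
# and in MessageStatsCollector.__init__: sleeping int(now) - now + 1 seconds
# lands execution on the next whole-second boundary, so the per-second stats
# buckets of clients and server line up.)
import time

now = time.time()          # e.g. 1514764800.3
diff = int(now) - now + 1  # 1514764800 - 1514764800.3 + 1 = 0.7
time.sleep(diff)           # wakes at ~1514764801.0, a whole second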
eventlet.sleep() if not IS_RUNNING: break LOG.debug("Client %d has sent %d messages", client_id, messages_count) # wait for replies to be collected time.sleep(1) # send stop request to the rpc server if isinstance(client, RPCClient) and client.is_sync: client.sync_done() def _rpc_call(client, msg, remote_method='info'): try: res = client.call({}, remote_method, message=msg) except Exception as e: LOG.exception('Error %s on CALL for message %s', str(e), msg) raise else: LOG.debug("SENT: %s, RCV: %s", msg, res) return res def _rpc_cast(client, msg, remote_method='info'): try: client.cast({}, remote_method, message=msg) except Exception as e: LOG.exception('Error %s on CAST for message %s', str(e), msg) raise else: LOG.debug("SENT: %s", msg) def _notify(notification_client, msg): notification_client.info({}, 'compute.start', msg) def show_server_stats(endpoint, json_filename): LOG.info('=' * 35 + ' summary ' + '=' * 35) output = dict(series={}, summary={}) output['series']['server'] = endpoint.received_messages.get_series() stats = MessageStatsCollector.calc_stats( 'server', endpoint.received_messages) output['summary'] = stats if json_filename: write_json_file(json_filename, output) def show_client_stats(clients, json_filename, has_reply=False): LOG.info('=' * 35 + ' summary ' + '=' * 35) output = dict(series={}, summary={}) for cl in clients: cl_id = cl.client_id output['series']['client_%s' % cl_id] = cl.sent_messages.get_series() output['series']['error_%s' % cl_id] = cl.errors.get_series() if has_reply: output['series']['round_trip_%s' % cl_id] = ( cl.round_trip_messages.get_series()) sent_stats = MessageStatsCollector.calc_stats( 'client', *(cl.sent_messages for cl in clients)) output['summary']['client'] = sent_stats error_stats = MessageStatsCollector.calc_stats( 'error', *(cl.errors for cl in clients)) output['summary']['error'] = error_stats if has_reply: round_trip_stats = MessageStatsCollector.calc_stats( 'round-trip', *(cl.round_trip_messages for cl in clients)) output['summary']['round_trip'] = round_trip_stats if json_filename: write_json_file(json_filename, output) def write_json_file(filename, output): with open(filename, 'w') as f: f.write(json.dumps(output)) LOG.info('Stats are written into %s', filename) class SignalExit(SystemExit): def __init__(self, signo, exccode=1): super(SignalExit, self).__init__(exccode) self.signo = signo def signal_handler(signum, frame): global IS_RUNNING IS_RUNNING = False raise SignalExit(signum) def _setup_logging(is_debug): log_level = logging.DEBUG if is_debug else logging.INFO logging.basicConfig( stream=sys.stdout, level=log_level, format="%(asctime)-15s %(levelname)s %(name)s %(message)s") logging.getLogger().handlers[0].addFilter(LoggingNoParsingFilter()) for i in ['kombu', 'amqp', 'stevedore', 'qpid.messaging' 'oslo.messaging._drivers.amqp', ]: logging.getLogger(i).setLevel(logging.WARN) def main(): parser = argparse.ArgumentParser( description='Tools to play with oslo.messaging\'s RPC', usage=USAGE, ) parser.add_argument('--url', dest='url', help="oslo.messaging transport url") parser.add_argument('-d', '--debug', dest='debug', action='store_true', help="Turn on DEBUG logging level instead of WARN") parser.add_argument('-tp', '--topic', dest='topic', default="profiler_topic", help="Topics to publish/receive messages to/from.") parser.add_argument('-s', '--server', dest='server', default="profiler_server", help="Servers to publish/receive messages to/from.") parser.add_argument('-tg', '--targets', dest='targets', nargs="+", 
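# (Editor's illustration of the --json report written by write_json_file()
# above; the key layout follows show_client_stats()/calc_stats(), all values
# are hypothetical.)
example_output = {
    'series': {
        'client_0': [{'seq': 0, 'timestamp': 1514764801.0,
                      'count': 15, 'size': 37500}],
        'error_0': [],
        'round_trip_0': [{'seq': 0, 'timestamp': 1514764801.0, 'count': 15,
                          'size': 37500, 'latency': 0.012,
                          'min_latency': 0.004, 'max_latency': 0.031}],
    },
    'summary': {
        'client': {'count': 15, 'size': 37500, 'duration': 1.0,
                   'start': 1514764800.0, 'end': 1514764801.0,
                   'count_p_s': 15.0, 'size_p_s': 37500.0},
        'error': {'count': 0, 'size': 0, 'duration': 0,
                  'count_p_s': 0, 'size_p_s': 0},
        'round_trip': {'count': 15, 'size': 37500, 'duration': 1.0,
                       'start': 1514764800.0, 'end': 1514764801.0,
                       'count_p_s': 15.0, 'size_p_s': 37500.0,
                       'latency': 0.012, 'min_latency': 0.004,
                       'max_latency': 0.031},
    },
}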
default=["profiler_topic.profiler_server"], help="Targets to publish/receive messages to/from.") parser.add_argument('-l', dest='duration', type=int, help='send messages for certain time') parser.add_argument('-j', '--json', dest='json_filename', help='File name to store results in JSON format') parser.add_argument('--config-file', dest='config_file', type=str, help="Oslo messaging config file") subparsers = parser.add_subparsers(dest='mode', help='notify/rpc server/client mode') server = subparsers.add_parser('notify-server') server.add_argument('-w', dest='wait_before_answer', type=int, default=-1) server.add_argument('--requeue', dest='requeue', action='store_true') server = subparsers.add_parser('batch-notify-server') server.add_argument('-w', dest='wait_before_answer', type=int, default=-1) server.add_argument('--requeue', dest='requeue', action='store_true') client = subparsers.add_parser('notify-client') client.add_argument('-p', dest='threads', type=int, default=1, help='number of client threads') client.add_argument('-m', dest='messages', type=int, default=1, help='number of call per threads') client.add_argument('-w', dest='wait_after_msg', type=float, default=-1, help='sleep time between two messages') client.add_argument('--timeout', dest='timeout', type=int, default=3, help='client timeout') server = subparsers.add_parser('rpc-server') server.add_argument('-w', dest='wait_before_answer', type=int, default=-1) server.add_argument('-e', '--executor', dest='executor', type=str, default='eventlet', help='name of a message executor') client = subparsers.add_parser('rpc-client') client.add_argument('-p', dest='threads', type=int, default=1, help='number of client threads') client.add_argument('-m', dest='messages', type=int, default=1, help='number of call per threads') client.add_argument('-w', dest='wait_after_msg', type=float, default=-1, help='sleep time between two messages') client.add_argument('--timeout', dest='timeout', type=int, default=3, help='client timeout') client.add_argument('--exit-wait', dest='exit_wait', type=int, default=0, help='Keep connections open N seconds after calls ' 'have been done') client.add_argument('--is-cast', dest='is_cast', action='store_true', help='Use `call` or `cast` RPC methods') client.add_argument('--is-fanout', dest='is_fanout', action='store_true', help='fanout=True for CAST messages') client.add_argument('--sync', dest='sync', choices=('call', 'fanout'), help="stop server when all msg was sent by clients") args = parser.parse_args() _setup_logging(is_debug=args.debug) if args.config_file: cfg.CONF(["--config-file", args.config_file]) global TRANSPORT if args.mode in ['rpc-server', 'rpc-client']: TRANSPORT = messaging.get_transport(cfg.CONF, url=args.url) else: TRANSPORT = messaging.get_notification_transport(cfg.CONF, url=args.url) if args.mode in ['rpc-client', 'notify-client']: # always generate maximum number of messages for duration-limited tests generate_messages(MESSAGES_LIMIT if args.duration else args.messages) # oslo.config defaults cfg.CONF.heartbeat_interval = 5 cfg.CONF.prog = os.path.basename(__file__) cfg.CONF.project = 'oslo.messaging' signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) if args.mode == 'rpc-server': target = messaging.Target(topic=args.topic, server=args.server) endpoint = rpc_server(TRANSPORT, target, args.wait_before_answer, args.executor, args.duration) show_server_stats(endpoint, args.json_filename) elif args.mode == 'notify-server': endpoint = 
notify_server(TRANSPORT, args.topic, args.wait_before_answer, args.duration, args.requeue) show_server_stats(endpoint, args.json_filename) elif args.mode == 'batch-notify-server': endpoint = batch_notify_server(TRANSPORT, args.topic, args.wait_before_answer, args.duration, args.requeue) show_server_stats(endpoint, args.json_filename) elif args.mode == 'notify-client': spawn_notify_clients(args.threads, args.topic, TRANSPORT, args.messages, args.wait_after_msg, args.timeout, args.duration) show_client_stats(CLIENTS, args.json_filename) elif args.mode == 'rpc-client': targets = [] for target in args.targets: tp, srv = target.partition('.')[::2] t = messaging.Target(topic=tp, server=srv, fanout=args.is_fanout) targets.append(t) spawn_rpc_clients(args.threads, TRANSPORT, targets, args.wait_after_msg, args.timeout, args.is_cast, args.messages, args.duration, args.sync) show_client_stats(CLIENTS, args.json_filename, not args.is_cast) if args.exit_wait: LOG.info("Finished. waiting for %d seconds", args.exit_wait) time.sleep(args.exit_wait) if __name__ == '__main__': CURRENT_PID = os.getpid() CURRENT_HOST = socket.gethostname() main() oslo.messaging-5.35.0/HACKING.rst0000666000175100017510000000017013224676046016370 0ustar zuulzuul00000000000000Style Commandments ================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ oslo.messaging-5.35.0/oslo.messaging.egg-info/0000775000175100017510000000000013224676256021217 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo.messaging.egg-info/requires.txt0000664000175100017510000000063213224676255023617 0ustar zuulzuul00000000000000pbr!=2.1.0,>=2.0.0 futurist>=1.2.0 oslo.config>=5.1.0 oslo.log>=3.30.0 oslo.utils>=3.33.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 oslo.i18n>=3.15.3 stevedore>=1.20.0 debtcollector>=1.2.0 monotonic>=0.6 six>=1.10.0 cachetools>=2.0.0 WebOb>=1.7.1 PyYAML>=3.10 amqp!=2.1.4,>=2.1.1 kombu!=4.0.2,>=4.0.0 pika>=0.10.0 pika-pool>=0.1.3 futures>=3.0.0 tenacity>=3.2.1 oslo.middleware>=3.31.0 oslo.messaging-5.35.0/oslo.messaging.egg-info/entry_points.txt0000664000175100017510000000350513224676255024517 0ustar zuulzuul00000000000000[console_scripts] oslo-messaging-send-notification = oslo_messaging.notify.notifier:_send_notification oslo-messaging-zmq-broker = oslo_messaging._cmd.zmq_proxy:main oslo-messaging-zmq-proxy = oslo_messaging._cmd.zmq_proxy:main [oslo.config.opts] oslo.messaging = oslo_messaging.opts:list_opts [oslo.messaging.drivers] amqp = oslo_messaging._drivers.impl_amqp1:ProtonDriver fake = oslo_messaging._drivers.impl_fake:FakeDriver kafka = oslo_messaging._drivers.impl_kafka:KafkaDriver kombu = oslo_messaging._drivers.impl_rabbit:RabbitDriver pika = oslo_messaging._drivers.impl_pika:PikaDriver rabbit = oslo_messaging._drivers.impl_rabbit:RabbitDriver zmq = oslo_messaging._drivers.impl_zmq:ZmqDriver [oslo.messaging.executors] blocking = futurist:SynchronousExecutor eventlet = futurist:GreenThreadPoolExecutor threading = futurist:ThreadPoolExecutor [oslo.messaging.notify.drivers] log = oslo_messaging.notify._impl_log:LogDriver messaging = oslo_messaging.notify.messaging:MessagingDriver messagingv2 = oslo_messaging.notify.messaging:MessagingV2Driver noop = oslo_messaging.notify._impl_noop:NoOpDriver routing = oslo_messaging.notify._impl_routing:RoutingDriver test = oslo_messaging.notify._impl_test:TestDriver [oslo.messaging.pika.connection_factory] new = oslo_messaging._drivers.pika_driver.pika_connection_factory:PikaConnectionFactory read_write = 
oslo_messaging._drivers.pika_driver.pika_connection_factory:ReadWritePikaConnectionFactory single = oslo_messaging._drivers.pika_driver.pika_connection_factory:SinglePikaConnectionFactory [oslo.messaging.zmq.matchmaker] dummy = oslo_messaging._drivers.zmq_driver.matchmaker.zmq_matchmaker_base:MatchmakerDummy redis = oslo_messaging._drivers.zmq_driver.matchmaker.zmq_matchmaker_redis:MatchmakerRedis sentinel = oslo_messaging._drivers.zmq_driver.matchmaker.zmq_matchmaker_redis:MatchmakerSentinel oslo.messaging-5.35.0/oslo.messaging.egg-info/SOURCES.txt0000664000175100017510000003200113224676256023077 0ustar zuulzuul00000000000000.coveragerc .testr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst babel.cfg bindep.txt requirements.txt setup-test-env-kafka.sh setup-test-env-zmq-direct-dynamic.sh setup-test-env-zmq-proxy.sh setup-test-env-zmq-pub-sub.sh setup-test-env-zmq.sh setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/admin/AMQP1.0.rst doc/source/admin/drivers.rst doc/source/admin/index.rst doc/source/admin/pika_driver.rst doc/source/admin/zmq_driver.rst doc/source/configuration/conffixture.rst doc/source/configuration/index.rst doc/source/configuration/opts.rst doc/source/contributor/contributing.rst doc/source/contributor/driver-dev-guide.rst doc/source/contributor/index.rst doc/source/contributor/supported-messaging-drivers.rst doc/source/reference/exceptions.rst doc/source/reference/executors.rst doc/source/reference/index.rst doc/source/reference/notification_driver.rst doc/source/reference/notification_listener.rst doc/source/reference/notifier.rst doc/source/reference/rpcclient.rst doc/source/reference/serializer.rst doc/source/reference/server.rst doc/source/reference/target.rst doc/source/reference/transport.rst doc/source/user/FAQ.rst doc/source/user/history.rst doc/source/user/index.rst etc/routing_notifier.yaml.sample oslo.messaging.egg-info/PKG-INFO oslo.messaging.egg-info/SOURCES.txt oslo.messaging.egg-info/dependency_links.txt oslo.messaging.egg-info/entry_points.txt oslo.messaging.egg-info/not-zip-safe oslo.messaging.egg-info/pbr.json oslo.messaging.egg-info/requires.txt oslo.messaging.egg-info/top_level.txt oslo_messaging/__init__.py oslo_messaging/_i18n.py oslo_messaging/_utils.py oslo_messaging/conffixture.py oslo_messaging/dispatcher.py oslo_messaging/exceptions.py oslo_messaging/opts.py oslo_messaging/serializer.py oslo_messaging/server.py oslo_messaging/target.py oslo_messaging/transport.py oslo_messaging/version.py oslo_messaging/_cmd/__init__.py oslo_messaging/_cmd/zmq_proxy.py oslo_messaging/_drivers/__init__.py oslo_messaging/_drivers/amqp.py oslo_messaging/_drivers/amqpdriver.py oslo_messaging/_drivers/base.py oslo_messaging/_drivers/common.py oslo_messaging/_drivers/impl_amqp1.py oslo_messaging/_drivers/impl_fake.py oslo_messaging/_drivers/impl_kafka.py oslo_messaging/_drivers/impl_pika.py oslo_messaging/_drivers/impl_rabbit.py oslo_messaging/_drivers/impl_zmq.py oslo_messaging/_drivers/pool.py oslo_messaging/_drivers/amqp1_driver/__init__.py oslo_messaging/_drivers/amqp1_driver/addressing.py oslo_messaging/_drivers/amqp1_driver/controller.py oslo_messaging/_drivers/amqp1_driver/eventloop.py oslo_messaging/_drivers/amqp1_driver/opts.py oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst oslo_messaging/_drivers/kafka_driver/__init__.py oslo_messaging/_drivers/kafka_driver/kafka_options.py oslo_messaging/_drivers/pika_driver/__init__.py 
oslo_messaging/_drivers/pika_driver/pika_commons.py oslo_messaging/_drivers/pika_driver/pika_connection.py oslo_messaging/_drivers/pika_driver/pika_connection_factory.py oslo_messaging/_drivers/pika_driver/pika_engine.py oslo_messaging/_drivers/pika_driver/pika_exceptions.py oslo_messaging/_drivers/pika_driver/pika_listener.py oslo_messaging/_drivers/pika_driver/pika_message.py oslo_messaging/_drivers/pika_driver/pika_poller.py oslo_messaging/_drivers/zmq_driver/__init__.py oslo_messaging/_drivers/zmq_driver/zmq_address.py oslo_messaging/_drivers/zmq_driver/zmq_async.py oslo_messaging/_drivers/zmq_driver/zmq_names.py oslo_messaging/_drivers/zmq_driver/zmq_options.py oslo_messaging/_drivers/zmq_driver/zmq_poller.py oslo_messaging/_drivers/zmq_driver/zmq_socket.py oslo_messaging/_drivers/zmq_driver/zmq_updater.py oslo_messaging/_drivers/zmq_driver/zmq_version.py oslo_messaging/_drivers/zmq_driver/client/__init__.py oslo_messaging/_drivers/zmq_driver/client/zmq_ack_manager.py oslo_messaging/_drivers/zmq_driver/client/zmq_client.py oslo_messaging/_drivers/zmq_driver/client/zmq_client_base.py oslo_messaging/_drivers/zmq_driver/client/zmq_publisher_manager.py oslo_messaging/_drivers/zmq_driver/client/zmq_receivers.py oslo_messaging/_drivers/zmq_driver/client/zmq_request.py oslo_messaging/_drivers/zmq_driver/client/zmq_response.py oslo_messaging/_drivers/zmq_driver/client/zmq_routing_table.py oslo_messaging/_drivers/zmq_driver/client/zmq_senders.py oslo_messaging/_drivers/zmq_driver/client/zmq_sockets_manager.py oslo_messaging/_drivers/zmq_driver/client/publishers/__init__.py oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_publisher_base.py oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/__init__.py oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_base.py oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_direct.py oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_proxy.py oslo_messaging/_drivers/zmq_driver/matchmaker/__init__.py oslo_messaging/_drivers/zmq_driver/matchmaker/zmq_matchmaker_base.py oslo_messaging/_drivers/zmq_driver/matchmaker/zmq_matchmaker_redis.py oslo_messaging/_drivers/zmq_driver/poller/__init__.py oslo_messaging/_drivers/zmq_driver/poller/green_poller.py oslo_messaging/_drivers/zmq_driver/poller/threading_poller.py oslo_messaging/_drivers/zmq_driver/proxy/__init__.py oslo_messaging/_drivers/zmq_driver/proxy/zmq_base_proxy.py oslo_messaging/_drivers/zmq_driver/proxy/zmq_proxy.py oslo_messaging/_drivers/zmq_driver/proxy/zmq_sender.py oslo_messaging/_drivers/zmq_driver/proxy/central/__init__.py oslo_messaging/_drivers/zmq_driver/proxy/central/zmq_central_proxy.py oslo_messaging/_drivers/zmq_driver/proxy/central/zmq_publisher_proxy.py oslo_messaging/_drivers/zmq_driver/proxy/local/__init__.py oslo_messaging/_drivers/zmq_driver/proxy/local/zmq_local_proxy.py oslo_messaging/_drivers/zmq_driver/server/__init__.py oslo_messaging/_drivers/zmq_driver/server/zmq_incoming_message.py oslo_messaging/_drivers/zmq_driver/server/zmq_server.py oslo_messaging/_drivers/zmq_driver/server/zmq_ttl_cache.py oslo_messaging/_drivers/zmq_driver/server/consumers/__init__.py oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_consumer_base.py oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_dealer_consumer.py oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_sub_consumer.py 
oslo_messaging/hacking/__init__.py oslo_messaging/hacking/checks.py oslo_messaging/notify/__init__.py oslo_messaging/notify/_impl_log.py oslo_messaging/notify/_impl_noop.py oslo_messaging/notify/_impl_routing.py oslo_messaging/notify/_impl_test.py oslo_messaging/notify/dispatcher.py oslo_messaging/notify/filter.py oslo_messaging/notify/listener.py oslo_messaging/notify/log_handler.py oslo_messaging/notify/logger.py oslo_messaging/notify/messaging.py oslo_messaging/notify/middleware.py oslo_messaging/notify/notifier.py oslo_messaging/rpc/__init__.py oslo_messaging/rpc/client.py oslo_messaging/rpc/dispatcher.py oslo_messaging/rpc/server.py oslo_messaging/rpc/transport.py oslo_messaging/tests/__init__.py oslo_messaging/tests/test_config_opts_proxy.py oslo_messaging/tests/test_exception_serialization.py oslo_messaging/tests/test_expected_exceptions.py oslo_messaging/tests/test_fixture.py oslo_messaging/tests/test_opts.py oslo_messaging/tests/test_target.py oslo_messaging/tests/test_transport.py oslo_messaging/tests/test_urls.py oslo_messaging/tests/test_utils.py oslo_messaging/tests/utils.py oslo_messaging/tests/drivers/__init__.py oslo_messaging/tests/drivers/test_amqp_driver.py oslo_messaging/tests/drivers/test_impl_kafka.py oslo_messaging/tests/drivers/test_impl_rabbit.py oslo_messaging/tests/drivers/test_pool.py oslo_messaging/tests/drivers/pika/__init__.py oslo_messaging/tests/drivers/pika/test_message.py oslo_messaging/tests/drivers/pika/test_poller.py oslo_messaging/tests/drivers/zmq/__init__.py oslo_messaging/tests/drivers/zmq/test_impl_zmq.py oslo_messaging/tests/drivers/zmq/test_pub_sub.py oslo_messaging/tests/drivers/zmq/test_routing_table.py oslo_messaging/tests/drivers/zmq/test_zmq_ack_manager.py oslo_messaging/tests/drivers/zmq/test_zmq_address.py oslo_messaging/tests/drivers/zmq/test_zmq_async.py oslo_messaging/tests/drivers/zmq/test_zmq_transport_url.py oslo_messaging/tests/drivers/zmq/test_zmq_ttl_cache.py oslo_messaging/tests/drivers/zmq/test_zmq_version.py oslo_messaging/tests/drivers/zmq/zmq_common.py oslo_messaging/tests/drivers/zmq/matchmaker/__init__.py oslo_messaging/tests/drivers/zmq/matchmaker/test_impl_matchmaker.py oslo_messaging/tests/functional/__init__.py oslo_messaging/tests/functional/test_functional.py oslo_messaging/tests/functional/test_rabbitmq.py oslo_messaging/tests/functional/utils.py oslo_messaging/tests/functional/notify/__init__.py oslo_messaging/tests/functional/notify/test_logger.py oslo_messaging/tests/functional/zmq/__init__.py oslo_messaging/tests/functional/zmq/multiproc_utils.py oslo_messaging/tests/functional/zmq/test_startup.py oslo_messaging/tests/notify/__init__.py oslo_messaging/tests/notify/test_dispatcher.py oslo_messaging/tests/notify/test_listener.py oslo_messaging/tests/notify/test_log_handler.py oslo_messaging/tests/notify/test_logger.py oslo_messaging/tests/notify/test_middleware.py oslo_messaging/tests/notify/test_notifier.py oslo_messaging/tests/rpc/__init__.py oslo_messaging/tests/rpc/test_client.py oslo_messaging/tests/rpc/test_dispatcher.py oslo_messaging/tests/rpc/test_server.py playbooks/oslo.messaging-devstack-full/post.yaml playbooks/oslo.messaging-devstack-full/pre.yaml playbooks/oslo.messaging-devstack-full/run.yaml playbooks/oslo.messaging-src-dsvm-full-amqp1-dual-centos-7/post.yaml playbooks/oslo.messaging-src-dsvm-full-amqp1-dual-centos-7/run.yaml playbooks/oslo.messaging-src-dsvm-full-amqp1-hybrid/post.yaml playbooks/oslo.messaging-src-dsvm-full-amqp1-hybrid/run.yaml 
playbooks/oslo.messaging-src-dsvm-full-kafka-default/post.yaml playbooks/oslo.messaging-src-dsvm-full-kafka-default/run.yaml playbooks/oslo.messaging-src-dsvm-full-kafka-default-centos-7/post.yaml playbooks/oslo.messaging-src-dsvm-full-kafka-default-centos-7/run.yaml playbooks/oslo.messaging-src-dsvm-full-pika-default/post.yaml playbooks/oslo.messaging-src-dsvm-full-pika-default/run.yaml playbooks/oslo.messaging-src-dsvm-full-rabbit-default/post.yaml playbooks/oslo.messaging-src-dsvm-full-rabbit-default/run.yaml playbooks/oslo.messaging-src-dsvm-full-zmq-default/post.yaml playbooks/oslo.messaging-src-dsvm-full-zmq-default/run.yaml playbooks/oslo.messaging-src-grenade-dsvm/post.yaml playbooks/oslo.messaging-src-grenade-dsvm/run.yaml playbooks/oslo.messaging-src-grenade-dsvm-multinode/post.yaml playbooks/oslo.messaging-src-grenade-dsvm-multinode/run.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-amqp1/post.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-amqp1/run.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-kafka/post.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-kafka/run.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-pika/post.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-pika/run.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-rabbit/post.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-rabbit/run.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-zmq/post.yaml playbooks/oslo.messaging-telemetry-dsvm-integration-zmq/run.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid/post.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid/run.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-kafka-default/post.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-kafka-default/run.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-pika-default/post.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-pika-default/run.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-rabbit-default/post.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-rabbit-default/run.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-zmq-default/post.yaml playbooks/oslo.messaging-tempest-neutron-dsvm-src-zmq-default/run.yaml releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml releasenotes/notes/blocking-executor-deprecated-895146c1c3bf2f51.yaml releasenotes/notes/connection_ttl-2cf0fe6e1ab8c73c.yaml releasenotes/notes/fix-access_policy-deafult-a6954a147cb002b0.yaml releasenotes/notes/get_rpc_transport-4aa3511ad9754a60.yaml releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d672a2.yaml releasenotes/notes/pika-driver-has-been-deprecated-e2407fa53c91fe5c.yaml releasenotes/notes/rabbit-no-wait-for-ack-9e5de3e1320d7660.yaml releasenotes/notes/remove-RequestContextSerializer-234c0496a7e0376b.yaml releasenotes/notes/retry-support-07996ef04dda9482.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po tools/functions.sh tools/messages_length.yaml tools/setup-test-env-amqp1.sh tools/simulator.py tools/test-setup.shoslo.messaging-5.35.0/oslo.messaging.egg-info/pbr.json0000664000175100017510000000005613224676255022675 0ustar zuulzuul00000000000000{"git_version": "1ccdccd", "is_release": 
true}oslo.messaging-5.35.0/oslo.messaging.egg-info/dependency_links.txt0000664000175100017510000000000113224676255025264 0ustar zuulzuul00000000000000 oslo.messaging-5.35.0/oslo.messaging.egg-info/not-zip-safe0000664000175100017510000000000113224676172023442 0ustar zuulzuul00000000000000 oslo.messaging-5.35.0/oslo.messaging.egg-info/PKG-INFO0000664000175100017510000000360713224676255022321 0ustar zuulzuul00000000000000
Metadata-Version: 1.1
Name: oslo.messaging
Version: 5.35.0
Summary: Oslo Messaging API
Home-page: https://docs.openstack.org/oslo.messaging/latest/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description-Content-Type: UNKNOWN
Description: ========================
        Team and repository tags
        ========================

        .. image:: https://governance.openstack.org/tc/badges/oslo.messaging.svg
            :target: https://governance.openstack.org/tc/reference/tags/index.html

        .. Change things from this point on

        Oslo Messaging Library
        ======================

        .. image:: https://img.shields.io/pypi/v/oslo.messaging.svg
            :target: https://pypi.python.org/pypi/oslo.messaging/
            :alt: Latest Version

        .. image:: https://img.shields.io/pypi/dm/oslo.messaging.svg
            :target: https://pypi.python.org/pypi/oslo.messaging/
            :alt: Downloads

        The Oslo messaging API supports RPC and notifications over a number of
        different messaging transports.

        * License: Apache License, Version 2.0
        * Documentation: https://docs.openstack.org/oslo.messaging/latest/
        * Source: https://git.openstack.org/cgit/openstack/oslo.messaging
        * Bugs: https://bugs.launchpad.net/oslo.messaging

Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Information Technology
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
oslo.messaging-5.35.0/oslo.messaging.egg-info/top_level.txt0000664000175100017510000000001713224676255023746 0ustar zuulzuul00000000000000
oslo_messaging
oslo.messaging-5.35.0/test-requirements.txt0000666000175100017510000000230713224676046021037 0ustar zuulzuul00000000000000
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

# Hacking already pins down pep8, pyflakes and flake8
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0

fixtures>=3.0.0 # Apache-2.0/BSD
mock>=2.0.0 # BSD
python-subunit>=1.0.0 # Apache-2.0/BSD
testrepository>=0.0.18 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
oslotest>=1.10.0 # Apache-2.0
pifpaf>=0.10.0 # Apache-2.0

# for test_matchmaker_redis
redis>=2.10.0 # MIT

# for test_impl_zmq
pyzmq>=14.3.1 # LGPL+BSD

# for test_impl_kafka
kafka-python>=1.3.1 # Apache-2.0

# when we can require tox>= 1.4, this can go into tox.ini:
# [testenv:cover]
# deps = {[testenv]deps} coverage
coverage!=4.4,>=4.0 # Apache-2.0

# this is required for the docs build jobs
sphinx>=1.6.2 # BSD
openstackdocstheme>=1.17.0 # Apache-2.0
reno>=2.5.0 # Apache-2.0

# AMQP 1.0 support depends on the Qpid Proton AMQP 1.0
# development libraries.
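The PKG-INFO description above summarizes the whole API: RPC and notifications over interchangeable transports. A minimal, self-contained sketch of an RPC round trip using the in-tree fake driver (listed under [oslo.messaging.drivers] earlier), so no broker is required; the endpoint class and topic name are illustrative only:

from oslo_config import cfg
import oslo_messaging

conf = cfg.CONF
# the fake:// scheme selects FakeDriver, an in-memory transport
transport = oslo_messaging.get_transport(conf, 'fake:///')

class DemoEndpoint(object):
    def echo(self, ctxt, text):
        return text

server = oslo_messaging.get_rpc_server(
    transport, oslo_messaging.Target(topic='demo', server='server-1'),
    [DemoEndpoint()], executor='threading')
server.start()

client = oslo_messaging.RPCClient(transport,
                                  oslo_messaging.Target(topic='demo'))
print(client.call({}, 'echo', text='hello'))  # -> 'hello'

server.stop()
server.wait()

Swapping the URL for, say, rabbit://user:pass@host:5672/vhost (or any other scheme in the driver list) changes the backend without touching the endpoint code.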
pyngus>=2.2.0 # Apache-2.0

# Bandit security code scanner
bandit>=1.1.0 # Apache-2.0

eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT
greenlet>=0.4.10 # MIT
oslo.messaging-5.35.0/CONTRIBUTING.rst0000666000175100017510000000104313224676046017233 0ustar zuulzuul00000000000000
If you would like to contribute to the development of OpenStack, you must
follow the steps on this page:

   https://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow documented
at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/oslo.messaging
oslo.messaging-5.35.0/setup-test-env-zmq-pub-sub.sh0000777000175100017510000000151313224676046022216 0ustar zuulzuul00000000000000
#!/bin/bash
set -e

. tools/functions.sh

DATADIR=$(mktemp -d /tmp/OSLOMSG-ZEROMQ.XXXXX)
trap "clean_exit $DATADIR" EXIT

export ZMQ_MATCHMAKER=redis
export ZMQ_REDIS_PORT=65123
export ZMQ_IPC_DIR=${DATADIR}
export ZMQ_USE_PUB_SUB=true
export ZMQ_USE_ROUTER_PROXY=true
export ZMQ_USE_ACKS=false
export TRANSPORT_URL="zmq+${ZMQ_MATCHMAKER}://127.0.0.1:${ZMQ_REDIS_PORT}"
export ZMQ_PROXY_HOST=127.0.0.1

# write the proxy config and start the zmq proxy in the background
# (logging to zmq-proxy.log), then execute the wrapped command
cat > ${DATADIR}/zmq.conf < ${DATADIR}/zmq-proxy.log 2>&1 &
$*
oslo.messaging-5.35.0/setup-test-env-zmq-proxy.sh0000777000175100017510000000151413224676046022023 0ustar zuulzuul00000000000000
#!/bin/bash
set -e

. tools/functions.sh

DATADIR=$(mktemp -d /tmp/OSLOMSG-ZEROMQ.XXXXX)
trap "clean_exit $DATADIR" EXIT

export ZMQ_MATCHMAKER=redis
export ZMQ_REDIS_PORT=65123
export ZMQ_IPC_DIR=${DATADIR}
export ZMQ_USE_PUB_SUB=false
export ZMQ_USE_ROUTER_PROXY=true
export ZMQ_USE_ACKS=false
export TRANSPORT_URL="zmq+${ZMQ_MATCHMAKER}://127.0.0.1:${ZMQ_REDIS_PORT}"
export ZMQ_PROXY_HOST=127.0.0.1

# same wrapper pattern as above, with PUB/SUB disabled
cat > ${DATADIR}/zmq.conf < ${DATADIR}/zmq-proxy.log 2>&1 &
$*
oslo.messaging-5.35.0/ChangeLog0000664000175100017510000017316613224676255016354 0ustar zuulzuul00000000000000CHANGES ======= 5.35.0 ------ * Add kafka driver vhost emulation * Updated from global requirements * Create doc/requirements.txt * Update kafka functional test * Imported Translations from Zanata * Updated from global requirements 5.34.1 ------ * Imported Translations from Zanata * Avoid tox\_install.sh for constraints support * rabbitmq: don't wait for message ack/requeue * Provide bindep\_profile in openstack-tox job setup * Updated from global requirements * Add zmq packages that are no longer in bindep-fallback * don't convert generator to list unless required * sort when using groupby 5.34.0 ------ * Remove setting of version/release from releasenotes * Updated from global requirements * Updated from global requirements * Catch socket.timeout when doing heartbeat\_check * Updated from global requirements * fix batch handling * Remove stable/newton from zuul settings * Zuul: add file extension to playbook path 5.33.1 ------ * Move legacy zuulv3 tests into oslo.messaging repo * Imported Translations from Zanata * Flesh out transport\_url help * Fix typo in contributor docs title 5.33.0 ------ * Fix default value of RPC dispatcher access\_policy * Fix wrong transport warnings in functional tests * Updated from global requirements 5.32.0 ------ * Updated from global requirements * Warn when wrong transport instance is used * Fix some reST field lists in docstrings * Remove pbr version from setup.py * Suppress excessive debug logs
when consume rabbit * Fix use of print function on python3 5.31.0 ------ * Remove envelope argument from driver send() interface * Imported Translations from Zanata * Updated from global requirements * Update amqp 1.0 driver deployment guide * Prevent rabbit from raising unexpected exceptions * Updated from global requirements * Remove unnecessary setUp function in testcase * Add licenses and remove unused import in doc/source/conf.py * Ensure RPC endpoint target attribute is correct * Fix a typo * Update links in README * Updated from global requirements * Class-level \_exchanges in FakeExchangeManager * fix 'configration' typo * Update reno for stable/pike * Add support for virtual hosts * Remove the test that counts kombu connect calls 5.30.0 ------ * Updated from global requirements * Update URLs in documents according to document migration * Add monkey\_patch to demo code 5.29.0 ------ * switch from oslosphinx to openstackdocstheme * update the docs url in the readme * rearrange content to fit the new standard layout * Updated from global requirements * Enable some off-by-default checks 5.28.0 ------ * Updated from global requirements * Add kafka\_driver directory 5.27.0 ------ * Updated from global requirements * Fix html\_last\_updated\_fmt for Python3 * Add note for blocking executor deprecation * Fix rabbitmq driver with blocking executor * Build universal wheels * Updated from global requirements * Fix serializer tests * deprecated blocking executor 5.26.0 ------ * Updated from global requirements * Clean up the TransportURL documentation * Mark the Pika driver as deprecated 5.25.0 ------ * Updated from global requirements * Updated from global requirements * Add missing {posargs:} to AMQP 1.0 functional tests * rabbit: restore synchronous ack/requeue 5.24.2 ------ * Updated from global requirements * [AMQP 1.0] Properly shut down test RPC server 5.24.1 ------ * Updated from global requirements * Fix the amqp1 SSL test CA certificate * Add get\_rpc\_transport call * Disable AMQP 1.0 SSL unit tests 5.24.0 ------ 5.23.0 ------ * Fix notification tests not unmocking logging * Remove use of mox stubs * Fix aliases deprecation * tests: fix MultiStrOpt value * Retry support for oslo\_messaging\_notifications driver 5.22.0 ------ * [AMQP 1.0] Add default SASL realm setting * Updated from global requirements * Remove usage of parameter enforce\_type 5.21.0 ------ * Optimize the link address * [AMQP 1.0] if RPC call is configured as presettled ignore acks * Mock 'oslo\_messaging.notify.\_impl\_routing.LOG' in notifier tests * Updated from global requirements * Add "ssl" option for amqp driver * Refactor logic of getting exector's executor\_thread\_pool\_size * remove all kombu<4.0.0 workarounds 5.20.0 ------ * serializer: remove deprecated RequestContextSerializer * Try to fix TestSerializer.test\_call\_serializer failed randomly * Updated from global requirements * Deprecate username/password config options in favor of TRANSPORT\_URL * Add HACKING.rst * Break user credentials from host at the rightmost '@' * [zmq] Prevent access to rpc\_response\_timeout * [zmq] pass a dummy TransportURL to register\_opts * Fix simulator's use of Notifier - use 'topics' not 'topic' * Trivial: Add executor 'threading' in docstring * Deprecate parameter aliases * Use Sphinx 1.5 warning-is-error * tox: Build docs with Python 2.7 5.19.0 ------ * Updated from global requirements * Remove self.mox * Move decorator updated\_kwarg\_default\_value to right place 5.18.0 ------ * Remove old messaging notify driver 
alias * [Fix gate]Update test requirement * Updated from global requirements * Allow checking if notifier is enabled * RabbitMQ: Standardize SSL parameter names * drop topic keyword from Notifier * Validate the transport url query string * drivers: use common.ConfigOptsProxy everywhere * Stop using oslotest.mockpatch * tests: don't run functional tests in parallel * rabbit: make ack/requeue thread-safe * Fix releasenotes * Remove mox3 from test-requirements.txt * Updated from global requirements * [zmq] Update configurations documentation * Fix type of the kafka\_consumer\_timeout option * [zmq] Dynamic connections send failure * support kombu4 * Test:Use unittest.mock on Python 3 * Fix the typo * pbr.version.VersionInfo needs package name (oslo.xyz and not oslo\_xyz) * [zmq] Properly analyse \`use\_dynamic\_connections\` option * [zmq] Dummy add value aging mechanism * kafka: skip multiple servers tests * kafka: ensure topics are created * kafka: fix python3 exception * kafka: Remove testing hack for kafka * [zmq] Failure of dynamic connections fanout * Update reno for stable/ocata * Return list of addresses for IPV4 and IPV6 5.17.0 ------ * [zmq] Dynamic connections failover * [zmq] Fix py35 gate * [zmq] Use more stable configuration in voting job * Remove references to Python 3.4 * [AMQP 1.0] Fix SSL client authentication * [zmq] Support py35 testenv * [zmq] Distinguish Round-Robin/Fanout socket sending mode * tests: cleanup monkey path * [AMQP 1.0] Resend messages that are released or modified * gate: Remove useless files * [zmq] Redis TTL for values * eventlet is no more a hard dependency * [AMQP 1.0] Propagate authentication errors to caller * ensure we set channel in lock * tox: use already installed kafka if present * kafka: remove no really implemented feature * kafka: return to poller when timeout is reach * kafka: Don't hide unpack/unserialize exception * kafka: timeout is in milliseconds * kafka: disable batch for functional tests * kafka: Remove Producer singleton * Moving driver to new kafka-python version * tox: rename zeromq target * tests: make rabbit failover failure more helpful * [zmq] Refactor make \`zmq\_address.target\_to\_key\` a universal method * Updated from global requirements * [zmq] Restore static direct connections * reject when skipping failed messages * fix one typo * [AMQP 1.0] Setup the amqp1 test environment on ubuntu * test\_rabbitmq: remove dead code 5.16.0 ------ * Updated from global requirements * Replace mox with mock * tests: fix test-setup.sh * tests: remove useless debug * [rabbit] Log correct topic on cast/call 5.15.0 ------ * Updated from global requirements * kafka separate unit/functionnal tests * Add bindep.txt/test-setup.sh to prepare the system * [zmq] Matchmaker redis available time 5.14.0 ------ * [AMQP 1.0] Simplify the I/O event loop code * [zmq] Support message versions for rolling upgrades * [zmq] Fix non voting gate jobs * Fix transport url with empty port * Remove ordering assumption from functional test * Periodically purge sender link cache 5.13.0 ------ * Remove small job timeout * Register opts if we're going to check conf.transport\_url in parse() * [doc] Fix three typos * [zmq] Fix zmq-specific f-tests from periodic hangs * [zmq] Fix functional gates proxy/pub-sub * Show team and repo badges on README * [zmq] Send fanouts without pub/sub in background * Use assertGreater(len(x), 0) instead of assertTrue(len(x) > 0) * Add Constraints support * Replace six.iteritems() with .items() * [zmq] Fix configuration for functional gate 
job * Document the transport backend driver interface * Fix a docstring typo in impl\_pika.py * [sentinel] Move master/slave discovering from \_\_init\_\_ * rabbit: on reconnect set socket timeout after channel is set * Updated from global requirements * [zmq] Don't create real matchmaker in unit tests * update srouce doc pika\_driver.rst the charactor then to than * Remove useless logging import statements * rabbit: Avoid busy loop on epoll\_wait with heartbeat+eventlet * [zmq] Refactor receivers * [zmq] Cleanup changes to zmq-specific f-tests * Updated from global requirements * This patch cleans up the 'notification\_listener.rst' documetion by removing some class which don't exist and adding some function which exist in current source * Remove nonexistent functions from documentation * Replace retrying with tenacity 5.12.0 ------ * Updated from global requirements * Updated from global requirements * Remove the temporary hack in code * Using assertIsNone() instead of assertEqual(None) * Change assertTrue(isinstance()) by optimal assert * [zmq] Don't fallback to topic if wrong server specified * [TrivialFix] Replace old style assertions with new style assertions * [TrivialFix] Fix typo in oslo.messaging * [simulator] Fix transport\_url usage * [simulator] Fix a message length generator usage * Update .coveragerc after the removal of respective directory * [sentinels] Fix hosts extracting and slaves usage * [zmq] SUB-PUB local proxy 5.11.0 ------ * Fix typos in addressing.py and setup.cfg * Updated from global requirements * Record length of queues for ReplyWaiters * rabbit: Don't prefetch when batch\_size is set * [AMQP 1.0] Avoid unnecessary thread switch on ack * [zmq] Fix issues with broken messages on proxies * [zmq] Maintain several redis hosts * Removed redundant 'the' * Fix a typo in server.py * [document] The example which is written in the developer guide of 'Notification Listener' doesn't work * Enable release notes translation * cast() and RPC replies should not block waiting for endpoint to ack * [simulator] Automatic stopping of rpc-servers * Fix whitespace formatting issue * Properly deserializes built-in exceptions * [zmq] Fix send\_cast in AckManager * Remove debug logs from fast path * [zmq] Routing table refactoring, dynamic direct connections * Fix simulator bool command line args * Replace 'the' with 'to' in docstring * Remove default=None when set value in Config * [zmq] Add acks from proxy for PUB/SUB messages * [zmq] Refactor consumers and incoming messages * [zmq] Make second ROUTER socket optional for proxy * Use method fetch\_current\_thread\_functor from oslo.utils * [zmq] Fix ZmqSocket.send\_string * [zmq] Remove unused methods from executors * [zmq] Added a processing to handle ImportError in Redis plugin of Matchmaker * modify the home-page info with the developer documentation * Set the valid choices for the rabbit login methods * [zmq] Unify delimeters * [zmq] Fix fanout without PUB/SUB * [zmq] Send immediate ack after message receiving * Corrects documentation typo * [zmq] Remove unnecessary subscriptions from SubConsumer * Fixups to the inline documentation * Fix consuming from unbound reply queue * Add configurable serialization to pika * [zmq] Remove ZmqSocket.close\_linger attribute * [zmq] Make ZMQ TCP keepalive options configurable * [zmq] Fix TestZmqAckManager periodic failure * [zmq] Make ThreadingPoller work with ZmqSocket * Fix notify filter when data item is None * [zmq] Rename rpc\_cast\_timeout option * [AMQP 1.0] Update setup test 
environment dispatch router backend * Allow dispatcher to restrict endpoint methods * [AMQP 1.0] Add Acknowledgement and Batch Notification Topics * Update reno for stable/newton * [kafka] invoke TypeError exception when 'listen()' method of KafkaDriver is called * [zmq] Proxy has to skip broken multi-part message * Add Documentation String for PikaDriver * [zmq] Implement retries for unacknowledged CALLs 5.10.0 ------ * [AMQP 1.0] Make the default settlement behavior configurable * [zmq] Eliminate GreenPool from GreenPoller * Avoid sending cast after server shutdown in functional test * [zmq] Update ZMQ-driver documentation * Updated from global requirements 5.9.0 ----- * [zmq] Add --log-file option to zmq-proxy * Updated from global requirements * [zmq] Host name and target in socket identity 5.8.0 ----- * [zmq] Make zmq\_immediate configurable * Fix calculating of duration in simulator.py * [zmq] Redis unavailability is not critical * [zmq] Discover new publisher proxy * Clean outdated docstring and comment * [AMQP 1.0] small fixes to improve timer scalability * Add docstring for get\_notification\_transport * Add warning when credential is not specified for each host * Updated from global requirements * [zmq] Implement retries for unacknowledged CASTs * Fix the help info format 5.7.0 ----- * Move zmq driver options into its own group * Log a warning when connected to a routable message bus * Updated from global requirements * [AMQP 1.0] Add link credit configuration options * Updated from global requirements * [AMQP 1.0] AMQP 1.0 Driver User Guide Document update * AMQP 1.0 Driver Architecture Overview Document * Remove the max\_send\_retries option 5.6.0 ----- * Fix pika functional tests * [zmq] Use zmq.IMMEDIATE option for round-robin * fix a typo in impl\_rabbit.py * Updated from global requirements * [AMQP 1.0] Cancel response treatment for detached link * Fix syntax error on notification listener docs * Delete fanout queues on gracefully shutdown * Set the default link property to allow message acks * Properly cleanup listener and driver on simulator exit * Fix a timer leak in the AMQP 1.0 driver * [zmq] Let proxy serve on a static port numbers * Introduce TTL for idle connections * Fix parameters of assertEqual are misplaced * Fix misstyping issue * Updated from global requirements * Updated from global requirements * notify: add a CLI tool to manually send notifications * Add deprecated relnote for max\_retries rabbit configuration option * [zmq] Add py34 configuration for functional tests * [zmq] Merge publishers * Add Python 3.5 classifier and venv * Replace assertEqual(None, \*) with assertIsNone in tests * Updated from global requirements * [zmq] Use json/msgpack instead of pickle * [AMQP 1.0] Add configuration parameters for send message deadline * [zmq] Refactor publishers * Re-factor the AMQP 1.0 addressing semantics * Add Python 3.4 functional tests for AMQP 1.0 driver * tests: allow to override the functionnal tests suite args * [zmq] Additional configurations for f-tests * Remove discover from test-requirements * tests: rabbitmq failover tests * [AMQP 1.0] Add acknowledge and requeue handling for incoming message * Imported Translations from Zanata * Updated from global requirements * Remove rabbitmq max\_retries * Config: no need to set default=None 5.5.0 ----- * [zmq] Fix message sending when using proxy and not using PUB/SUB * AMQP 1.0 - create only one Cyrus SASL configuration for the tests * Updated from global requirements * Refactor AMQP 1.0 command task to 
support timers * [zmq] Remove redundant Envelope class * [zmq] Properly stop ZmqServer * Refactor link management to support link recovery * [Trival] fix a typo nit * [zmq] Fix backend router port for proxy 5.4.0 ----- * [zmq] Remove unused Request.close method * Add query paramereters to TransportURL * Fix temporary problems with pika unit tests * [zmq] Periodic updates of endpoints connections 5.3.0 ----- * Improve the impl\_rabbit logging * Modify info of default\_notification\_exchange * Imported Translations from Zanata * [zmq] Remove rpc\_zmq\_concurrency option * [zmq] Fix timeout in ThreadingPoller.poll * Fix typo: 'olso' to 'oslo' * Updated from global requirements * [zmq] Don't skip non-direct message types * [zmq] Refactoring of zmq client * [impl\_rabbit] Remove deprecated get\_expiration method 5.2.0 ----- * Updated from global requirements * [AMQP 1.0] Randomize host list connection attempts * Modify the TransportURL's docstrings * Fix problems after refactoring RPC client * deprecate usage of transport aliases * Documents recommended executor * kafka: Deprecates host, port options * Updated from global requirements * Add reno for releasenotes management * Remove logging from serialize\_remote\_exception * [kafka] Add several bootstrap servers support * Add the proper branch back to .gitreview * Fix consuming from missing queues * Fix bug with version\_cap and target.version in RPCClient * Make TransportURL.parse aware of transport\_url * rabbit: Deprecates host, port, auth options * Remove deprecated localcontext * zeromq: Deprecates host, port options * Reorganize the AMQP 1.0 driver source files * Implements configurable connection factory * The need to wait for a given time is no longer valid in 3.2+ * [zmq] Reduce object serialization on router proxy * Updated from global requirements * [zmq] Add backend ROUTER to increase bandwidth * [zmq] Add Sentinel instructions to deployment guide * Rabbit driver: failure of rpc-calls with float timeout 5.1.0 ----- * Use eventletutils to check is\_monkey\_patched * remove feature branch from master .gitreview * [zmq] Second router proxy doesn't dispatch messages properly * Add parse.unquote to transport\_url * Fix simulator stat printing * Use single producer and to avoid an exchange redeclaration * [zmq] Redesign router proxy * Add feature branch to .gitreview file * Remove Beta development status from classifiers 5.0.0 ----- * Updated from global requirements * Fixes sumulator.py signal\_handler logic * Refactor RPC client * Send notify if notify=True passed * Improves exception handling and logging * Implements pika thread safe connection * Fix incorrect parameters order in assertIn call * Update the RPC cast() documentation * Fix unstable work of cast func tests * [zmq] Reduce threading from python proxy * Imported Translations from Zanata * use thread safe fnmatch * Refactor base interfaces * Gracefully handle missing TCP\_USER\_TIMEOUT * Simulator: handle SIGINT and SIGTERM signals * Updated from global requirements * Log the unique\_id in listener than msg\_id * serializer: deprecate RequestContextSerializer * Don't set html\_last\_updated\_fmt without git * Amqp driver send method temporary work-around * Updated from global requirements * Updated from global requirements * Allow simulator to be launched from arbitrary directory * [zmq] Fix cast message loss in simulator * Make transport\_url config option secret * Fix oslo.messaging for Mac OS X * Refactor driver's listener interface * [kafka] Do not remove kafka\_client 
during reset * Updated from global requirements * Replace expriration\_time by timer * [zmq] Reduce number of connections * Move server related logic from dispatchers * Fix typos in Oslo.messaging files * Fix Break in Windows platforms * [py34] replace file() with open() * Claim python3 compatability for Newton onwards * Simulator: collect error stats * Simulator: make parameter wait\_after\_msg float * Update CheckForLoggingIssues hacking rule from keystone * Simulator: align stats to whole seconds * Support python3 in simulator.py * Fix typo passend should be passenv * Always set all socket timeouts * Add a py34 functional test for rabbit * Small fixes * Use only unique topics for the Kafka driver * [zmq] Refactoring consumer side * [Kafka] Ensure a topics before consume messages * Fix problems during unstable network * Missing version parameter in can\_send\_version() * Bump rabbit\_transient\_queues\_ttl to 30 mins * Explicitly exclude tests from bandit scan * Fix Notification listener blocking behavior * Pika: fix sending fanout messages * Revert "Ensure the json result type is bytes on Python 3" * Replace deprecated LOG.warn with LOG.warning * Simulator: store results in JSON format * Simulator: calculate message latency statistics * Fix the driver shutdown/failover logic * Always delete exc\_info tuple, even if reply fails * Do not leak Listeners on failover * Simulator: always use random messages for time-bound tests * Fallback if git is absent * Simulator: implement own random generator instead of scipy * Simulator: fix batch-notify-server command * Work with kombu from upstream * Fail quickly if there on bad password * [zmq] Dynamic port range is ignored * [zmq] Implement Response and Envelope classes * [kafka] Use notification priority * Make simulator more asynchronous * Adds exhange declaration on sender's side * Updated from global requirements 4.5.0 ----- * amqp: log time elapsed between receiving a message and replying * [zmq] Matchmaker redis set instead of list * Allow Notifier to have multiple topics * Fix a minor syntax error in a log statement * Use PortOpt on kafka\_default\_port * Added duration to notify server/client * Ensure the json result type is bytes on Python 3 * Improves logging * Use more efficient mask\_dict\_password to mask password * Improves poller's stop logic * Typos of 'recieve' instead of 'receive' * [zmq] Support transport URL * Get kafka notifications to work with kafka-python 0.9.5 * Move server's logic from executors * Avoid hardcoding the notification topic and specify driver * [zmq] Fix cinder create volume hangs * Py3: Replace filter()/map() if a list is needed * Py3: Switch json to oslo\_serialization * Updated from global requirements 4.4.0 ----- * Updated from global requirements * Option rpc\_response\_timeout should not be used in zmq driver * Remove duplicate requirements * Reduce number of rabbitmq consumer tag used * Documents the mirror queue policy of RabbitMQ 3.0 * fix override\_pool\_size * Remove executor callback * Log format change in simulator.py * Fix kombu accept different TTL since version 3.0.25 * .testr.conf: revert workaround of testtools bug * Remove aioeventlet executor 4.3.0 ----- * simulator.py improvements * rabbit: improvements to QoS * Updated from global requirements * Remove server queue creating if target's server is empty * Updated from global requirements * Correctly set socket timeout for publishing * Updated from global requirements * Use more secure yaml.safe\_load() instead of yaml.load() * [kombu] 
Implement experimental message compression * [zmq] Multithreading access to zmq sockets * [zmq] ZMQ\_LINGER default value * Remove matchmaker\_redis configs from [DEFAULT] * Refactors base classes 4.2.0 ----- * Switches pika driver to eager connection to RabbitMQ * Remove bandit.yaml in favor of defaults * [zmq] Use PUSH/PULL for direct CAST * Updated from global requirements * support ability to set thread pool size per listener * Fix misspellings * [zmq] RPC timeout for CAST * Enable pep8 on oslo\_messaging/tests 4.1.0 ----- * [zmq] Fix slow down * Update translation setup * Let PikaDriver inherit base.BaseDriver * Improve simulator.py * Fixed some warnings about imports and variable * test: Don't test message's reply timeout * Updated from global requirements * Adds document and configuration guide * [zmq] Support KeyboardInterrupt for broker * [zmq] Reduce proxy for direct messaging * Fixed a couple of pep8 errors/warnings * assertEquals is deprecated, use assertEqual * Updated from global requirements * Updated from global requirements * Trivial: Remove unused logging import * replace string format arguments with function parameters * Adds params field to BlockingConnection object * Python 3 deprecated the logger.warn method in favor of warning * Fix URL in warning message * [zmq] Implement background redis polling from the client-side * rabbit: Add option to configure QoS prefetch count * rabbit: making interval\_max configurable * Imported Translations from Zanata * Updated from global requirements * Logging rpc client/server targets * Updated from global requirements * Topic/server arguments changed in simulator.py * [zmq] Update zmq-guide with new options * [zmq] Listeners management cleanup * Drop H237,H402,H904 in flake8 ignore list * Replace deprecated library function os.popen() with subprocess * py3: Replaces xrange() with six.moves.range() * Kombu: make reply and fanout queues expire instead of auto-delete * fix .gitreview - bad merge from pika branch * Explicitly add pika dependencies * Add duration option to simulator.py * [zmq] Added redis sentinel HA implementation to zmq driver * rabbit: set interval max for auto retry * [zmq] Add TTL to redis records * Updated from global requirements * make enforce\_type=True in CONF.set\_override * Use assertTrue/False instead of assertEqual(T/F) * Improvement of logging acorrding to oslo.i18n guideline * Updated from global requirements * rabbit: fix unit conversion error of expiration * list\_opts: update the notification options group * rabbit: Missing to pass parameter timeout to next * Fix formatting of code blocks in zmq docs * Adds unit tests for pika\_poll module * Updated from global requirements * [zmq] Switch notifications to PUB/SUB pattern * Optimize sending of a reply in RPC server * Optimize simulator.py for better throughput * Remove stale directory synced from oslo-incubator * Fix wrong bugs report URL in CONTRIBUTING * zmq: Don't log error when can't import zmq module 4.0.0 ----- * assertIsNone(val) instead of assertEqual(None,val) * Adds tests for pika\_message.py * [zmq] PUB-SUB pipeline * Updated from global requirements * Fixes conflicts after merging master * Updated from global requirements * Move to debug a too verbose log * Cleanup parameter docstrings * Removes MANIFEST.in as it is not needed explicitely by PBR * Revert "default of kombu\_missing\_consumer\_retry\_timeout" * Don't trigger error\_callback for known exc * Adds comment for pika\_pooler.py * Improves comment * Fix reconnection when heartbeat is 
missed * Revert "serializer: deprecate RequestContextSerializer" * Fix notifier options registration * notif: Check the driver features in dispatcher * batch notification listener * Updated from global requirements * Adds comment, updates pika-pool version * Preparations for configurable serialization * creates a dispatcher abstraction * Remove unnecessary quote * Fix multiline strings with missing spaces * Properly skip zmq tests without ZeroMQ being installed * kombu: remove compat of folsom reply format * Follow the plan about the single reply message 3.1.0 ----- * default of kombu\_missing\_consumer\_retry\_timeout * rename kombu\_reconnect\_timeout option * Skip Cyrus SASL tests if proton does not support Cyrus SASL * setUp/tearDown decorator for set/clear override * Adds comments and small fixes * Support older notifications set\_override keys * Don't hold the connection when reply fail * doc: explain rpc call/cast expection * Add a driver for Apache Kafka * Option group for notifications * Move ConnectionPool and ConnectionContext outside amqp.py * Use round robin failover strategy for Kombu driver * Revert "serializer: remove deprecated RequestContextSerializer" * Updated from global requirements * [zmq] Random failure with ZmqPortRangeExceededException * [zmq] Driver optimizations for CALL * Updated from global requirements * Use oslo\_config new type PortOpt for port options * serializer: remove deprecated RequestContextSerializer * Add log info for AMQP client * Updated from global requirements * Provide missing parts of error messages * Add Warning when we cannot notify * ignore .eggs directory * serializer: deprecate RequestContextSerializer * middleware: remove oslo.context usage * Removes additional select module patching * Fix delay before host reconnecting 3.0.0 ----- * Remove qpidd's driver from the tree * Provide alias to oslo\_messaging.notify.\_impl\_messaging * make pep8 faster * Updated from global requirements * Robustify locking in MessageHandlingServer * Updated from global requirements * cleanup tox.ini 2.9.0 ----- * [zmq] Add config options to specify dynamic ports range * [zmq] Make bind address configurable * [zmq][matchmaker] Distinguish targets by listener types * [zmq] Update zmq-deployment guide according to the new driver * Implements more smart retrying * Make "Connect(ing|ed) to AMQP server" log messages DEBUG level * Updated from global requirements * Decouple transport for RPC and Notification * Fixing the server example code Added server.stop() before server.wait() 2.8.1 ----- * Revert "Robustify locking in MessageHandlingServer" * Splits pika driver into several files * Fixes and improvements after testing on RabbitMQ cluster: * Move supported messaging drivers in-tree 2.8.0 ----- * Add a "bandit" target to tox.ini * Fix fanout exchange name pattern * Updated from global requirements * Remove a useless statement * Robustify locking in MessageHandlingServer * Use "secret=True" for password-related options * Imported Translations from Zanata * Modify simulator.py tool * Fix target resolution mismatch in neutron, nova, heat * Use yaml.safe\_load instead of yaml.load * Trivial locking cleanup in test\_listener * Remove unused event in ServerThreadHelper * Fix a race calling blocking MessageHandlingServer.start() * Fix assumptions in test\_server\_wait\_method * Rename MessageHandlingServer.\_executor for readability * Implements rabbit-pika driver * bootstrap branch * Updated from global requirements 2.7.0 ----- * Updated from global requirements * Some 
executors are not async so update docstring to reflect that * Updated from global requirements * Updated from global requirements * Small grammar messaging fix * Use a condition (and/or a dummy one) instead of a lock * Updated from global requirements 2.6.1 ----- * Fix failures when zmq is not available 2.6.0 ----- * AMQP1.0: Turn off debug tracing when running tox * Fix typo in rpc/server.py and notify/listener.py * Fix a typo in server.py * Use the hostname from the Transport for GSSAPI Authentication * Adapt functional tests to pika-driver * ConfFixture should work even when zmq/redis is not present * Added matchmaker timeouts and retries * AMQP 1.0: Properly initialize AMQP 1.0 configuration options * Updated from global requirements * Workaround test stream corruption issue * Skip Redis specific tests when it is not installed * Port the AMQP 1.0 driver to Python 3 * rabbit: shuffle hosts before building kombu URL * Updated from global requirements * Remove unnecessary rpc\_zmq\_port option * Non-blocking outgoing queue was implemented * Allow custom notification drivers * Fix the home-page value with Oslo wikipage * Include changelog/history in docs * Fix spelling typo in output * Change ignore-errors to ignore\_errors * Unsubscribe target listener when leaving * Add SASL configuration options for AMQP 1.0 driver * Updated from global requirements * Fix a few leaks in the AMQP 1.0 driver * Disable ACL if authentication cannot be performed * Imported Translations from Zanata * Enhance start/stop concurrency race condition fix * Updated from global requirements * Extend logging in amqpdriver * Remove useless additional requirement file * Fix AMQP 1.0 functional and unit test failures * Provide the executor 'wait' function a timeout and use it 2.5.0 ----- * Imported Translations from Transifex * Update path to subunit2html in post\_test\_hook * Fix typos in a document and a comment * Updated from global requirements * Imported Translations from Transifex * Updated from global requirements * Port the AMQP1 driver to new Pyngus SASL API * Updated from global requirements * Imported Translations from Transifex * Updated from global requirements * Add config options to the documentation * Updated from global requirements 2.4.0 ----- * Mask passwords when logging messages * Updated from global requirements * Use proper translating helper for logging * Improve simulator.py 2.3.0 ----- * Imported Translations from Transifex * Added trace logging for debuggability * Log warning instead of raising RuntimeError * Use pickle instead of jsonutils for serialization * Updated from global requirements * Acknowledgements implementation * Replace 'M' with 'Mitaka' * Add if condition for random.shuffle * Fix message missing after duplicated message error * Fix fork-related issues * FIx CPU time consuming in green\_poller poll() * Documenting main driver classes * Notifier implementation * Imported Translations from Transifex * Fix BaseDriver.listen\_for\_notifications() signature * ZMQ: Minor matchmaker improvement * Imported Translations from Transifex * Updated from global requirements * Add unit tests for zmq\_async 2.2.0 ----- * Imported Translations from Transifex * ZMQ: \`Lazify\` driver code * Ensures that some assumptions are true * Remove oslo namespace package * Register matchmaker\_redis\_opts in RedisMatchMaker * Imported Translations from Transifex * Updated from global requirements * ZMQ: Removed unused code and tests * ZMQ: Run more functional tests * Get rid of proxy process in zmq * Fully 
use futurist code-base to abstract concurrent.futures away 2.1.0 ----- * Imported Translations from Transifex * Updated from global requirements * Close sockets properly * add plugin documentation for executors and notifiers * Allows to change defaults opts * Target direct usage * Move zmq tests into a subdirectory 2.0.0 ----- * Allow a forward slash as a part of the user/password * Update 'impl\_eventlet' docstring to reflect actual impl * Updated from global requirements * tests: adjusts an expected time for gate * Updated from global requirements * Ensure callback variable capture + cleanup is done correctly * Remove oslo namespace package * ZMQ: Initial matchmaker implementation * Updated from global requirements * Fix threading zmq poller and proxy * Don't install pyngus on Python 3 * Fix amqp connection pool leak in ConnectionContext * Executor docstring & attribute tweaks 1.17.1 ------ * Use the warn\_eventlet\_not\_patched util function * Drop use of 'oslo' namespace package 1.17.0 ------ * Updated from global requirements * Add unit tests for zmq\_serializer * Updated from global requirements * Fix work with timeout in CallRequest.receive\_reply() * Fix mock use for mock 1.1.0 * Make heartbeat the default * ZMQ: Allow to raise remote exception * Local Fanout implementation * Drop use of 'oslo' namespace package * Use oslo.log in the zmq receiver * Imported Translations from Transifex * Remove usage of contentmanager for executors * Verify that version in 'prepare' is valid 1.16.0 ------ * Fix qpid's functional gate * Don't reply when we known that client is gone * Remove py26 artefacts from oslo.messaging code * Remove 2.6 classifier * Imported Translations from Transifex * Add WebOb and greenlet to requirements * Use ServiceBase from oslo.service as a parent class * Manual update the requirements * Deprecated impl\_qpid * Add a missed \`raise\` statement * Remove qpid-config call * Initial commit for new zmq driver implementation * Add tox target to find missing requirements * Fix qpid's functional gate * Imported Translations from Transifex * fix typo * Correct RPCVersionCapError message 1.15.0 ------ * Drop use of 'oslo' namespace package * Update .gitreview for feature/zmq * Use \`inferred=True\` by default * Enable amqp's protocol unit tests everywhere * Switch badges from 'pypip.in' to 'shields.io' * Don't use devstack to setup our functional env * Switch to warnings module instead of versionutils * Updated from global requirements * Get mox from mox3, not from six.moves * rabbit: Add logging on blocked connection * Provide better detection of failures during message send 1.14.0 ------ * Reduce \`magic\` conf attribute usage * Imported Translations from Transifex * Remove leftover oslo.config reference * replace rpc\_response\_timeout use in rabbit driver * Enable \`fanout\_target\` scenarios in test\_impl\_rabbit * Add drivers to the documentation 1.13.0 ------ * Ensure rpc\_response\_timeout is registered before using it * rabbit: test for new reply behavior 1.12.0 ------ * Fix condition in \_publish\_and\_retry\_on\_missing\_exchange() * Set places to 0 in self.assertAlmostEqual() * Allow to remove second \_send\_reply() call * Don't create a new channel in RabbitMQ Connection.reset() * Imported Translations from Transifex * Adding Publisher Acknowledgements/confirms * Fix deprecated\_group of rpc\_conn\_pool\_size * Refactor processing reply in ReplyWaiter * rabbit: doc fixes * consumer connections not closed properly 1.11.0 ------ * rabbit: smart timeout on missing 
exchange * rabbit: Fix message ttl not work * rabbit: remove publisher classes * rabbit: Set timeout on the underlying socket * Remove stale copy of context.py * Add one more functional test for MessagingTimeout * Fix list\_opts test to not check all deps * make it possible to import amqp driver without dependencies * Remove outdated release notes * rabbit: smarter declaration of the notif. queue * rabbit: redeclare consumers when ack/requeue fail * Bump kombu and amqp requirements * Updated from global requirements * rabbit: fix exception path in queue redeclaration * rabbit: fix consumers declaration * rabbit: remove unused consumer interfaces * rabbit: remove unused code * rabbit: Remove unused stuffs from publisher * Remove support for Python 3.3 * Updated from global requirements * Add RequestContextSerializer * Updated from global requirements * rabbit: fixes a logging issue * rabbit/qpid: simplify the consumer loop * Updated from global requirements * Imported Translations from Transifex * Fix missing space in help text * zmq: Add support for ZmqClient pooling * Enable eventlet dependency on Python 3 * Add JsonPayloadSerializer serializer * Fix test\_matchmaker\_redis on Python 3 * Disable and mark heartbeat as experimental 1.10.0 ------ * Uncap library requirements for liberty * Port ZMQ driver to Python 3 * Use unittest.mock on Python 3 * Enable redis test dependency on Python 3 * Remove amqp driver 'unpacked content' logging * Updated from global requirements * Add pypi download + version badges * Fix TypeError caused by err\_msg formatting * Fix typo in oslo\_messaging/\_drivers/protocols/amqp/opts.py * Document notification\_driver possible values * Do not skip functional test for amqp driver * Add functional test for notify.logger * Properly deserialize received AMQP 1.0 messages * Make notify driver messaging play well with publish\_errors * Imported Translations from Transifex 1.9.0 ----- * Use the oslo\_utils stop watch in decaying timer * Updated from global requirements * Remove 'UNIQUE\_ID is %s' logging * Sync with latest oslo-incubator * rabbit: fix ipv6 support * Create a unique transport for each server in the functional tests * Publish tracebacks only on debug level * Add pluggability for matchmakers * Make option [DEFAULT]amqp\_durable\_queues work * Reconnect on connection lost in heartbeat thread * Don't raise Timeout on no-matchmaker results * Imported Translations from Transifex * cleanup connection pool return * rabbit: Improves logging * fix up verb tense in log message * rabbit: heartbeat implementation * Fix changing keys during iteration in matchmaker heartbeat * Minor improvement * ZeroMQ deployment guide * Fix a couple typos to make it easier to read * Tiny problem with notify-server in simulator * Fix coverage report generation * Add support for multiple namespaces in Targets * tools: add simulator script * Deprecates the localcontext API * Update to oslo.context * Remove obsolete cross tests script * Fix the bug redis do not delete the expired keys 1.8.0 ----- * Updated from global requirements * NotifyPublisher need handle amqp\_auto\_delete * Fix matchmaker\_redis ack\_alive fails with KeyError * Properly distinguish between server index zero and no server 1.7.0 ----- * Add FAQ entry for notifier configuration * rabbit: Fix behavior of rabbit\_use\_ssl * amqp1: fix functional tests deps * Skip functional tests that fail due to a qpidd bug * Use import of zmq package for test skip * Remove unnecessary log messages from amqp1 unit tests * Include 
missing parameter in call to listen\_for\_notifications * Fix the import of the driver by the unit test * Add a new aioeventlet executor * Add missing unit test for a recent commit * Add the threading executor setup.cfg entrypoint * Move each drivers options into its own group * Refactor the replies waiter code * Imported Translations from Transifex * Fix notifications broken with ZMQ driver * Gate functionnal testing improvements * Treat sphinx warnings as errors * Move gate hooks to the oslo.messaging tree * Set the password used in gate * Update README.rst format to match expectations 1.6.0 ----- * Declare DirectPublisher exchanges with passive=True * Updated from global requirements * Expose \_impl\_test for designate * Update Oslo imports to remove namespace package * Speedup the rabbit tests * Fix functionnal tests * kombu: fix driver loading with kombu+qpid scheme * Fixed docstring for Notifier * zmq: Refactor test case shared code * Add more private symbols to the old namespace package * Updated from global requirements * Adjust tests for the new namespace * Fixes test\_two\_pools\_three\_listener * Add TimerTestCase missing tests case * Ensure kombu channels are closed * fix qpid test issue with eventlet monkey patching * Make setup.cfg packages include oslo.messaging * Upgrade to hacking 0.10 * Implements notification-dispatcher-filter * Add oslo.messaging.\_drivers.common for heat tests * Port zmq driver to Python 3 * Make sure zmq can work with redis * fix qpid test issue with eventlet monkey patching * Move files out of the namespace package * Add a info log when a reconnection occurs * rabbit: fix timeout timer when duration is None * Don't log each received messages * Fix some comments in a backporting review session * Enable IPv6-support in libzmq by default * Add a thread + futures executor based executor * safe\_log Sanitize Passwords in List of Dicts * Updated from global requirements * rabbit: add some tests when rpc\_backend is set * Warns user if thread monkeypatch is not done * Add functional and unit 0mq driver tests * The executor doesn't need to set the timeout * qpid: honor iterconsume timeout * rabbit: more precise iterconsume timeout * Workflow documentation is now in infra-manual * Touch up grammar in warning messages 1.5.1 ----- * Reintroduces fake\_rabbit config option * Make the RPCVersionCapError message clearer * Doc: 'wait' releases driver connection, not 'stop' * Don't allow call with fanout target * Imported Translations from Transifex * Add an optional executor callback to dispatcher 1.5.0 ----- * Rabbit: Fixes debug message format * Rabbit: iterconsume must honor timeout * Don't use oslo.cfg to set kombu in-memory driver * Don't share connection pool between driver object * Show what the threshold is being increased to * Wait for expected messages in listener pool test * Dispath messages in all listeners in a pool * Reduces the unit tests run times * Set correctly the messaging driver to use in tests * Always use a poll timeout in the executor * Have the timeout decrement inside the wait() method * Warn user if needed when the process is forked * Renamed PublishErrorsHandler * Fix reconnect race condition with RabbitMQ cluster * Create a new connection when a process fork has been detected * Add more TLS protocols to rabbit impl * Remove the use of PROTOCOL\_SSLv3 * Add qpid and amqp 1.0 tox targets * Updated from global requirements * Imported Translations from Transifex * rabbit: uses kombu instead of builtin stuffs * Allows to overriding oslotest 
environ var * Create ZeroMQ Context per socket * Remove unused param of the ConnectionContext * Updated from global requirements * Add basic tests for 0mq matchmakers * Notification listener pools * Updated from global requirements * Fix tiny typo in server.py * Switch to oslo.middleware * Updated from global requirements * Activate pep8 check that \_ is imported * Enable user authentication in the AMQP 1.0 driver * Documentation anomaly in TransportURL parse classmethod * Don't put the message payload into warning log * Updated from global requirements * Fix incorrect attribute name in matchmaker\_redis * Add pbr to installation requirements * Updated from global requirements * Add driver independent functional tests * Imported Translations from Transifex * zmq: Remove dead code * Updated from global requirements * Finish transition to oslo.i18n * Imported Translations from Transifex * Imported Translations from Transifex * qpid: Always auto-delete queue of DirectConsumer * Updated from global requirements * Imported Translations from Transifex * Enable oslo.i18n for oslo.messaging * Switch to oslo.serialization * Cleanup listener after stopping rpc server * Updated from global requirements * Track the attempted method when raising UnsupportedVersion * fix memory leak for function \_safe\_log * Stop using importutils from oslo-incubator * Add missing deprecated group amqp1 * Updated from global requirements * Stop using intersphinx * Add documentation explaining how to use the AMQP 1.0 driver * Imported Translations from Transifex * Construct ZmqListener with correct arguments * Message was sent to wrong node when using zmq as rpc\_backend * Work toward Python 3.4 support and testing * Ensure the amqp options are present in config file * Add contributing page to docs * Import notifier middleware from oslo-incubator * Let oslotest manage the six.moves setting for mox 1.4.1 ----- * Imported Translations from Transifex * Add square brackets for ipv6 based hosts * An initial implementation of an AMQP 1.0 based messaging driver * warn against sorting requirements * Improve help strings * Switch to oslo.utils * Fix Python 3 testing * Import oslo-incubator context module * Import oslo-incubator/middleware/base * Should not send replies for cast messages * Port to Python 3 * Sync jsonutils from oslo-incubator * Add parameter to customize Qpid receiver capacity * Make tests pass with random python hashseed * Set sample\_default for rpc\_zmq\_host * Enable PEP8 check E714 * Enable PEP8 check E265 * Enable PEP8 check E241 * Fix error in example of an RPC server * Replace lambda method \_ * Enable check for E226 * Updated from global requirements * Add release notes for 1.4.0.0a4 * Add release notes for stable/icehouse 1.3.1 release 1.4.0.0a4 --------- * Enabled hacking checks H305 and H307 * Bump hacking to version 0.9.2 * Fixes incorrect exchange lock in fake driver * Imported Translations from Transifex 1.4.0.0a3 --------- * Add release notes for 1.4.0.0a2/a3 * Fix AMQPListener for polling with timeout * Replaced 'e.g.' 
with 'for example' * Use assertEqual instead of assertIs for strings 1.4.0.0a2 --------- * Fix structure of unit tests in oslo.messaging (part 3 last) * Fix structure of unit tests in oslo.messaging (part 2) * Fix slow notification listener tests * Fix encoding error in file * Fix info method of ListenerSetupMixin 1.4.0.0a1 --------- * Add release notes for 1.4.0.0a1 * Fix formatting of TransportURL.parse() docs * Remove duplicate docs for MessageHandlingServer * Add missing docs for list\_opts() * Add 'docs' tox environment * Replace usage of str() with six.text\_type * Fix structure of unit tests in oslo.messaging (part 1) * Synced jsonutils and its dependencies from oslo-incubator * Ensures listener queues exist in fake driver * RPC server doc: use the blocking executor * Fix the notifier example * Removes the use of mutables as default args * Set correct group for matchmaker\_redis options * replace string format arguments with function parameters * Removes contextlib.nested * Transport reconnection retries for notification * Disable connection pool in qpid interfaces tests * Updated from global requirements * Add check credentials to log message if rabbitmq closes socket * Fix the notify method of the routing notifier * Handle unused allowed\_remote\_exmods in \_multi\_send * rabbit/qpid: remove the args/kwargs from ensure() * Add an example usage of RPCClient retry parameter * Add transport reconnection retries * Add an optional timeout parameter to Listener.poll * Bump hacking to 0.9.x series * Removes unused config option * fixed pep8 issue E265 * Setup for translation * Updated from global requirements * Remove amqp default exchange hack * remove default=None for config options * Cleaned up references to executor specific RPCServer types * Make the TransportUrl hashable * debug level logs should not be translated * Explicitly name subscription queue for responses * Fix passing envelope variable as timeout * Updated from global requirements * Synced jsonutils from oslo-incubator * Remove str() from LOG.\* and exceptions * Remove dependent module py3kcompat * Enable log messages to handle exceptions containing unicode * Updated from global requirements * Fix typo in docstring of notify/notifier * Full support of multiple hosts in transport url * Logical error in blockless fanout of zmq * Select AMQP message broker at random * Use a for loop to set the defaults for \_\_call\_\_ params * Update ensure()/reconnect() to catch MessagingError * Remove old drivers dead code * Import run\_cross\_tests.sh from oslo-incubator * Remove redundant parentheses of cfg help strings * zmq: switch back to not using message envelopes * Trivial: Fix assertEqual arguments order * Oslo-messaging-zmq-receiver cannot receive any messages 1.3.0 ----- * Add release notes for 1.3.0 * Ensure routing key is specified in the address for a direct producer * Fix wrong parameter description in docstring * Fixed inconsistent EventletContextManagerSpawnTest failures * Use messaging\_conf fixture configuration by default * Fixed possible pep8 failure due to pyflakes bug * Refactor AMQP message broker selection * Add unit test to check the order of Qpid hosts on reconnect * Fixed the pop exception issue * Clean up for qpid tests * Add kombu driver library to requirements.txt * Use driver's notify\_send() method again * Remove vim header * Updated from global requirements * Fixed spelling error - runnung to running * Build log\_handler documentation * Add release notes up to 1.3.0a9 1.3.0a9 ------- * Remove use of 
sslutils 1.3.0a8 ------- * Expose PublishErrorsHandler through oslo.messaging * Use mock's call assert methods over call\_args\_list * notify listener: document the metadata callback parameter * Add missing data into the notif. endpoint callback * notification listener: add allow\_requeue param * Adds unit test cases to impl\_qpid * Do not leak \_unique\_id out of amqp drivers * Add multiple exchanges per listener in fake driver * Allow to requeue the notification message * Slow down Kombu reconnect attempts * Don't run python 3 tests by default * Gracefully handle consumer cancel notifications * Updated from global requirements * Convert to oslo.test * Add log\_handler to oslo.messaging * Add a link to the docs from the README * Pass the session to QpidMessage constructor * Use a more accurate max\_delay for reconnects * Make the dispatcher responsible for the message ack * Don't reply to notification message * Abstract the acknowledge layer of a message * Implements notification listener and dispatcher * Switch over to oslosphinx * Improve help strings * Update ExpectedException handling * Ignore any egg and egg-info directories * Qpid: advance thru the list of brokers on reconnect * RabbitMQ: advance thru the list of brokers on reconnect 1.3.0a7 ------- * Make the dispatcher responsible for listen() * Allow fake driver to consume multiple topics * Allow different login methods to be used with kombu connections 1.3.0a6 ------- * Use stevedore's make\_test\_instance * Expose an entry point to list all config options * Fix test case name typo * Fix UnboundLocalError error 1.3.0a5 ------- * Fix help strings * Add release notes for 1.3.0a3 * python3: Switch to mox3 instead of mox * Remove dependencies on pep8, pyflakes and flake8 * Routing notifier 1.3.0a4 ------- * Removes use of timeutils.set\_time\_override * Fix spelling errors in comments * Fix test\_notifier\_logger for Python 3 * Minor Python 3 fixes * Remove copyright from empty files * Fix duplicate topic messages for Qpid topology=2 * Replace dict.iteritems() with six.iteritems() * Remove unused eventlet/greenlet from qpid/rabbit * fix test\_rabbit for Python 3 * Fix try/except syntax for Python 3 * Fix exception deserialization on Python 3 * Add Sample priority * synchronize oslo-incubator modules * Remove eventlet related code in amqp driver * Fix syntax of relative imports for Python3 * Updated from global requirements * Updated from global requirements * Unify different names between Python2 and Python3 * Replace data structures' attribute with six module * Avoid creating qpid connection twice in initialization * Use six.moves.queue instead of Queue * Add transport aliases * Remove the partial implementation of ack\_on\_error * Fixed misspellings of common words * Add release notes for 1.3.0a2 * Unify different names between Python2/3 with six.moves * Remove vim header * Ensure context type is handled when using to\_dict * Refactors boolean returns 1.3.0a2 ------- * Simplify common base exception prototype * Properly reconnect subscribing clients when QPID broker restarts * Remove useless global vars / import * Avoid storing configuration in notifier * Implement a log handler using notifier * notifier: add audit level * Add 'warning' as an alias to 'warn' * Decouple from Oslo uuidutils module * Supply missing argument to raise\_invalid\_topology\_version() * Support a new qpid topology * Remove hosts as property in TransportURL * Remove property on virtual\_host in TransportURL * Updated from global requirements * Fix some typos 
and adjust capitalization * Changes driver method for notifications 1.3.0a1 ------- * Properly handle transport URL config on the client * Updated from global requirements * Updated from global requirements * Replace assertEquals with assertEqual * Properly handle transport:///vhost URL * Updated from global requirements * Make rpc\_backend default to 'rabbit' * Apply six for metaclass * Add third element to RPC versions for backports * Fix rpc client docs * Updated from global requirements * Remove cruft from setup.cfg * Updated from global requirements * Fixes a typo in the address string syntax * Implement the server side of ZmqDriver * Add zmq-receiver * Implement the client side of ZmqDriver * Import zmq driver code with minimal modifications 1.2.0a11 -------- * Fix race-condition in rabbit reply processing * Fix error message if message handler fails * Don't include msg\_id or reply\_q in casts * Remove check\_for\_lock support in RPCClient 1.2.0a10 -------- * Add a Notifier.prepare() method 1.2.0a9 ------- * Fix dictionary changed size during iteration 1.2.0a8 ------- * Fix transport URL parsing bug 1.2.0a7 ------- * Fix rabbit driver handling of None, etc. replies 1.2.0a6 ------- * Remove ConfFixture from toplevel public API * Fix fake driver handling of failure replies * Bumps hacking to 0.7.0 * Fix transport URL ipv6 parsing support 1.2.0a5 ------- * Fix handling of None, etc. replies 1.2.0a4 ------- 1.2.0a3 ------- * Add a unit testing configuration fixture * Add a TransportURL class to the public API 1.2.0a2 ------- * Ensure namespace package is installed 1.2.0a1 ------- * Add transport URL support to rabbit driver * Kill ability to specify exchange in transport URL * Fix capitalization, it's OpenStack * Fix handling expected exceptions in rabbit driver * Add thread-local store of request context * Add a context serialization hook * Removes a redundant version\_is\_compatible function * Document how call() handles remote exceptions * Add a per-transport allow\_remote\_exmods API * Expose RemoteError exception in the public API * Implement failure replies in the fake driver * Add API for expected endpoint exceptions * Add a driver method specifically for sending notifications * Enforce target preconditions outside of drivers * Add comments to ReplyWaiter.wait() * Remove some FIXMEs and debug logging * Remove unused IncomingMessage.done() * Implement wait\_for\_reply timeout in rabbit driver * Use testtools.TestCase assertion methods * Implement failure replies in rabbit driver * Add test with multiple waiting sender threads * Fix race condition in ReplyWaiters.wake\_all() * Add rabbit unit test for sending and receiving replies * Add some docs on target version numbers * Add tests for rabbit driver wire protocol * Pop \_unique\_id when checking for duplicates * Add a transport cleanup() method * Remove my notes and test scripts * Add initial qpid driver * Move most new rabbit driver code into amqpdriver * Move rpc\_conn\_pool\_size into amqp * Add simple rabbit driver unit test * Temporarily add eventlet to requirements * Add missing gettextutils * Add unit tests for object pool * Remove only\_free param from Pool.get() * Connection pool bugfix * Remove unused file * Add exception serialization tests * Don't call consume() each time iterconsume() is called * Add test code for the rabbit driver * Remove use of gettextutils * Add initial rabbit driver * Remove use of openstack.common.local * Use stdlib logging * Don't register options with cfg.CONF at module import * Port away from 
some eventlet infrastructure * Adjust imports in rabbit/qpid drivers * Import some needed modules from oslo-incubator * Add oslo-incubator code unmodified * Make executor threads more robust * Allow use of hacking 0.6.0 and fix min version * Include docstrings in published docs * Use oslo.sphinx and remove local copy of doc theme * Add some notes * Unit tests for notifier * Make test notifier useful * Use lowercase priority in log notifier * Use lowercase priority in notifications topic * Handle case where no drivers configured * Fix buglet in v2 messaging notifier * Make LOG private in notifier * Require a transport to construct a Notifier * Add serializer support to notifier * Rename context to ctxt in serializer API * Rename context to ctxt in notify API * Make Notifier public at top-level * Docstrings for notifier API * Fix notify drivers namespace * Remove backwards compat entry point aliases * Simplify public symbol exports * Use assertEqual() rather than assertEquals() * Remove accidental use of messaging.rpc\_server * Make exchange\_from\_url() use parse\_url() * Unit tests for URL parsing code * Fix parse\_urls() buglets * Move url utils into messaging.\_urls * Don't use common logging * Update example scripts for recent API changes * Fix fake driver with eventlet * Use log.warning() instead of log.warn() * Fix some pep8 issues * Don't translate exception messages * Knock off a few TODOs * Add can\_send\_version() to RPCClient * Check start() does nothing on a running server * Remove unused statements in base serializer * Fix thinko in exchange\_from\_url() * Call wait() in server tests * Add docstrings for base executor class * Remove a fixed fixme * Add a client call timeout test * Don't raise a driver-specific error on send * Add some docstrings to driver base * Test a bunch more RPC server scenarios * Make it possible to call prepare() on call context * Rework how queues get created in fake driver * Use testscenarios * Move files to new locations for oslo.messaging * Import stuff from oslo-incubator * Add oslo.messaging project infrastructure * Add some RPC server tests * More gracefully handle "no listeners" in fake driver * Better error handling in server.start() * Re-work server API to eliminate server subclasses * Add license header to \_executors/\_\_init\_\_.py * Add RPCDispatcher tests * Check for return value in client serializer test * Add note about can\_send\_version() * More client unit tests * Make RPCClient.check\_for\_lock a callable * Apply version cap check when casting * Make RPCVersionCapError extend base exception * Remove a bogus param from client.prepare() docs * pep8 fixes for serializer code * Simple RPCClient test * Unit tests * Move some stuff into doc/ * Implement Target.\_\_eq\_\_() * Fix bug in exchange\_from\_url() * pep8 fixes for fake driver * Make utils.parse\_url() docstring pep8 compliant * Don't translate exceptions * Misc pep8 fixes * pep8 fixes for toplevel package * Some error handling improvements * Recommend wrapping the client class rather than subclassing * Document how to use RPCClient directly * Document the public RPC API * Fix defaults for client.prepare() args * Fix client.cast() typo * Fix version\_cap typo * Allow all target attributes in client.prepare() * Expose Serializer from top-level namespace * Allow specifying a serializer when creating a server * Make endpoint.target optional * Dispatch methods in their own greenthreads * Make rpc.dispatcher private * Make the base RPCServer class private * Fix typo with the serializer 
work * Update use of stevedore * Require topics and target in notify driver constructors * Add generic serialization support * Support namespace in RPCClient.prepare() * Add parse\_url to \_utils * Remove entry point lists from the public API * Support capping message versions in the client * Fix RPCClient check\_for\_lock() * First cut at the notifier API * Add some notes * Add IncomingMessage abstraction * Pass a context dict * Fix docstring * Implement a fake driver * Adding reply infrastructure * Add some exceptions * Fix buglet with default timeout * Fix target/namespace target buglet * Fix rpc client buglets * Fix 'Blockinging' typos * Missing self parameter to server start() * Fix default\_exchange typo * Add forgotten piece of eventlet executor * It's \_executors not \_executor * Make poll() just return the message * Make drivers list public again * Add top-level convenience aliases * Prefix the executors module with underscore * Prefix the messaging.server module with an underscore * Prefix the drivers module with an underscore * Make transport methods private * Fix little typo in server exception class name * Add missing utils module * Add convenience RPC server classes * Update changes.txt for recent API changes * Use : for loading classes in entry\_points * Split the dispatcher from the executor and server * Make driver and transport methods public * Pass the driver instance to the listener instead of config * Try out replacing "executor" for "dispatcher" * Fix host vs server typo * Initial framework oslo.messaging-5.35.0/requirements.txt0000666000175100017510000000206413224676046020062 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr!=2.1.0,>=2.0.0 # Apache-2.0 futurist>=1.2.0 # Apache-2.0 oslo.config>=5.1.0 # Apache-2.0 oslo.log>=3.30.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 stevedore>=1.20.0 # Apache-2.0 debtcollector>=1.2.0 # Apache-2.0 monotonic>=0.6 # Apache-2.0 # for jsonutils six>=1.10.0 # MIT cachetools>=2.0.0 # MIT License WebOb>=1.7.1 # MIT # for the routing notifier PyYAML>=3.10 # MIT # rabbit driver is the default # we set the amqp version to ensure heartbeat works amqp!=2.1.4,>=2.1.1 # BSD kombu!=4.0.2,>=4.0.0 # BSD pika>=0.10.0 # BSD pika-pool>=0.1.3 # BSD # used by pika and zmq drivers futures>=3.0.0;python_version=='2.7' or python_version=='2.6' # BSD tenacity>=3.2.1 # Apache-2.0 # middleware oslo.middleware>=3.31.0 # Apache-2.0 oslo.messaging-5.35.0/LICENSE0000666000175100017510000002665213224676046015614 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. --- License for python-keystoneclient versions prior to 2.1 --- All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of this project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
oslo.messaging-5.35.0/AUTHORS0000664000175100017510000001703413224676255015651 0ustar zuulzuul00000000000000Aaron Rosen Abhijeet Malawade Adam Spiers Ala Rezmerita Alex Holden Alexei Kornienko Alexey Lebedeff Alexi Yelistratov Andreas Jaeger Andreas Jaeger Andrew Smith Anh Tran Assaf Muller Atsushi SAKAI Ayoub BOUSSELMI BANASHANKAR KALEBELAGUNDI VEERA Balazs Gibizer Ben Nemec Boris Pavlovic Brant Knudson Brian Elliott Chang Bo Guo ChangBo Guo(gcb) Chet Burgess Chris Dent Christian Berendt Christian Strack Clark Boylan Claudiu Belu Clint Byrum Corey Wright Cyril Roelandt Daisuke Fujita Dan Prince Dan Smith Davanum Srinivas (dims) Davanum Srinivas Davanum Srinivas David Medberry Dina Belova Dirk Mueller Dmitriy Ukhlov Dmitry Mescheryakov Dmitry Tantsur Dong Ma Doug Hellmann Doug Hellmann Doug Royal Edan David Edu Alcaniz Elancheran Subramanian Elena Ezhova Eric Brown Eric Guo Fei Long Wang Flaper Fesp Flavio Percoco Frode Nordahl Gauvain Pocentek George Silvis, III Gevorg Davoian Gordon Sim Gregory Haynes Haifeng.Yan Hanxi Liu Hiroyasu.OHYAMA Hu Yupeng Ihar Hrachyshka Ildar Svetlov Ilya Pekelny Ilya Shakhat Ilya Shakhat Ilya Tyaptin Iswarya_Vakati James Carey James E. Blair James Page Jamie Lennox Javeme Jens Rosenboom Jeremy Hanmer Jeremy Liu Jeremy Stanley JiaJunsu Jian Wen Jie Li Jim Rollenhagen Joe Gordon Joe Harrison John Eckersberg John L. Villalovos Joshua Harlow Joshua Harlow Joshua Harlow Juan Antonio Osorio Robles Julien Danjou Kenneth Giusti Kevin Benton Kirill Bespalov Komei Shimamura Konstantin Kalin Kui Shi Lance Bragstad Li Ma Li-zhigang Lukas Bezdicka Luong Anh Tuan Mark McLoughlin Matt Riedemann Matt Riedemann Matthew Booth Mehdi Abaakouk Mehdi Abaakouk Mehdi Abaakouk Mitsuhiro SHIGEMATSU Monty Taylor Nejc Saje Nguyen Hung Phuong Nicolas Simonds Nikhil Manchanda Nikola Dipanov Numan Siddique Oleksii Zamiatin OpenStack Release Bot Oscar Huang Paul Michali Paul Vinciguerra Pierre Riteau QingchuanHao Rajath Agasthya Ronald Bradford Ruby Loo Russell Bryant Ryan Rossiter Sandy Walsh Sean Dague Sergey Lukjanov Sergey Vilgelm Shahar Lev Stanislav Kudriashev Stanisław Pitucha Stephen Finucane Swapnil Kulkarni (coolsvap) Takashi NATSUME Thomas Bechtold Thomas Goirand Thomas Herve Thomas Herve Tony Breeds Victor Sergeyev Victor Stinner Victor Stinner Vincent Untz William Henry Xavier Queralt XiaBing Yao YAMAMOTO Takashi Yulia Portnova ZhangHongtao Zhao Lei Zhen Qin Zhi Kun Liu ZhiQiang Fan ZhongShengping Zhongyue Luo Zuul armando-migliaccio avnish blue55 chenxing dparalen dukhlov ericxiett gecong1973 gord chung gordon chung gtt116 howardlee hussainchachuliya jazeltq jinxingfang jolie joyce kbespalov kgriffs lidong lihong7313 <38098369@qq.com> lingyongxu liu-lixiu liusheng liuyamin loooosy lqslan maoshuai melissaml ozamiatin ricolin sonu.kumar tengqm ting.wang venkatamahesh wanglmopenstack weiweigu yan.haifeng zhangjl zhangshengping2012 zhangxuanyuan zhiCHang1990 oslo.messaging-5.35.0/PKG-INFO0000664000175100017510000000360713224676256015700 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: oslo.messaging Version: 5.35.0 Summary: Oslo Messaging API Home-page: https://docs.openstack.org/oslo.messaging/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.messaging.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. 
Change things from this point on Oslo Messaging Library ====================== .. image:: https://img.shields.io/pypi/v/oslo.messaging.svg :target: https://pypi.python.org/pypi/oslo.messaging/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.messaging.svg :target: https://pypi.python.org/pypi/oslo.messaging/ :alt: Downloads The Oslo messaging API supports RPC and notifications over a number of different messaging transports. * License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.messaging/latest/ * Source: https://git.openstack.org/cgit/openstack/oslo.messaging * Bugs: https://bugs.launchpad.net/oslo.messaging Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 oslo.messaging-5.35.0/etc/0000775000175100017510000000000013224676256015350 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/etc/routing_notifier.yaml.sample0000666000175100017510000000134313224676046023102 0ustar zuulzuul00000000000000# Setting a priority AND an event means both have to be satisfied. # # However, defining different sets for the same driver allows you # to do OR operations. # # See how this logic is modelled below: # # if (priority in info, warn or error) or # (event == compute.scheduler.run_instance) # send to messaging driver ... # # if priority == 'poll' and # event == 'bandwidth.*' # send to poll driver group_1: messaging: accepted_priorities: ['info', 'warn', 'error'] poll: accepted_priorities: ['poll'] accepted_events: ['bandwidth.*'] log: accepted_events: ['compute.instance.exists'] group_2: messaging: accepted_events: ['compute.scheduler.run_instance.*'] oslo.messaging-5.35.0/.zuul.yaml0000666000175100017510000003206113224676046016537 0ustar zuulzuul00000000000000- job: name: oslo.messaging-tox-py27-func-amqp1 parent: openstack-tox-py27 vars: tox_envlist: py27-func-amqp1 - job: name: oslo.messaging-tox-py27-func-kafka parent: openstack-tox-py27 vars: tox_envlist: py27-func-kafka bindep_profile: kafka - job: name: oslo.messaging-tox-py27-func-pika parent: openstack-tox-py27 vars: tox_envlist: py27-func-pika bindep_profile: pika - job: name: oslo.messaging-tox-py27-func-rabbit parent: openstack-tox-py27 vars: tox_envlist: py27-func-rabbit bindep_profile: rabbit - job: name: oslo.messaging-tox-py27-func-zmq parent: openstack-tox-py27 vars: tox_envlist: py27-func-zmq bindep_profile: zmq - job: name: oslo.messaging-tox-py27-func-zmq-proxy parent: openstack-tox-py27 vars: tox_envlist: py27-func-zmq-proxy bindep_profile: zmq - job: name: oslo.messaging-tox-py27-func-zmq-pubsub parent: openstack-tox-py27 vars: tox_envlist: py27-func-zmq-pubsub bindep_profile: zmq - job: name: oslo.messaging-tox-py35-func-amqp1 parent: openstack-tox-py35 vars: tox_envlist: py35-func-amqp1 - job: name: oslo.messaging-tox-py35-func-rabbit parent: openstack-tox-py35 vars: tox_envlist: py35-func-rabbit bindep_profile: rabbit - job: name: oslo.messaging-tox-py35-func-zmq parent: openstack-tox-py35 vars: tox_envlist: py35-func-zmq bindep_profile: zmq - job: name: oslo.messaging-src-dsvm-full-rabbit-default parent: legacy-dsvm-base run: 
playbooks/oslo.messaging-src-dsvm-full-rabbit-default/run.yaml post-run: playbooks/oslo.messaging-src-dsvm-full-rabbit-default/post.yaml timeout: 10800 required-projects: - openstack-infra/devstack-gate - openstack/oslo.messaging - job: name: oslo.messaging-src-dsvm-full-pika-default parent: legacy-dsvm-base run: playbooks/oslo.messaging-src-dsvm-full-pika-default/run.yaml post-run: playbooks/oslo.messaging-src-dsvm-full-pika-default/post.yaml timeout: 10800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-pika - openstack/oslo.messaging - job: name: oslo.messaging-src-dsvm-full-amqp1-dual-centos-7 parent: legacy-dsvm-base run: playbooks/oslo.messaging-src-dsvm-full-amqp1-dual-centos-7/run.yaml post-run: playbooks/oslo.messaging-src-dsvm-full-amqp1-dual-centos-7/post.yaml timeout: 10800 nodeset: legacy-centos-7 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-amqp1 - openstack/oslo.messaging - job: name: oslo.messaging-src-dsvm-full-amqp1-hybrid parent: legacy-dsvm-base run: playbooks/oslo.messaging-src-dsvm-full-amqp1-hybrid/run.yaml post-run: playbooks/oslo.messaging-src-dsvm-full-amqp1-hybrid/post.yaml timeout: 10800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-amqp1 - openstack/oslo.messaging - job: name: oslo.messaging-src-dsvm-full-kafka-default parent: legacy-dsvm-base run: playbooks/oslo.messaging-src-dsvm-full-kafka-default/run.yaml post-run: playbooks/oslo.messaging-src-dsvm-full-kafka-default/post.yaml timeout: 10800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-kafka - openstack/oslo.messaging - job: name: oslo.messaging-src-dsvm-full-kafka-default-centos-7 parent: legacy-dsvm-base run: playbooks/oslo.messaging-src-dsvm-full-kafka-default-centos-7/run.yaml post-run: playbooks/oslo.messaging-src-dsvm-full-kafka-default-centos-7/post.yaml timeout: 10800 nodeset: legacy-centos-7 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-kafka - openstack/oslo.messaging - job: name: oslo.messaging-src-dsvm-full-zmq-default parent: legacy-dsvm-base run: playbooks/oslo.messaging-src-dsvm-full-zmq-default/run.yaml post-run: playbooks/oslo.messaging-src-dsvm-full-zmq-default/post.yaml timeout: 10800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-zmq - openstack/oslo.messaging - job: name: oslo.messaging-src-grenade-dsvm parent: legacy-dsvm-base run: playbooks/oslo.messaging-src-grenade-dsvm/run.yaml post-run: playbooks/oslo.messaging-src-grenade-dsvm/post.yaml timeout: 10800 required-projects: - openstack-dev/grenade - openstack-infra/devstack-gate - openstack/oslo.messaging irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ - job: name: oslo.messaging-src-grenade-dsvm-multinode parent: legacy-dsvm-base-multinode run: playbooks/oslo.messaging-src-grenade-dsvm-multinode/run.yaml post-run: playbooks/oslo.messaging-src-grenade-dsvm-multinode/post.yaml timeout: 10800 required-projects: - openstack-dev/grenade - openstack-infra/devstack-gate - openstack/oslo.messaging irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ nodeset: legacy-ubuntu-xenial-2-node - job: name: oslo.messaging-telemetry-dsvm-integration-amqp1 parent: legacy-dsvm-base run: playbooks/oslo.messaging-telemetry-dsvm-integration-amqp1/run.yaml post-run: playbooks/oslo.messaging-telemetry-dsvm-integration-amqp1/post.yaml timeout: 4200 required-projects: - openstack-infra/devstack-gate - openstack/aodh - openstack/ceilometer - 
openstack/devstack-plugin-amqp1 - openstack/oslo.messaging - openstack/panko # following are required when DEVSTACK_GATE_HEAT, which this # job turns on - openstack/dib-utils - openstack/diskimage-builder - job: name: oslo.messaging-telemetry-dsvm-integration-kafka parent: legacy-dsvm-base run: playbooks/oslo.messaging-telemetry-dsvm-integration-kafka/run.yaml post-run: playbooks/oslo.messaging-telemetry-dsvm-integration-kafka/post.yaml timeout: 4200 required-projects: - openstack-infra/devstack-gate - openstack/aodh - openstack/ceilometer - openstack/devstack-plugin-kafka - openstack/oslo.messaging - openstack/panko # following are required when DEVSTACK_GATE_HEAT, which this # job turns on - openstack/dib-utils - openstack/diskimage-builder - job: name: oslo.messaging-telemetry-dsvm-integration-pika parent: legacy-dsvm-base run: playbooks/oslo.messaging-telemetry-dsvm-integration-pika/run.yaml post-run: playbooks/oslo.messaging-telemetry-dsvm-integration-pika/post.yaml timeout: 4200 required-projects: - openstack-infra/devstack-gate - openstack/aodh - openstack/ceilometer - openstack/devstack-plugin-pika - openstack/oslo.messaging - openstack/panko # following are required when DEVSTACK_GATE_HEAT, which this # job turns on - openstack/dib-utils - openstack/diskimage-builder - job: name: oslo.messaging-telemetry-dsvm-integration-zmq parent: legacy-dsvm-base run: playbooks/oslo.messaging-telemetry-dsvm-integration-zmq/run.yaml post-run: playbooks/oslo.messaging-telemetry-dsvm-integration-zmq/post.yaml timeout: 4200 required-projects: - openstack-infra/devstack-gate - openstack/aodh - openstack/ceilometer - openstack/devstack-plugin-zmq - openstack/oslo.messaging - openstack/panko # following are required when DEVSTACK_GATE_HEAT, which this # job turns on - openstack/dib-utils - openstack/diskimage-builder - job: name: oslo.messaging-telemetry-dsvm-integration-rabbit parent: legacy-dsvm-base run: playbooks/oslo.messaging-telemetry-dsvm-integration-rabbit/run.yaml post-run: playbooks/oslo.messaging-telemetry-dsvm-integration-rabbit/post.yaml timeout: 7800 required-projects: - openstack-infra/devstack-gate - openstack/aodh - openstack/ceilometer - openstack/oslo.messaging - openstack/panko # following are required when DEVSTACK_GATE_HEAT, which this # job turns on - openstack/dib-utils - openstack/diskimage-builder - job: name: oslo.messaging-tempest-neutron-dsvm-src-rabbit-default parent: legacy-dsvm-base run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-rabbit-default/run.yaml post-run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-rabbit-default/post.yaml timeout: 7800 required-projects: - openstack-infra/devstack-gate - openstack/neutron - openstack/oslo.messaging - openstack/tempest - job: name: oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid parent: legacy-dsvm-base run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid/run.yaml post-run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid/post.yaml timeout: 7800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-amqp1 - openstack/neutron - openstack/oslo.messaging - openstack/tempest - job: name: oslo.messaging-tempest-neutron-dsvm-src-kafka-default parent: legacy-dsvm-base run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-kafka-default/run.yaml post-run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-kafka-default/post.yaml timeout: 7800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-kafka - openstack/neutron - 
openstack/oslo.messaging - openstack/tempest - job: name: oslo.messaging-tempest-neutron-dsvm-src-pika-default parent: legacy-dsvm-base run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-pika-default/run.yaml post-run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-pika-default/post.yaml timeout: 7800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-pika - openstack/neutron - openstack/oslo.messaging - openstack/tempest - job: name: oslo.messaging-tempest-neutron-dsvm-src-zmq-default parent: legacy-dsvm-base run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-zmq-default/run.yaml post-run: playbooks/oslo.messaging-tempest-neutron-dsvm-src-zmq-default/post.yaml timeout: 7800 required-projects: - openstack-infra/devstack-gate - openstack/devstack-plugin-zmq - openstack/neutron - openstack/oslo.messaging - openstack/tempest - project: name: openstack/oslo.messaging check: jobs: - oslo.messaging-tox-py27-func-amqp1: voting: false - oslo.messaging-tox-py27-func-kafka: voting: false - oslo.messaging-tox-py27-func-pika - oslo.messaging-tox-py27-func-rabbit - oslo.messaging-tox-py27-func-zmq-proxy: voting: false - oslo.messaging-tox-py27-func-zmq-pubsub: voting: false - oslo.messaging-tox-py27-func-zmq: voting: false - oslo.messaging-tox-py35-func-amqp1: voting: false - oslo.messaging-tox-py35-func-rabbit: voting: false - oslo.messaging-tox-py35-func-zmq: voting: false - oslo.messaging-src-dsvm-full-rabbit-default - oslo.messaging-src-dsvm-full-amqp1-hybrid: voting: false branches: ^(?!stable/ocata).*$ - oslo.messaging-src-dsvm-full-amqp1-dual-centos-7: voting: false branches: ^(?!stable/ocata).*$ - oslo.messaging-src-dsvm-full-kafka-default-centos-7: voting: false - oslo.messaging-src-dsvm-full-kafka-default: voting: false - oslo.messaging-src-dsvm-full-pika-default: voting: false - oslo.messaging-src-dsvm-full-zmq-default: voting: false - oslo.messaging-src-grenade-dsvm: voting: false - oslo.messaging-src-grenade-dsvm-multinode: voting: false - oslo.messaging-telemetry-dsvm-integration-rabbit - oslo.messaging-telemetry-dsvm-integration-amqp1: voting: false - oslo.messaging-telemetry-dsvm-integration-kafka: voting: false - oslo.messaging-telemetry-dsvm-integration-pika: voting: false - oslo.messaging-telemetry-dsvm-integration-zmq: voting: false - oslo.messaging-tempest-neutron-dsvm-src-rabbit-default - oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid: voting: false branches: ^(?!stable/ocata).*$ - oslo.messaging-tempest-neutron-dsvm-src-kafka-default: voting: false - oslo.messaging-tempest-neutron-dsvm-src-pika-default: voting: false - oslo.messaging-tempest-neutron-dsvm-src-zmq-default: voting: false gate: jobs: - oslo.messaging-tox-py27-func-rabbit - oslo.messaging-tox-py27-func-pika - oslo.messaging-telemetry-dsvm-integration-rabbit - oslo.messaging-src-dsvm-full-rabbit-default - oslo.messaging-tempest-neutron-dsvm-src-rabbit-default oslo.messaging-5.35.0/tox.ini0000666000175100017510000001055213224676077016116 0ustar zuulzuul00000000000000[tox] minversion = 2.0 envlist = py35,py27,pep8,bandit [testenv] setenv = VIRTUAL_ENV={envdir} passenv = OS_* ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION install_command = pip install {opts} {packages} deps = -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = python setup.py testr --slowest --testr-args='{posargs}' [testenv:pep8] commands = flake8 deps = hacking<0.11,>=0.10.0 
[testenv:cover] commands = python setup.py test --coverage --coverage-package-name=oslo_messaging --testr-args='{posargs}' [testenv:venv] commands = {posargs} [testenv:docs] basepython = python2.7 commands = python setup.py build_sphinx deps = -r{toxinidir}/doc/requirements.txt [testenv:py27-func-rabbit] setenv = {[testenv]setenv} TRANSPORT_DRIVER=rabbit commands = pifpaf run rabbitmq -- python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py35-func-rabbit] setenv = {[testenv]setenv} TRANSPORT_DRIVER=rabbit basepython = python3.5 commands = pifpaf run rabbitmq -- python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py27-func-pika] setenv = {[testenv]setenv} TRANSPORT_DRIVER=pika commands = pifpaf run rabbitmq -- python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py27-func-kafka] setenv = {[testenv]setenv} TRANSPORT_URL=kafka://127.0.0.1:9092/ OS_GROUP_REGEX=oslo_messaging.tests.functional commands = {toxinidir}/setup-test-env-kafka.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py27-func-amqp1] setenv = {[testenv]setenv} TRANSPORT_URL=amqp://stackqpid:secretqpid@127.0.0.1:65123// AMQP1_BACKEND=qpidd ENVNAME={envname} WORKDIR={toxworkdir} commands = {toxinidir}/tools/setup-test-env-amqp1.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py35-func-amqp1] basepython = python3.5 setenv = {[testenv]setenv} TRANSPORT_URL=amqp://stackqpid:secretqpid@127.0.0.1:65123// AMQP1_BACKEND=qpidd ENVNAME={envname} WORKDIR={toxworkdir} commands = {toxinidir}/tools/setup-test-env-amqp1.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py27-func-zmq] commands = {toxinidir}/setup-test-env-zmq.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py35-func-zmq] commands = {toxinidir}/setup-test-env-zmq.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py27-func-zmq-dyn] commands = {toxinidir}/setup-test-env-zmq-direct-dynamic.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py27-func-zmq-proxy] commands = {toxinidir}/setup-test-env-zmq-proxy.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:py27-func-zmq-pubsub] commands = {toxinidir}/setup-test-env-zmq-pub-sub.sh python setup.py testr --slowest --testr-args='{posargs:oslo_messaging.tests.functional}' [testenv:bandit] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = bandit -r oslo_messaging -x tests -n5 [flake8] show-source = True enable-extensions = H203,H106 ignore = H405 exclude = .tox,dist,doc,*.egg,build,__init__.py [hacking] import_exceptions = oslo_messaging._i18n six.moves local-check-factory = oslo_messaging.hacking.checks.factory [testenv:pip-missing-reqs] # do not install test-requirements as that will pollute the virtualenv for # determining missing packages # this also means that pip-missing-reqs must be installed separately, outside # of the requirements.txt files deps = pip_missing_reqs commands = pip-missing-reqs -d --ignore-module=oslo_messaging* --ignore-file=oslo_messaging/tests/* --ignore-file=tests/ 
oslo_messaging [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html deps = -r{toxinidir}/doc/requirements.txt [testenv:bindep] deps = bindep commands = bindep {posargs} oslo.messaging-5.35.0/setup-test-env-kafka.sh0000777000175100017510000000111713224676046021111 0ustar zuulzuul00000000000000#!/bin/bash set -e . tools/functions.sh SCALA_VERSION=${SCALA_VERSION:-"2.12"} KAFKA_VERSION=${KAFKA_VERSION:-"1.0.0"} if [[ -z "$(which kafka-server-start)" ]] && [[ -z $(which kafka-server-start.sh) ]]; then DATADIR=$(mktemp -d /tmp/OSLOMSG-KAFKA.XXXXX) trap "clean_exit $DATADIR" EXIT tarball=kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz wget http://www.apache.org/dist/kafka/${KAFKA_VERSION}/$tarball -O $DATADIR/$tarball tar -xzf $DATADIR/$tarball -C $DATADIR export PATH=$DATADIR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}/bin:$PATH fi pifpaf run kafka -- $* oslo.messaging-5.35.0/releasenotes/0000775000175100017510000000000013224676256017266 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/0000775000175100017510000000000013224676256020566 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/conf.py0000666000175100017510000002211413224676046022064 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # oslo.log Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/oslo.messaging' bug_project = 'oslo.messaging' bug_tag = '' # Must set this variable to include year, month, day, hours, and minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'oslo.messaging Release Notes' copyright = u'2016, oslo.messaging Developers' # Release notes do not need a version in the title, they span # multiple versions. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. 
# html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'oslo.messagingReleaseNotesDoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'oslo.messagingReleaseNotes.tex', u'oslo.messaging Release Notes Documentation', u'oslo.messaging Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'oslo.messagingReleaseNotes', u'oslo.messaging Release Notes Documentation', [u'oslo.messaging Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'oslo.messagingReleaseNotes', u'oslo.messaging Release Notes Documentation', u'oslo.messaging Developers', 'oslo.messagingReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] oslo.messaging-5.35.0/releasenotes/source/locale/0000775000175100017510000000000013224676256022025 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/locale/en_GB/0000775000175100017510000000000013224676256022777 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000013224676256024564 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000666000175100017510000001343513224676046027622 0ustar zuulzuul00000000000000# Andi Chandler , 2016. #zanata # Andi Chandler , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: oslo.messaging Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-12-13 03:51+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-12-05 10:32+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en-GB\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "*conn_pool_min_size* (default 2)" msgstr "*conn_pool_min_size* (default 2)" msgid "*conn_pool_ttl* (default 1200)" msgstr "*conn_pool_ttl* (default 1200)" msgid "*retry* (default=-1)" msgstr "*retry* (default=-1)" msgid "5.20.0" msgstr "5.20.0" msgid "5.24.0" msgstr "5.24.0" msgid "5.24.2" msgstr "5.24.2" msgid "5.26.0" msgstr "5.26.0" msgid "5.27.0" msgstr "5.27.0" msgid "5.33.0" msgstr "5.33.0" msgid "5.6.0" msgstr "5.6.0" msgid "" "Add get_rpc_transport call to make the API clear for the separation of RPC " "and Notification messaging backends." msgstr "" "Add get_rpc_transport call to make the API clear for the separation of RPC " "and Notification messaging backends." msgid "" "Change the default value of RPC dispatcher access_policy to " "DefaultRPCAccessPolicy." msgstr "" "Change the default value of RPC dispatcher access_policy to " "DefaultRPCAccessPolicy." msgid "Configuration param 'retry' is added. Default is -1, indefinite" msgstr "Configuration param 'retry' is added. Default is -1, indefinite" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "Default TTL is 1200s. The following configuration params were added" msgstr "Default TTL is 1200s. The following configuration params were added" msgid "" "Deprecate get_transport and use get_rpc_transport or " "get_notification_transport to make the API usage clear for the separation of " "RPC and Notification messaging backends." msgstr "" "Deprecate get_transport and use get_rpc_transport or " "get_notification_transport to make the API usage clear for the separation of " "RPC and Notification messaging backends." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "Idle connections in the pool will be expired and closed." msgstr "Idle connections in the pool will be expired and closed." msgid "New Features" msgstr "New Features" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "On RabbitMQ, in the past, acknowledgement of messages was done within the " "application callback thread/greenlet. This thread was blocked until the " "message was acknowledged. In Newton, we rewrote the message acknowledgement " "to ensure that two threads never write to the socket at the same time. 
Now " "all pending acknowledgements are done by the main thread, so there is no " "longer a reason to block the application callback thread until the message " "is acknowledged. Other drivers already release the application callback " "thread before the message is acknowledged. This is now also the case for " "RabbitMQ." msgstr "" "On RabbitMQ, in the past, acknowledgement of messages was done within the " "application callback thread/greenlet. This thread was blocked until the " "message was acknowledged. In Newton, we rewrote the message acknowledgement " "to ensure that two threads never write to the socket at the same time. Now " "all pending acknowledgements are done by the main thread, so there is no " "longer a reason to block the application callback thread until the message " "is acknowledged. Other drivers already release the application callback " "thread before the message is acknowledged. This is now also the case for " "RabbitMQ." msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "RequestContextSerializer was deprecated since 4.6, and it isn't used by any " "other project, so we can remove it safely." msgstr "" "RequestContextSerializer was deprecated since 4.6, and it isn't used by any " "other project, so we can remove it safely." msgid "Retry support for oslo_messaging_notifications driver" msgstr "Retry support for oslo_messaging_notifications driver" msgid "" "The blocking executor has been deprecated for removal in Rocky. Its usage " "was never recommended for applications, and it has no test coverage. " "Applications should choose the appropriate threading model that maps their " "usage instead." msgstr "" "The blocking executor has been deprecated for removal in Rocky. Its usage " "was never recommended for applications, and it has no test coverage. " "Applications should choose the appropriate threading model that maps their " "usage instead." msgid "" "The pika driver has been deprecated for removal in Rocky. This driver was " "developed as a replacement for the default rabbit driver. However testing " "has not shown any appreciable improvement over the default rabbit driver in " "terms of performance and stability." msgstr "" "The Pika driver has been deprecated for removal in Rocky. This driver was " "developed as a replacement for the default rabbit driver. However testing " "has not shown any appreciable improvement over the default rabbit driver in " "terms of performance and stability." msgid "" "The rabbitmq driver option ``DEFAULT/max_retries`` has been deprecated for " "removal (at a later point in the future) as it did not make logical sense " "for notifications and for RPC." msgstr "" "The RabbitMQ driver option ``DEFAULT/max_retries`` has been deprecated for " "removal (at a later point in the future) as it did not make logical sense " "for notifications and for RPC." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "oslo.messaging Release Notes" msgstr "oslo.messaging Release Notes" oslo.messaging-5.35.0/releasenotes/source/newton.rst0000666000175100017510000000021413224676046022626 0ustar zuulzuul00000000000000============================ Newton Series Release Notes ============================ .. 
release-notes:: :branch: origin/stable/newton oslo.messaging-5.35.0/releasenotes/source/_static/0000775000175100017510000000000013224676256022214 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/_static/.placeholder0000666000175100017510000000000013224676046024464 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/unreleased.rst0000666000175100017510000000015613224676046023450 0ustar zuulzuul00000000000000============================= Current Series Release Notes ============================= .. release-notes:: oslo.messaging-5.35.0/releasenotes/source/index.rst0000666000175100017510000000025013224676046022423 0ustar zuulzuul00000000000000============================= oslo.messaging Release Notes ============================= .. toctree:: :maxdepth: 1 unreleased pike ocata newton oslo.messaging-5.35.0/releasenotes/source/ocata.rst0000666000175100017510000000021013224676046022377 0ustar zuulzuul00000000000000=========================== Ocata Series Release Notes =========================== .. release-notes:: :branch: origin/stable/ocata oslo.messaging-5.35.0/releasenotes/source/_templates/0000775000175100017510000000000013224676256022723 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/_templates/.placeholder0000666000175100017510000000000013224676046025173 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/source/pike.rst0000666000175100017510000000021713224676046022247 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike oslo.messaging-5.35.0/releasenotes/notes/0000775000175100017510000000000013224676256020416 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/releasenotes/notes/connection_ttl-2cf0fe6e1ab8c73c.yaml0000666000175100017510000000033713224676046026760 0ustar zuulzuul00000000000000--- features: - | | Idle connections in the pool will be expired and closed. | Default TTL is 1200s. The following configuration params were added * *conn_pool_ttl* (default 1200) * *conn_pool_min_size* (default 2) oslo.messaging-5.35.0/releasenotes/notes/fix-access_policy-deafult-a6954a147cb002b0.yaml0000666000175100017510000000015713224676046030446 0ustar zuulzuul00000000000000--- upgrade: - | Change the default value of RPC dispatcher access_policy to DefaultRPCAccessPolicy. oslo.messaging-5.35.0/releasenotes/notes/remove-RequestContextSerializer-234c0496a7e0376b.yaml0000666000175100017510000000021413224676046031673 0ustar zuulzuul00000000000000--- upgrade: - RequestContextSerializer was deprecated since 4.6, and it isn't used by any other project, so we can remove it safely. oslo.messaging-5.35.0/releasenotes/notes/get_rpc_transport-4aa3511ad9754a60.yaml0000666000175100017510000000052713224676046027166 0ustar zuulzuul00000000000000--- features: - | Add get_rpc_transport call to make the API clear for the separation of RPC and Notification messaging backends. deprecations: - | Deprecate get_transport and use get_rpc_transport or get_notification_transport to make the API usage clear for the separation of RPC and Notification messaging backends. 
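# The feature and deprecation notes above describe the RPC/notification
# transport split and the new pool/retry options. A minimal usage sketch
# follows; it is not part of this source tree, and the option values shown
# are illustrative assumptions, not recommendations:
#
#     import oslo_messaging
#     from oslo_config import cfg
#
#     conf = cfg.CONF
#     # RPC and notifications now get their own transports:
#     rpc_transport = oslo_messaging.get_rpc_transport(conf)
#     notification_transport = oslo_messaging.get_notification_transport(conf)
#
#     # The new options are normally set in the service configuration file
#     # (group placement assumed from the notes above; check your release's
#     # option listing):
#     #   [DEFAULT]
#     #   conn_pool_ttl = 1200        # close pooled connections idle > 20 min
#     #   conn_pool_min_size = 2      # keep at least two pooled connections
#     #   [oslo_messaging_notifications]
#     #   retry = -1                  # retry notification sends indefinitely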
././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000oslo.messaging-5.35.0/releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d672a2.yamloslo.messaging-5.35.0/releasenotes/notes/option-rabbitmq-max_retries-has-been-deprecated-471f66a9e6d0000666000175100017510000000032213224676046033226 0ustar zuulzuul00000000000000--- deprecations: - The rabbitmq driver option ``DEFAULT/max_retries`` has been deprecated for removal (at a later point in the future) as it did not make logical sense for notifications and for RPC. oslo.messaging-5.35.0/releasenotes/notes/pika-driver-has-been-deprecated-e2407fa53c91fe5c.yaml0000666000175100017510000000045413224676046031575 0ustar zuulzuul00000000000000--- deprecations: - The pika driver has been deprecated for removal in Rocky. This driver was developed as a replacement for the default rabbit driver. However testing has not shown any appreciable improvement over the default rabbit driver in terms of performance and stability. oslo.messaging-5.35.0/releasenotes/notes/retry-support-07996ef04dda9482.yaml0000666000175100017510000000026313224676046026334 0ustar zuulzuul00000000000000--- features: - | | Retry support for oslo_messaging_notifications driver | Configuration param 'retry' is added. Default is -1, indefinite * *retry* (default=-1) oslo.messaging-5.35.0/releasenotes/notes/blocking-executor-deprecated-895146c1c3bf2f51.yaml0000666000175100017510000000041313224676046031157 0ustar zuulzuul00000000000000--- deprecations: - The blocking executor has been deprecated for removal in Rocky. Its usage was never recommended for applications, and it has no test coverage. Applications should choose the appropriate threading model that maps their usage instead. oslo.messaging-5.35.0/releasenotes/notes/rabbit-no-wait-for-ack-9e5de3e1320d7660.yaml0000666000175100017510000000115313224676046027576 0ustar zuulzuul00000000000000--- other: - | On RabbitMQ, in the past, acknowledgement of messages was done within the application callback thread/greenlet. This thread was blocked until the message was acknowledged. In Newton, we rewrote the message acknowledgement to ensure that two threads never write to the socket at the same time. Now all pending acknowledgements are done by the main thread, so there is no longer a reason to block the application callback thread until the message is acknowledged. Other drivers already release the application callback thread before the message is acknowledged. This is now also the case for RabbitMQ. oslo.messaging-5.35.0/releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml0000666000175100017510000000007113224676046025276 0ustar zuulzuul00000000000000--- other: - Switch to reno for managing release notes.oslo.messaging-5.35.0/.coveragerc0000666000175100017510000000015113224676046016712 0ustar zuulzuul00000000000000[run] branch = True source = oslo_messaging omit = oslo_messaging/tests/* [report] ignore_errors = True oslo.messaging-5.35.0/oslo_messaging/0000775000175100017510000000000013224676256017606 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/dispatcher.py0000666000175100017510000000175513224676046022315 0ustar zuulzuul00000000000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six __all__ = [ "DispatcherBase" ] @six.add_metaclass(abc.ABCMeta) class DispatcherBase(object): "Base class for dispatcher" @abc.abstractmethod def dispatch(self, incoming): """Dispatch incoming messages to the endpoints and return result :param incoming: incoming object for dispatching to the endpoint :type incoming: object, depends on endpoint type """ oslo.messaging-5.35.0/oslo_messaging/tests/0000775000175100017510000000000013224676256020750 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/0000775000175100017510000000000013224676256022426 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/0000775000175100017510000000000013224676256023235 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/zmq_common.py0000666000175100017510000000720413224676046025770 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
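# A note on dispatcher.py above: DispatcherBase only fixes the dispatch()
# contract. A minimal concrete subclass is sketched here for illustration;
# it is an assumption of how an implementation might look, not code shipped
# in this tree:
from oslo_messaging.dispatcher import DispatcherBase


class EchoDispatcher(DispatcherBase):
    """Toy dispatcher: acknowledge each incoming message and hand it back."""

    def dispatch(self, incoming):
        # 'incoming' is driver-specific; RPC-style incoming messages carry
        # .ctxt/.message payloads and an acknowledge() method.
        incoming.acknowledge()
        return incoming.message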
import logging import threading import fixtures from six.moves import mock import testtools import oslo_messaging from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_options from oslo_messaging._i18n import _LE from oslo_messaging.tests import utils as test_utils LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class TestServerListener(object): def __init__(self, driver): self.driver = driver self.listener = None self.executor = zmq_async.get_executor(self._run) self._stop = threading.Event() self._received = threading.Event() self.message = None def listen(self, target): self.listener = self.driver.listen(target, None, None)._poll_style_listener self.executor.execute() def listen_notifications(self, targets_and_priorities): self.listener = self.driver.listen_for_notifications( targets_and_priorities, None, None, None)._poll_style_listener self.executor.execute() def _run(self): try: messages = self.listener.poll() if messages: message = messages[0] message.acknowledge() self._received.set() self.message = message message.reply(reply=True) except Exception: LOG.exception(_LE("Unexpected exception occurred.")) def stop(self): self.executor.stop() class ZmqBaseTestCase(test_utils.BaseTestCase): """Base test case for all ZMQ tests """ @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(ZmqBaseTestCase, self).setUp() self.messaging_conf.transport_driver = 'zmq' zmq_options.register_opts(self.conf, mock.MagicMock()) # Set config values self.internal_ipc_dir = self.useFixture(fixtures.TempDir()).path kwargs = {'rpc_zmq_bind_address': '127.0.0.1', 'rpc_zmq_host': '127.0.0.1', 'rpc_zmq_ipc_dir': self.internal_ipc_dir, 'use_pub_sub': False, 'use_router_proxy': False, 'rpc_zmq_matchmaker': 'dummy'} self.config(group='oslo_messaging_zmq', **kwargs) self.config(rpc_response_timeout=5) # Get driver transport = oslo_messaging.get_transport(self.conf) self.driver = transport._driver self.listener = TestServerListener(self.driver) self.addCleanup( StopRpc(self, [('listener', 'stop'), ('driver', 'cleanup')]) ) class StopRpc(object): def __init__(self, obj, attrs_and_stops): self.obj = obj self.attrs_and_stops = attrs_and_stops def __call__(self): for attr, stop in self.attrs_and_stops: if hasattr(self.obj, attr): obj_attr = getattr(self.obj, attr) if hasattr(obj_attr, stop): obj_attr_stop = getattr(obj_attr, stop) obj_attr_stop() oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/__init__.py0000666000175100017510000000000013224676046025333 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_zmq_async.py0000666000175100017510000000633413224676046026657 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
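# The TestImportZmq cases below assert the green-vs-native import selection.
# A sketch of that selection logic, assuming only the oslo.utils helpers
# (the real code lives in zmq_async.import_zmq()):
from oslo_utils import eventletutils
from oslo_utils import importutils


def import_zmq_sketch():
    # Under eventlet monkey-patching, use the green zmq binding so blocking
    # socket operations yield to other greenthreads; otherwise plain zmq.
    if eventletutils.is_monkey_patched('thread'):
        return importutils.try_import('eventlet.green.zmq', default=None)
    return importutils.try_import('zmq', default=None)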
from six.moves import mock import testtools from oslo_messaging._drivers.zmq_driver.poller import green_poller from oslo_messaging._drivers.zmq_driver.poller import threading_poller from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging.tests import utils as test_utils zmq = zmq_async.import_zmq() class TestImportZmq(test_utils.BaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(TestImportZmq, self).setUp() def test_when_eventlet_is_available_then_load_eventlet_green_zmq(self): zmq_async.eventletutils.is_monkey_patched = lambda _: True mock_try_import = mock.Mock() zmq_async.importutils.try_import = mock_try_import zmq_async.import_zmq() mock_try_import.assert_called_with('eventlet.green.zmq', default=None) def test_when_eventlet_is_unavailable_then_load_zmq(self): zmq_async.eventletutils.is_monkey_patched = lambda _: False mock_try_import = mock.Mock() zmq_async.importutils.try_import = mock_try_import zmq_async.import_zmq() mock_try_import.assert_called_with('zmq', default=None) class TestGetPoller(test_utils.BaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(TestGetPoller, self).setUp() def test_when_eventlet_is_available_then_return_GreenPoller(self): zmq_async.eventletutils.is_monkey_patched = lambda _: True poller = zmq_async.get_poller() self.assertIsInstance(poller, green_poller.GreenPoller) def test_when_eventlet_is_unavailable_then_return_ThreadingPoller(self): zmq_async.eventletutils.is_monkey_patched = lambda _: False poller = zmq_async.get_poller() self.assertIsInstance(poller, threading_poller.ThreadingPoller) class TestGetExecutor(test_utils.BaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(TestGetExecutor, self).setUp() def test_when_eventlet_module_is_available_then_return_GreenExecutor(self): zmq_async.eventletutils.is_monkey_patched = lambda _: True executor = zmq_async.get_executor('any method') self.assertIsInstance(executor, green_poller.GreenExecutor) self.assertEqual('any method', executor._method) def test_when_eventlet_is_unavailable_then_return_ThreadingExecutor(self): zmq_async.eventletutils.is_monkey_patched = lambda _: False executor = zmq_async.get_executor('any method') self.assertIsInstance(executor, threading_poller.ThreadingExecutor) self.assertEqual('any method', executor._method) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_impl_zmq.py0000666000175100017510000001130713224676046026477 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
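# The tests below drive the zmq driver through its private send()/listen()
# interface. For orientation, the equivalent round trip through the public
# oslo.messaging API looks roughly like this sketch (the 'zmq://' URL, the
# endpoint class and its method name are assumptions for illustration):
import oslo_messaging
from oslo_config import cfg


class EchoEndpoint(object):
    def qwerty(self, ctxt, **kwargs):
        # Endpoint methods receive the request context plus the call kwargs.
        return kwargs


def rpc_round_trip_sketch():
    transport = oslo_messaging.get_rpc_transport(cfg.CONF, url='zmq://')
    target = oslo_messaging.Target(topic='topic', server='server')
    server = oslo_messaging.get_rpc_server(transport, target,
                                           [EchoEndpoint()],
                                           executor='threading')
    server.start()
    client = oslo_messaging.RPCClient(transport, target)
    result = client.call({}, 'qwerty', x=1, y=2)
    server.stop()
    server.wait()
    return result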
import testtools import oslo_messaging from oslo_messaging._drivers import impl_zmq from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_socket from oslo_messaging.tests.drivers.zmq import zmq_common from oslo_messaging.tests import utils as test_utils zmq = zmq_async.import_zmq() class ZmqTestPortsRange(zmq_common.ZmqBaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(ZmqTestPortsRange, self).setUp() # Set config values kwargs = {'rpc_zmq_min_port': 5555, 'rpc_zmq_max_port': 5560} self.config(group='oslo_messaging_zmq', **kwargs) def test_ports_range(self): listeners = [] for i in range(10): try: target = oslo_messaging.Target(topic='testtopic_' + str(i)) new_listener = self.driver.listen(target, None, None) listeners.append(new_listener) except zmq_socket.ZmqPortBusy: pass self.assertLessEqual(len(listeners), 5) for l in listeners: l.cleanup() class TestConfZmqDriverLoad(test_utils.BaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(TestConfZmqDriverLoad, self).setUp() self.messaging_conf.transport_driver = 'zmq' def test_driver_load(self): transport = oslo_messaging.get_transport(self.conf) self.assertIsInstance(transport._driver, impl_zmq.ZmqDriver) class TestZmqBasics(zmq_common.ZmqBaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(TestZmqBasics, self).setUp() self.target = oslo_messaging.Target(topic='topic') self.ctxt = {'key': 'value'} self.message = {'method': 'qwerty', 'args': {'int': 1, 'bool': True}} def test_send_call_without_method_failure(self): self.message.pop('method') self.listener.listen(self.target) self.assertRaises(KeyError, self.driver.send, self.target, self.ctxt, self.message, wait_for_reply=True, timeout=10) def _check_listener_received(self): self.assertTrue(self.listener._received.isSet()) self.assertEqual(self.ctxt, self.listener.message.ctxt) self.assertEqual(self.message, self.listener.message.message) def test_send_call_success(self): self.listener.listen(self.target) result = self.driver.send(self.target, self.ctxt, self.message, wait_for_reply=True, timeout=10) self.assertTrue(result) self._check_listener_received() def test_send_call_direct_success(self): self.target.server = 'server' self.listener.listen(self.target) result = self.driver.send(self.target, self.ctxt, self.message, wait_for_reply=True, timeout=10) self.assertTrue(result) self._check_listener_received() def test_send_cast_direct_success(self): self.target.server = 'server' self.listener.listen(self.target) result = self.driver.send(self.target, self.ctxt, self.message, wait_for_reply=False) self.listener._received.wait(5) self.assertIsNone(result) self._check_listener_received() def test_send_fanout_success(self): self.target.fanout = True self.listener.listen(self.target) result = self.driver.send(self.target, self.ctxt, self.message, wait_for_reply=False) self.listener._received.wait(5) self.assertIsNone(result) self._check_listener_received() def test_send_notify_success(self): self.listener.listen_notifications([(self.target, 'info')]) self.target.topic += '.info' result = self.driver.send_notification(self.target, self.ctxt, self.message, '3.0') self.listener._received.wait(5) self.assertIsNone(result) self._check_listener_received() oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_zmq_ack_manager.py0000666000175100017510000002455713224676046030001 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, 
Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import mock import testtools import time import oslo_messaging from oslo_messaging._drivers.zmq_driver.client import zmq_receivers from oslo_messaging._drivers.zmq_driver.client import zmq_senders from oslo_messaging._drivers.zmq_driver.proxy import zmq_proxy from oslo_messaging._drivers.zmq_driver.server.consumers.zmq_dealer_consumer \ import DealerConsumerWithAcks from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_options from oslo_messaging.tests.drivers.zmq import zmq_common from oslo_messaging.tests import utils as test_utils zmq = zmq_async.import_zmq() class TestZmqAckManager(test_utils.BaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(TestZmqAckManager, self).setUp() # register and set necessary config opts self.messaging_conf.transport_driver = 'zmq' zmq_options.register_opts(self.conf, mock.MagicMock()) kwargs = {'rpc_zmq_matchmaker': 'dummy', 'use_pub_sub': False, 'use_router_proxy': True, 'rpc_thread_pool_size': 1, 'rpc_use_acks': True, 'rpc_ack_timeout_base': 5, 'rpc_ack_timeout_multiplier': 1, 'rpc_retry_attempts': 2} self.config(group='oslo_messaging_zmq', **kwargs) self.conf.register_opts(zmq_proxy.zmq_proxy_opts, group='zmq_proxy_opts') # mock set_result method of futures self.set_result_patcher = mock.patch.object( zmq_receivers.futurist.Future, 'set_result', side_effect=zmq_receivers.futurist.Future.set_result, autospec=True ) self.set_result = self.set_result_patcher.start() # mock send method of senders self.send_patcher = mock.patch.object( zmq_senders.RequestSenderProxy, 'send', side_effect=zmq_senders.RequestSenderProxy.send, autospec=True ) self.send = self.send_patcher.start() # get driver transport = oslo_messaging.get_transport(self.conf) self.driver = transport._driver # prepare and launch proxy self.proxy = zmq_proxy.ZmqProxy(self.conf) vars(self.driver.matchmaker).update(vars(self.proxy.matchmaker)) self.executor = zmq_async.get_executor(self.proxy.run) self.executor.execute() # create listener self.listener = zmq_common.TestServerListener(self.driver) # create target and message self.target = oslo_messaging.Target(topic='topic', server='server') self.message = {'method': 'xyz', 'args': {'x': 1, 'y': 2, 'z': 3}} # start listening to target self.listener.listen(self.target) # get ack manager self.ack_manager = self.driver.client.get().publishers['default'] self.addCleanup( zmq_common.StopRpc( self, [('listener', 'stop'), ('executor', 'stop'), ('proxy', 'close'), ('driver', 'cleanup'), ('send_patcher', 'stop'), ('set_result_patcher', 'stop')] ) ) # wait for all connections to be established # and all parties to be ready for messaging time.sleep(1) @mock.patch.object(DealerConsumerWithAcks, '_acknowledge', side_effect=DealerConsumerWithAcks._acknowledge, autospec=True) def test_cast_success_without_retries(self, received_ack_mock): result = self.driver.send( self.target, {}, self.message, wait_for_reply=False ) 
self.assertIsNone(result) self.ack_manager.pool.shutdown(wait=True) self.assertTrue(self.listener._received.isSet()) self.assertEqual(self.message, self.listener.message.message) self.assertEqual(1, self.send.call_count) self.assertEqual(1, received_ack_mock.call_count) self.assertEqual(2, self.set_result.call_count) def test_cast_success_with_one_retry(self): with mock.patch.object(DealerConsumerWithAcks, '_acknowledge') as lost_ack_mock: result = self.driver.send( self.target, {}, self.message, wait_for_reply=False ) self.assertIsNone(result) self.listener._received.wait(5) self.assertTrue(self.listener._received.isSet()) self.assertEqual(self.message, self.listener.message.message) self.assertEqual(1, self.send.call_count) self.assertEqual(1, lost_ack_mock.call_count) self.assertEqual(0, self.set_result.call_count) self.listener._received.clear() with mock.patch.object(DealerConsumerWithAcks, '_acknowledge', side_effect=DealerConsumerWithAcks._acknowledge, autospec=True) as received_ack_mock: self.ack_manager.pool.shutdown(wait=True) self.assertFalse(self.listener._received.isSet()) self.assertEqual(2, self.send.call_count) self.assertEqual(1, received_ack_mock.call_count) self.assertEqual(2, self.set_result.call_count) def test_cast_success_with_two_retries(self): with mock.patch.object(DealerConsumerWithAcks, '_acknowledge') as lost_ack_mock: result = self.driver.send( self.target, {}, self.message, wait_for_reply=False ) self.assertIsNone(result) self.listener._received.wait(5) self.assertTrue(self.listener._received.isSet()) self.assertEqual(self.message, self.listener.message.message) self.assertEqual(1, self.send.call_count) self.assertEqual(1, lost_ack_mock.call_count) self.assertEqual(0, self.set_result.call_count) self.listener._received.clear() self.listener._received.wait(7.5) self.assertFalse(self.listener._received.isSet()) self.assertEqual(2, self.send.call_count) self.assertEqual(2, lost_ack_mock.call_count) self.assertEqual(0, self.set_result.call_count) with mock.patch.object(DealerConsumerWithAcks, '_acknowledge', side_effect=DealerConsumerWithAcks._acknowledge, autospec=True) as received_ack_mock: self.ack_manager.pool.shutdown(wait=True) self.assertFalse(self.listener._received.isSet()) self.assertEqual(3, self.send.call_count) self.assertEqual(1, received_ack_mock.call_count) self.assertEqual(2, self.set_result.call_count) @mock.patch.object(DealerConsumerWithAcks, '_acknowledge') def test_cast_failure_exhausted_retries(self, lost_ack_mock): result = self.driver.send( self.target, {}, self.message, wait_for_reply=False ) self.assertIsNone(result) self.ack_manager.pool.shutdown(wait=True) self.assertTrue(self.listener._received.isSet()) self.assertEqual(self.message, self.listener.message.message) self.assertEqual(3, self.send.call_count) self.assertEqual(3, lost_ack_mock.call_count) self.assertEqual(1, self.set_result.call_count) @mock.patch.object(DealerConsumerWithAcks, '_acknowledge', side_effect=DealerConsumerWithAcks._acknowledge, autospec=True) @mock.patch.object(DealerConsumerWithAcks, '_reply', side_effect=DealerConsumerWithAcks._reply, autospec=True) @mock.patch.object(DealerConsumerWithAcks, '_reply_from_cache', side_effect=DealerConsumerWithAcks._reply_from_cache, autospec=True) def test_call_success_without_retries(self, unused_reply_from_cache_mock, received_reply_mock, received_ack_mock): result = self.driver.send( self.target, {}, self.message, wait_for_reply=True, timeout=10 ) self.assertIsNotNone(result) self.ack_manager.pool.shutdown(wait=True) 
self.assertTrue(self.listener._received.isSet()) self.assertEqual(self.message, self.listener.message.message) self.assertEqual(1, self.send.call_count) self.assertEqual(1, received_ack_mock.call_count) self.assertEqual(3, self.set_result.call_count) received_reply_mock.assert_called_once_with(mock.ANY, mock.ANY, reply=True, failure=None) self.assertEqual(0, unused_reply_from_cache_mock.call_count) @mock.patch.object(DealerConsumerWithAcks, '_acknowledge') @mock.patch.object(DealerConsumerWithAcks, '_reply') @mock.patch.object(DealerConsumerWithAcks, '_reply_from_cache') def test_call_failure_exhausted_retries(self, lost_reply_from_cache_mock, lost_reply_mock, lost_ack_mock): self.assertRaises(oslo_messaging.MessagingTimeout, self.driver.send, self.target, {}, self.message, wait_for_reply=True, timeout=20) self.ack_manager.pool.shutdown(wait=True) self.assertTrue(self.listener._received.isSet()) self.assertEqual(self.message, self.listener.message.message) self.assertEqual(3, self.send.call_count) self.assertEqual(3, lost_ack_mock.call_count) self.assertEqual(2, self.set_result.call_count) lost_reply_mock.assert_called_once_with(mock.ANY, reply=True, failure=None) self.assertEqual(2, lost_reply_from_cache_mock.call_count) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_zmq_transport_url.py0000666000175100017510000001152013224676046030451 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
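# The URL-scheme tests below rely on TransportURL parsing, which is public
# API. A small standalone sketch of what parse() yields (the hosts and ports
# are made-up examples):
import oslo_messaging
from oslo_config import cfg


def parse_transport_url_sketch():
    url = oslo_messaging.TransportURL.parse(
        cfg.CONF, "zmq+redis://host1:60001,host2:60002/")
    # url.transport is the scheme ('zmq+redis'); each element of url.hosts
    # exposes .hostname, .port, .username and .password attributes.
    return url.transport, [(h.hostname, h.port) for h in url.hosts]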
from six.moves import mock import testtools import oslo_messaging from oslo_messaging._drivers import common from oslo_messaging._drivers.zmq_driver.matchmaker.zmq_matchmaker_base \ import MatchmakerDummy from oslo_messaging._drivers.zmq_driver.matchmaker import zmq_matchmaker_redis from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging.tests import utils as test_utils zmq = zmq_async.import_zmq() redis = zmq_matchmaker_redis.redis sentinel = zmq_matchmaker_redis.redis_sentinel class TestZmqTransportUrl(test_utils.BaseTestCase): @testtools.skipIf(zmq is None, "zmq not available") def setUp(self): super(TestZmqTransportUrl, self).setUp() def setup_url(self, url): transport = oslo_messaging.get_transport(self.conf, url) self.addCleanup(transport.cleanup) driver = transport._driver return driver, url def mock_redis(self): if redis is None: self.skipTest("redis not available") else: redis_patcher = mock.patch.object(redis, 'StrictRedis') self.addCleanup(redis_patcher.stop) return redis_patcher.start() def mock_sentinel(self): if sentinel is None: self.skipTest("sentinel not available") else: sentinel_patcher = mock.patch.object(sentinel, 'Sentinel') self.addCleanup(sentinel_patcher.stop) return sentinel_patcher.start() def test_empty_url(self): self.mock_redis() driver, url = self.setup_url("zmq:///") self.assertIs(zmq_matchmaker_redis.MatchmakerRedis, driver.matchmaker.__class__) self.assertEqual('zmq', driver.matchmaker.url.transport) def test_error_url(self): self.assertRaises(common.RPCException, self.setup_url, "zmq+error:///") def test_dummy_url(self): driver, url = self.setup_url("zmq+dummy:///") self.assertIs(MatchmakerDummy, driver.matchmaker.__class__) self.assertEqual('zmq+dummy', driver.matchmaker.url.transport) def test_redis_url(self): self.mock_redis() driver, url = self.setup_url("zmq+redis:///") self.assertIs(zmq_matchmaker_redis.MatchmakerRedis, driver.matchmaker.__class__) self.assertEqual('zmq+redis', driver.matchmaker.url.transport) def test_sentinel_url(self): self.mock_sentinel() driver, url = self.setup_url("zmq+sentinel:///") self.assertIs(zmq_matchmaker_redis.MatchmakerSentinel, driver.matchmaker.__class__) self.assertEqual('zmq+sentinel', driver.matchmaker.url.transport) def test_host_with_credentials_url(self): self.mock_redis() driver, url = self.setup_url("zmq://:password@host:60000/") self.assertIs(zmq_matchmaker_redis.MatchmakerRedis, driver.matchmaker.__class__) self.assertEqual('zmq', driver.matchmaker.url.transport) self.assertEqual( [{"host": "host", "port": 60000, "password": "password"}], driver.matchmaker._redis_hosts ) def test_redis_multiple_hosts_url(self): self.mock_redis() driver, url = self.setup_url( "zmq+redis://host1:60001,host2:60002,host3:60003/" ) self.assertIs(zmq_matchmaker_redis.MatchmakerRedis, driver.matchmaker.__class__) self.assertEqual('zmq+redis', driver.matchmaker.url.transport) self.assertEqual( [{"host": "host1", "port": 60001, "password": None}, {"host": "host2", "port": 60002, "password": None}, {"host": "host3", "port": 60003, "password": None}], driver.matchmaker._redis_hosts ) def test_sentinel_multiple_hosts_url(self): self.mock_sentinel() driver, url = self.setup_url( "zmq+sentinel://host1:20001,host2:20002,host3:20003/" ) self.assertIs(zmq_matchmaker_redis.MatchmakerSentinel, driver.matchmaker.__class__) self.assertEqual('zmq+sentinel', driver.matchmaker.url.transport) self.assertEqual( [("host1", 20001), ("host2", 20002), ("host3", 20003)], driver.matchmaker._sentinel_hosts ) 
oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_pub_sub.py0000777000175100017510000001167013224676046026314 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import msgpack import six import testscenarios from oslo_config import cfg import oslo_messaging from oslo_messaging._drivers.zmq_driver.proxy.central \ import zmq_publisher_proxy from oslo_messaging._drivers.zmq_driver.proxy import zmq_proxy from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging.tests.drivers.zmq import zmq_common load_tests = testscenarios.load_tests_apply_scenarios zmq = zmq_async.import_zmq() opt_group = cfg.OptGroup(name='zmq_proxy_opts', title='ZeroMQ proxy options') cfg.CONF.register_opts(zmq_proxy.zmq_proxy_opts, group=opt_group) class TestPubSub(zmq_common.ZmqBaseTestCase): LISTENERS_COUNT = 3 scenarios = [ ('json', {'serialization': 'json', 'dumps': lambda obj: six.b(json.dumps(obj))}), ('msgpack', {'serialization': 'msgpack', 'dumps': msgpack.dumps}) ] def setUp(self): super(TestPubSub, self).setUp() kwargs = {'use_pub_sub': True, 'rpc_zmq_serialization': self.serialization} self.config(group='oslo_messaging_zmq', **kwargs) self.config(host="127.0.0.1", group="zmq_proxy_opts") self.config(publisher_port=0, group="zmq_proxy_opts") self.publisher = zmq_publisher_proxy.PublisherProxy( self.conf, self.driver.matchmaker) self.driver.matchmaker.register_publisher( (self.publisher.host, ''), expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.listeners = [] for _ in range(self.LISTENERS_COUNT): self.listeners.append(zmq_common.TestServerListener(self.driver)) def tearDown(self): super(TestPubSub, self).tearDown() self.publisher.cleanup() for listener in self.listeners: listener.stop() def _send_request(self, target): # Needed only in test env to give listener a chance to connect # before request fires time.sleep(1) context = {} message = {'method': 'hello-world'} self.publisher.send_request( [b"reply_id", b'', six.b(zmq_version.MESSAGE_VERSION), six.b(str(zmq_names.CAST_FANOUT_TYPE)), zmq_address.target_to_subscribe_filter(target), b"message_id", self.dumps([context, message])] ) def _check_listener(self, listener): listener._received.wait(timeout=5) self.assertTrue(listener._received.isSet()) method = listener.message.message[u'method'] self.assertEqual(u'hello-world', method) def _check_listener_negative(self, listener): listener._received.wait(timeout=1) self.assertFalse(listener._received.isSet()) def test_single_listener(self): target = oslo_messaging.Target(topic='testtopic', fanout=True) self.listener.listen(target) self._send_request(target) self._check_listener(self.listener) def test_all_listeners(self): target = oslo_messaging.Target(topic='testtopic', fanout=True) for listener in self.listeners: listener.listen(target) 
self._send_request(target) for listener in self.listeners: self._check_listener(listener) def test_filtered(self): target = oslo_messaging.Target(topic='testtopic', fanout=True) target_wrong = oslo_messaging.Target(topic='wrong', fanout=True) self.listeners[0].listen(target) self.listeners[1].listen(target) self.listeners[2].listen(target_wrong) self._send_request(target) self._check_listener(self.listeners[0]) self._check_listener(self.listeners[1]) self._check_listener_negative(self.listeners[2]) def test_topic_part_matching(self): target = oslo_messaging.Target(topic='testtopic', server='server') target_part = oslo_messaging.Target(topic='testtopic', fanout=True) self.listeners[0].listen(target) self.listeners[1].listen(target) self._send_request(target_part) self._check_listener(self.listeners[0]) self._check_listener(self.listeners[1]) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_zmq_ttl_cache.py0000666000175100017510000000751713224676046027474 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_messaging._drivers.zmq_driver.server import zmq_ttl_cache from oslo_messaging.tests import utils as test_utils class TestZmqTTLCache(test_utils.BaseTestCase): def setUp(self): super(TestZmqTTLCache, self).setUp() def call_count_decorator(unbound_method): def wrapper(self, *args, **kwargs): wrapper.call_count += 1 return unbound_method(self, *args, **kwargs) wrapper.call_count = 0 return wrapper zmq_ttl_cache.TTLCache._update_cache = \ call_count_decorator(zmq_ttl_cache.TTLCache._update_cache) self.cache = zmq_ttl_cache.TTLCache(ttl=1) self.addCleanup(lambda: self.cache.cleanup()) def _test_add_get(self): self.cache.add('x', 'a') self.assertEqual(self.cache.get('x'), 'a') self.assertEqual(self.cache.get('x', 'b'), 'a') self.assertIsNone(self.cache.get('y')) self.assertEqual(self.cache.get('y', 'b'), 'b') time.sleep(1) self.assertIsNone(self.cache.get('x')) self.assertEqual(self.cache.get('x', 'b'), 'b') def test_add_get_with_executor(self): self._test_add_get() def test_add_get_without_executor(self): self.cache._executor.stop() self._test_add_get() def _test_in_operator(self): self.cache.add(1) self.assertIn(1, self.cache) time.sleep(0.5) self.cache.add(2) self.assertIn(1, self.cache) self.assertIn(2, self.cache) time.sleep(0.75) self.cache.add(3) self.assertNotIn(1, self.cache) self.assertIn(2, self.cache) self.assertIn(3, self.cache) time.sleep(0.5) self.assertNotIn(2, self.cache) self.assertIn(3, self.cache) def test_in_operator_with_executor(self): self._test_in_operator() def test_in_operator_without_executor(self): self.cache._executor.stop() self._test_in_operator() def _is_expired(self, key): with self.cache._lock: _, expiration_time = self.cache._cache[key] return self.cache._is_expired(expiration_time, time.time()) def test_executor(self): self.cache.add(1) self.assertEqual([1], sorted(self.cache._cache.keys())) self.assertFalse(self._is_expired(1)) time.sleep(0.75) self.assertEqual(1, 
self.cache._update_cache.call_count) self.cache.add(2) self.assertEqual([1, 2], sorted(self.cache._cache.keys())) self.assertFalse(self._is_expired(1)) self.assertFalse(self._is_expired(2)) time.sleep(0.75) self.assertEqual(2, self.cache._update_cache.call_count) self.cache.add(3) if 1 in self.cache: self.assertEqual([1, 2, 3], sorted(self.cache._cache.keys())) self.assertTrue(self._is_expired(1)) else: self.assertEqual([2, 3], sorted(self.cache._cache.keys())) self.assertFalse(self._is_expired(2)) self.assertFalse(self._is_expired(3)) time.sleep(0.75) self.assertEqual(3, self.cache._update_cache.call_count) self.assertEqual([3], sorted(self.cache._cache.keys())) self.assertFalse(self._is_expired(3)) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_routing_table.py0000666000175100017510000000502513224676046027505 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging._drivers.zmq_driver.client import zmq_routing_table from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging.tests import utils as test_utils zmq = zmq_async.import_zmq() class TestRoutingTable(test_utils.BaseTestCase): def setUp(self): super(TestRoutingTable, self).setUp() def test_get_next_while_origin_changed(self): table = zmq_routing_table.RoutingTable(self.conf) table.register("topic1.server1", "1") table.register("topic1.server1", "2") table.register("topic1.server1", "3") rr_gen = table.get_hosts_round_robin("topic1.server1") result = [] for i in range(3): result.append(next(rr_gen)) self.assertEqual(3, len(result)) self.assertIn("1", result) self.assertIn("2", result) self.assertIn("3", result) table.register("topic1.server1", "4") table.register("topic1.server1", "5") table.register("topic1.server1", "6") result = [] for i in range(6): result.append(next(rr_gen)) self.assertEqual(6, len(result)) self.assertIn("1", result) self.assertIn("2", result) self.assertIn("3", result) self.assertIn("4", result) self.assertIn("5", result) self.assertIn("6", result) def test_no_targets(self): table = zmq_routing_table.RoutingTable(self.conf) rr_gen = table.get_hosts_round_robin("topic1.server1") result = [] for t in rr_gen: result.append(t) self.assertEqual(0, len(result)) def test_target_unchanged(self): table = zmq_routing_table.RoutingTable(self.conf) table.register("topic1.server1", "1") rr_gen = table.get_hosts_round_robin("topic1.server1") result = [] for i in range(3): result.append(next(rr_gen)) self.assertEqual(["1", "1", "1"], result) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_zmq_address.py0000666000175100017510000000537613224676046027174 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testscenarios import testtools import oslo_messaging from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging.tests import utils as test_utils zmq = zmq_async.import_zmq() load_tests = testscenarios.load_tests_apply_scenarios class TestZmqAddress(test_utils.BaseTestCase): scenarios = [ ('router', {'listener_type': zmq_names.socket_type_str(zmq.ROUTER)}), ('dealer', {'listener_type': zmq_names.socket_type_str(zmq.DEALER)}) ] @testtools.skipIf(zmq is None, "zmq not available") def test_target_to_key_topic_only(self): target = oslo_messaging.Target(topic='topic') key = zmq_address.target_to_key(target, self.listener_type) self.assertEqual(self.listener_type + '/topic', key) @testtools.skipIf(zmq is None, "zmq not available") def test_target_to_key_topic_server_round_robin(self): target = oslo_messaging.Target(topic='topic', server='server') key = zmq_address.target_to_key(target, self.listener_type) self.assertEqual(self.listener_type + '/topic/server', key) @testtools.skipIf(zmq is None, "zmq not available") def test_target_to_key_topic_fanout(self): target = oslo_messaging.Target(topic='topic', fanout=True) key = zmq_address.target_to_key(target, self.listener_type) self.assertEqual(self.listener_type + '/topic', key) @testtools.skipIf(zmq is None, "zmq not available") def test_target_to_key_topic_server_fanout(self): target = oslo_messaging.Target(topic='topic', server='server', fanout=True) key = zmq_address.target_to_key(target, self.listener_type) self.assertEqual(self.listener_type + '/topic', key) @testtools.skipIf(zmq is None, "zmq not available") def test_target_to_key_topic_server_fanout_no_prefix(self): target = oslo_messaging.Target(topic='topic', server='server', fanout=True) key = zmq_address.target_to_key(target) self.assertEqual('topic', key) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/matchmaker/0000775000175100017510000000000013224676256025351 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/matchmaker/__init__.py0000666000175100017510000000000013224676046027447 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/matchmaker/test_impl_matchmaker.py0000777000175100017510000001114113224676046032117 0ustar zuulzuul00000000000000# Copyright 2014 Canonical, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
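# The scenarios below exercise the matchmaker contract that every backend
# (dummy, redis, sentinel) must honour: register() binds a host to a target
# and listener type with an expiry, get_hosts() returns the current bindings,
# unregister() drops one. A dict-backed sketch of those semantics (expiry
# deliberately ignored; this is an illustration, not a shipped backend):
import collections


class DictMatchmakerSketch(object):
    def __init__(self):
        self._hosts = collections.defaultdict(list)

    def register(self, target, hostname, listener_type, expire=None):
        key = (target.topic, target.server, listener_type)
        if hostname not in self._hosts[key]:
            self._hosts[key].append(hostname)

    def unregister(self, target, hostname, listener_type):
        key = (target.topic, target.server, listener_type)
        if hostname in self._hosts[key]:
            self._hosts[key].remove(hostname)

    def get_hosts(self, target, listener_type):
        return list(self._hosts[(target.topic, target.server, listener_type)])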
import inspect from stevedore import driver import testscenarios import oslo_messaging from oslo_messaging.tests import utils as test_utils from oslo_utils import importutils redis = importutils.try_import('redis') def redis_available(): '''Helper to see if local redis server is running''' if not redis: return False try: redis.StrictRedis(socket_timeout=1).ping() return True except redis.exceptions.ConnectionError: return False load_tests = testscenarios.load_tests_apply_scenarios class TestImplMatchmaker(test_utils.BaseTestCase): scenarios = [ ("dummy", {"rpc_zmq_matchmaker": "dummy"}), ("redis", {"rpc_zmq_matchmaker": "redis"}), ] def setUp(self): super(TestImplMatchmaker, self).setUp() if self.rpc_zmq_matchmaker == "redis": if not redis_available(): self.skipTest("redis unavailable") self.test_matcher = driver.DriverManager( 'oslo.messaging.zmq.matchmaker', self.rpc_zmq_matchmaker, ).driver(self.conf) if self.rpc_zmq_matchmaker == "redis": for redis_instance in self.test_matcher._redis_instances: self.addCleanup(redis_instance.flushdb) self.target = oslo_messaging.Target(topic="test_topic") self.host1 = b"test_host1" self.host2 = b"test_host2" def test_register(self): self.test_matcher.register( self.target, self.host1, "test", expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.assertEqual([self.host1], self.test_matcher.get_hosts(self.target, "test")) def test_register_two_hosts(self): self.test_matcher.register( self.target, self.host1, "test", expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.test_matcher.register( self.target, self.host2, "test", expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.assertItemsEqual(self.test_matcher.get_hosts(self.target, "test"), [self.host1, self.host2]) def test_register_unregister(self): self.test_matcher.register( self.target, self.host1, "test", expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.test_matcher.register( self.target, self.host2, "test", expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.test_matcher.unregister(self.target, self.host2, "test") self.assertItemsEqual(self.test_matcher.get_hosts(self.target, "test"), [self.host1]) def test_register_two_same_hosts(self): self.test_matcher.register( self.target, self.host1, "test", expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.test_matcher.register( self.target, self.host1, "test", expire=self.conf.oslo_messaging_zmq.zmq_target_expire) self.assertEqual([self.host1], self.test_matcher.get_hosts(self.target, "test")) def test_get_hosts_wrong_topic(self): target = oslo_messaging.Target(topic="no_such_topic") self.assertEqual([], self.test_matcher.get_hosts(target, "test")) def test_handle_redis_package_error(self): if self.rpc_zmq_matchmaker == "redis": # move 'redis' variable to prevent this case affect others module = inspect.getmodule(self.test_matcher) redis_package = module.redis # 'redis' variable is set to None, when package importing is failed module.redis = None self.assertRaises(ImportError, self.test_matcher.__init__, self.conf) # retrieve 'redis' variable which is set originally module.redis = redis_package oslo.messaging-5.35.0/oslo_messaging/tests/drivers/zmq/test_zmq_version.py0000666000175100017510000000356513224676046027232 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging.tests import utils as test_utils class Doer(object): def __init__(self): self.x = 1 self.y = 2 self.z = 3 def _sudo(self): pass def do(self): pass def _do_v_1_1(self): pass def _do_v_2_2(self): pass def _do_v_3_3(self): pass class TestZmqVersion(test_utils.BaseTestCase): def setUp(self): super(TestZmqVersion, self).setUp() self.doer = Doer() def test_get_unknown_attr_versions(self): self.assertRaises(AssertionError, zmq_version.get_method_versions, self.doer, 'qwerty') def test_get_non_method_attr_versions(self): for attr_name in vars(self.doer): self.assertRaises(AssertionError, zmq_version.get_method_versions, self.doer, attr_name) def test_get_private_method_versions(self): self.assertRaises(AssertionError, zmq_version.get_method_versions, self.doer, '_sudo') def test_get_public_method_versions(self): do_versions = zmq_version.get_method_versions(self.doer, 'do') self.assertEqual(['1.1', '2.2', '3.3'], sorted(do_versions.keys())) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/test_amqp_driver.py0000666000175100017510000026620313224676046026360 0ustar zuulzuul00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import logging import os import select import shlex import shutil from six.moves import mock import socket import subprocess import sys import tempfile import threading import time import uuid from oslo_utils import importutils from six import moves from string import Template import testtools import oslo_messaging from oslo_messaging.tests import utils as test_utils # TODO(kgiusti) Conditionally run these tests only if the necessary # dependencies are installed. This should be removed once the proton libraries # are available in the base repos for all supported platforms. pyngus = importutils.try_import("pyngus") if pyngus: from oslo_messaging._drivers.amqp1_driver.addressing \ import AddresserFactory from oslo_messaging._drivers.amqp1_driver.addressing \ import LegacyAddresser from oslo_messaging._drivers.amqp1_driver.addressing \ import RoutableAddresser import oslo_messaging._drivers.impl_amqp1 as amqp_driver # The Cyrus-based SASL tests can only be run if the installed version of proton # has been built with Cyrus SASL support. 
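# ---------------------------------------------------------------------------
# NOTE(editor): illustrative sketch only, relating to TestZmqVersion above.
# It approximates what zmq_version.get_method_versions() is expected to
# return for the Doer fixture: private methods named
# '_<name>_v_<major>_<minor>' mapped to '<major>.<minor>' version strings.
# The helper name and the exact regex are assumptions; only the observable
# mapping is taken from the test's assertions.
import inspect as _sk_inspect
import re as _sk_re


def _sketch_method_versions(obj, name):
    pattern = _sk_re.compile(r'^_%s_v_(\d+)_(\d+)$' % _sk_re.escape(name))
    versions = {}
    for attr_name, attr in _sk_inspect.getmembers(obj, callable):
        match = pattern.match(attr_name)
        if match:
            # e.g. Doer()._do_v_1_1 is recorded under the key '1.1'
            versions['%s.%s' % match.groups()] = attr
    return versions

# sorted(_sketch_method_versions(Doer(), 'do')) == ['1.1', '2.2', '3.3']
# ---------------------------------------------------------------------------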
_proton = importutils.try_import("proton") CYRUS_ENABLED = (pyngus and pyngus.VERSION >= (2, 0, 0) and _proton and getattr(_proton.SASL, "extended", lambda: False)()) # same with SSL # SSL_ENABLED = (_proton and getattr(_proton.SSL, "present", lambda: False)()) SSL_ENABLED = False LOG = logging.getLogger(__name__) def _wait_until(predicate, timeout): deadline = timeout + time.time() while not predicate() and deadline > time.time(): time.sleep(0.1) class _ListenerThread(threading.Thread): """Run a blocking listener in a thread.""" def __init__(self, listener, msg_count, msg_ack=True): super(_ListenerThread, self).__init__() self.listener = listener self.msg_count = msg_count self._msg_ack = msg_ack self.messages = moves.queue.Queue() self.daemon = True self.started = threading.Event() self._done = False self.start() self.started.wait() def run(self): LOG.debug("Listener started") self.started.set() while not self._done: for in_msg in self.listener.poll(timeout=0.5): self.messages.put(in_msg) self.msg_count -= 1 self._done = self.msg_count == 0 if self._msg_ack: in_msg.acknowledge() if in_msg.message.get('method') == 'echo': in_msg.reply(reply={'correlation-id': in_msg.message.get('id')}) else: in_msg.requeue() LOG.debug("Listener stopped") def get_messages(self): """Returns a list of all received messages.""" msgs = [] try: while True: m = self.messages.get(False) msgs.append(m) except moves.queue.Empty: pass return msgs def kill(self, timeout=30): self._done = True self.join(timeout) @testtools.skipUnless(pyngus, "proton modules not present") class TestProtonDriverLoad(test_utils.BaseTestCase): def setUp(self): super(TestProtonDriverLoad, self).setUp() self.messaging_conf.transport_driver = 'amqp' def test_driver_load(self): transport = oslo_messaging.get_transport(self.conf) self.assertIsInstance(transport._driver, amqp_driver.ProtonDriver) class _AmqpBrokerTestCase(test_utils.BaseTestCase): """Creates a single FakeBroker for use by the tests""" @testtools.skipUnless(pyngus, "proton modules not present") def setUp(self): super(_AmqpBrokerTestCase, self).setUp() self._broker = FakeBroker(self.conf.oslo_messaging_amqp) self._broker_addr = "amqp://%s:%d" % (self._broker.host, self._broker.port) self._broker_url = oslo_messaging.TransportURL.parse( self.conf, self._broker_addr) def tearDown(self): super(_AmqpBrokerTestCase, self).tearDown() if self._broker: self._broker.stop() class _AmqpBrokerTestCaseAuto(_AmqpBrokerTestCase): """Like _AmqpBrokerTestCase, but starts the broker""" @testtools.skipUnless(pyngus, "proton modules not present") def setUp(self): super(_AmqpBrokerTestCaseAuto, self).setUp() self._broker.start() class TestAmqpSend(_AmqpBrokerTestCaseAuto): """Test sending and receiving messages.""" def test_driver_unconnected_cleanup(self): """Verify the driver can cleanly shutdown even if never connected.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) driver.cleanup() def test_listener_cleanup(self): """Verify unused listener can cleanly shutdown.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = driver.listen(target, None, None)._poll_style_listener self.assertIsInstance(listener, amqp_driver.ProtonListener) driver.cleanup() def test_send_no_reply(self): driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) rc = driver.send(target, 
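                         # NOTE(editor): wait_for_reply=False makes this a
                         # cast: send() returns None immediately instead of
                         # blocking on a reply (asserted just below).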
{"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) listener.join(timeout=30) self.assertFalse(listener.isAlive()) self.assertEqual({"msg": "value"}, listener.messages.get().message) predicate = lambda: (self._broker.sender_link_ack_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_send_exchange_with_reply(self): driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target1 = oslo_messaging.Target(topic="test-topic", exchange="e1") listener1 = _ListenerThread( driver.listen(target1, None, None)._poll_style_listener, 1) target2 = oslo_messaging.Target(topic="test-topic", exchange="e2") listener2 = _ListenerThread( driver.listen(target2, None, None)._poll_style_listener, 1) rc = driver.send(target1, {"context": "whatever"}, {"method": "echo", "id": "e1"}, wait_for_reply=True, timeout=30) self.assertIsNotNone(rc) self.assertEqual('e1', rc.get('correlation-id')) rc = driver.send(target2, {"context": "whatever"}, {"method": "echo", "id": "e2"}, wait_for_reply=True, timeout=30) self.assertIsNotNone(rc) self.assertEqual('e2', rc.get('correlation-id')) listener1.join(timeout=30) self.assertFalse(listener1.isAlive()) listener2.join(timeout=30) self.assertFalse(listener2.isAlive()) driver.cleanup() def test_messaging_patterns(self): """Verify the direct, shared, and fanout message patterns work.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target1 = oslo_messaging.Target(topic="test-topic", server="server1") listener1 = _ListenerThread( driver.listen(target1, None, None)._poll_style_listener, 4) target2 = oslo_messaging.Target(topic="test-topic", server="server2") listener2 = _ListenerThread( driver.listen(target2, None, None)._poll_style_listener, 3) shared_target = oslo_messaging.Target(topic="test-topic") fanout_target = oslo_messaging.Target(topic="test-topic", fanout=True) # this should go to only one server: driver.send(shared_target, {"context": "whatever"}, {"method": "echo", "id": "either-1"}, wait_for_reply=True) self.assertEqual(1, self._broker.topic_count) self.assertEqual(1, self._broker.direct_count) # reply # this should go to the other server: driver.send(shared_target, {"context": "whatever"}, {"method": "echo", "id": "either-2"}, wait_for_reply=True) self.assertEqual(2, self._broker.topic_count) self.assertEqual(2, self._broker.direct_count) # reply # these should only go to listener1: driver.send(target1, {"context": "whatever"}, {"method": "echo", "id": "server1-1"}, wait_for_reply=True) driver.send(target1, {"context": "whatever"}, {"method": "echo", "id": "server1-2"}, wait_for_reply=True) self.assertEqual(6, self._broker.direct_count) # 2X(send+reply) # this should only go to listener2: driver.send(target2, {"context": "whatever"}, {"method": "echo", "id": "server2"}, wait_for_reply=True) self.assertEqual(8, self._broker.direct_count) # both listeners should get a copy: driver.send(fanout_target, {"context": "whatever"}, {"method": "echo", "id": "fanout"}) listener1.join(timeout=30) self.assertFalse(listener1.isAlive()) listener2.join(timeout=30) self.assertFalse(listener2.isAlive()) self.assertEqual(1, self._broker.fanout_count) listener1_ids = [x.message.get('id') for x in listener1.get_messages()] listener2_ids = [x.message.get('id') for x in listener2.get_messages()] self.assertTrue('fanout' in listener1_ids and 'fanout' in listener2_ids) self.assertTrue('server1-1' in listener1_ids and 'server1-1' not in listener2_ids) self.assertTrue('server1-2' in listener1_ids and 
'server1-2' not in listener2_ids) self.assertTrue('server2' in listener2_ids and 'server2' not in listener1_ids) if 'either-1' in listener1_ids: self.assertTrue('either-2' in listener2_ids and 'either-2' not in listener1_ids and 'either-1' not in listener2_ids) else: self.assertTrue('either-2' in listener1_ids and 'either-2' not in listener2_ids and 'either-1' in listener2_ids) predicate = lambda: (self._broker.sender_link_ack_count == 12) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_send_timeout(self): """Verify send timeout - no reply sent.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) # the listener will drop this message: self.assertRaises(oslo_messaging.MessagingTimeout, driver.send, target, {"context": "whatever"}, {"method": "drop"}, wait_for_reply=True, timeout=1.0) listener.join(timeout=30) self.assertFalse(listener.isAlive()) driver.cleanup() def test_released_send(self): """Verify exception thrown if send Nacked.""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="no listener") # the broker will send a nack (released) since there is no active # listener for the target: self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send, target, {"context": "whatever"}, {"method": "drop"}, wait_for_reply=True, retry=0, timeout=1.0) driver.cleanup() def test_send_not_acked(self): """Verify exception thrown ack dropped.""" self.config(pre_settled=[], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) # set this directly so we can use a value < minimum allowed driver._default_send_timeout = 2 target = oslo_messaging.Target(topic="!no-ack!") # the broker will silently discard: self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send, target, {"context": "whatever"}, {"method": "drop"}, retry=0, wait_for_reply=True) driver.cleanup() def test_no_ack_cast(self): """Verify no exception is thrown if acks are turned off""" # set casts to ignore ack self.config(pre_settled=['rpc-cast'], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) # set this directly so we can use a value < minimum allowed driver._default_send_timeout = 2 target = oslo_messaging.Target(topic="!no-ack!") # the broker will silently discard this cast, but since ack'ing is # disabled the send does not fail driver.send(target, {"context": "whatever"}, {"method": "drop"}, wait_for_reply=False) driver.cleanup() def test_call_late_reply(self): """What happens if reply arrives after timeout?""" class _SlowResponder(_ListenerThread): def __init__(self, listener, delay): self._delay = delay super(_SlowResponder, self).__init__(listener, 1) def run(self): self.started.set() while not self._done: for in_msg in self.listener.poll(timeout=0.5): time.sleep(self._delay) in_msg.acknowledge() in_msg.reply(reply={'correlation-id': in_msg.message.get('id')}) self.messages.put(in_msg) self._done = True driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _SlowResponder( driver.listen(target, None, None)._poll_style_listener, 3) self.assertRaises(oslo_messaging.MessagingTimeout, driver.send, target, {"context": "whatever"}, {"method": "echo", "id": "???"}, wait_for_reply=True, timeout=1.0) listener.join(timeout=30) 
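        # NOTE(editor): the reply arrives only after MessagingTimeout has
        # already been raised to the caller; the broker must nevertheless
        # record the listener's ack (sender_link_ack_count check below).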
self.assertFalse(listener.isAlive()) predicate = lambda: (self._broker.sender_link_ack_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_call_failed_reply(self): """Send back an exception generated at the listener""" class _FailedResponder(_ListenerThread): def __init__(self, listener): super(_FailedResponder, self).__init__(listener, 1) def run(self): self.started.set() while not self._done: for in_msg in self.listener.poll(timeout=0.5): try: raise RuntimeError("Oopsie!") except RuntimeError: in_msg.reply(reply=None, failure=sys.exc_info()) self._done = True driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _FailedResponder( driver.listen(target, None, None)._poll_style_listener) self.assertRaises(RuntimeError, driver.send, target, {"context": "whatever"}, {"method": "echo"}, wait_for_reply=True, timeout=5.0) listener.join(timeout=30) self.assertFalse(listener.isAlive()) driver.cleanup() def test_call_reply_timeout(self): """What happens if the replier times out?""" class _TimeoutListener(_ListenerThread): def __init__(self, listener): super(_TimeoutListener, self).__init__(listener, 1) def run(self): self.started.set() while not self._done: for in_msg in self.listener.poll(timeout=0.5): # reply will never be acked (simulate drop): in_msg._reply_to = "!no-ack!" in_msg.reply(reply={'correlation-id': in_msg.message.get("id")}) self._done = True driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) driver._default_reply_timeout = 1 target = oslo_messaging.Target(topic="test-topic") listener = _TimeoutListener( driver.listen(target, None, None)._poll_style_listener) self.assertRaises(oslo_messaging.MessagingTimeout, driver.send, target, {"context": "whatever"}, {"method": "echo"}, wait_for_reply=True, timeout=3) listener.join(timeout=30) self.assertFalse(listener.isAlive()) driver.cleanup() def test_listener_requeue(self): "Emulate Server requeue on listener incoming messages" self.config(pre_settled=[], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) driver.require_features(requeue=True) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1, msg_ack=False) rc = driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) listener.join(timeout=30) self.assertFalse(listener.isAlive()) predicate = lambda: (self._broker.sender_link_requeue_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() def test_sender_minimal_credit(self): # ensure capacity is replenished when only 1 credit is configured self.config(reply_link_credit=1, rpc_server_credit=1, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic", server="server") listener = _ListenerThread(driver.listen(target, None, None)._poll_style_listener, 4) for i in range(4): threading.Thread(target=driver.send, args=(target, {"context": "whatever"}, {"method": "echo"}), kwargs={'wait_for_reply': True}).start() predicate = lambda: (self._broker.direct_count == 8) _wait_until(predicate, 30) self.assertTrue(predicate()) listener.join(timeout=30) driver.cleanup() def test_sender_link_maintenance(self): # ensure links are purged from cache self.config(default_sender_link_timeout=1, group="oslo_messaging_amqp") driver = 
amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic-maint") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 3) # the send should create a receiver link on the broker rc = driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) predicate = lambda: (self._broker.receiver_link_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) self.assertTrue(listener.isAlive()) self.assertEqual({"msg": "value"}, listener.messages.get().message) predicate = lambda: (self._broker.receiver_link_count == 0) _wait_until(predicate, 30) self.assertTrue(predicate()) # the next send should create a separate receiver link on the broker rc = driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) self.assertIsNone(rc) predicate = lambda: (self._broker.receiver_link_count == 1) _wait_until(predicate, 30) self.assertTrue(predicate()) self.assertTrue(listener.isAlive()) self.assertEqual({"msg": "value"}, listener.messages.get().message) predicate = lambda: (self._broker.receiver_link_count == 0) _wait_until(predicate, 30) self.assertTrue(predicate()) driver.cleanup() class TestAmqpNotification(_AmqpBrokerTestCaseAuto): """Test sending and receiving notifications.""" def test_notification(self): driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) notifications = [(oslo_messaging.Target(topic="topic-1"), 'info'), (oslo_messaging.Target(topic="topic-1"), 'error'), (oslo_messaging.Target(topic="topic-2"), 'debug')] nl = driver.listen_for_notifications( notifications, None, None, None)._poll_style_listener # send one for each support version: msg_count = len(notifications) * 2 listener = _ListenerThread(nl, msg_count) targets = ['topic-1.info', 'topic-1.bad', # will raise MessageDeliveryFailure 'bad-topic.debug', # will raise MessageDeliveryFailure 'topic-1.error', 'topic-2.debug'] excepted_targets = [] for version in (1.0, 2.0): for t in targets: try: driver.send_notification(oslo_messaging.Target(topic=t), "context", {'target': t}, version, retry=0) except oslo_messaging.MessageDeliveryFailure: excepted_targets.append(t) listener.join(timeout=30) self.assertFalse(listener.isAlive()) topics = [x.message.get('target') for x in listener.get_messages()] self.assertEqual(msg_count, len(topics)) self.assertEqual(2, topics.count('topic-1.info')) self.assertEqual(2, topics.count('topic-1.error')) self.assertEqual(2, topics.count('topic-2.debug')) self.assertEqual(4, self._broker.dropped_count) self.assertEqual(2, excepted_targets.count('topic-1.bad')) self.assertEqual(2, excepted_targets.count('bad-topic.debug')) driver.cleanup() def test_released_notification(self): """Broker sends a Nack (released)""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send_notification, oslo_messaging.Target(topic="bad address"), "context", {'target': "bad address"}, 2.0, retry=0) driver.cleanup() def test_notification_not_acked(self): """Simulate drop of ack from broker""" driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) # set this directly so we can use a value < minimum allowed driver._default_notify_timeout = 2 self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send_notification, oslo_messaging.Target(topic="!no-ack!"), "context", {'target': "!no-ack!"}, 2.0, retry=0) driver.cleanup() def test_no_ack_notification(self): """Verify no exception is thrown if acks 
are turned off""" # add a couple of illegal values for coverage of the warning self.config(pre_settled=['notify', 'fleabag', 'poochie'], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) # set this directly so we can use a value < minimum allowed driver._default_notify_timeout = 2 driver.send_notification(oslo_messaging.Target(topic="!no-ack!"), "context", {'target': "!no-ack!"}, 2.0) driver.cleanup() @testtools.skipUnless(pyngus and pyngus.VERSION < (2, 0, 0), "pyngus module not present") class TestAuthentication(test_utils.BaseTestCase): """Test user authentication using the old pyngus API""" def setUp(self): super(TestAuthentication, self).setUp() # for simplicity, encode the credentials as they would appear 'on the # wire' in a SASL frame - username and password prefixed by zero. user_credentials = ["\0joe\0secret"] self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sasl_mechanisms="PLAIN", user_credentials=user_credentials) self._broker.start() def tearDown(self): super(TestAuthentication, self).tearDown() self._broker.stop() def test_authentication_ok(self): """Verify that username and password given in TransportHost are accepted by the broker. """ addr = "amqp://joe:secret@%s:%d" % (self._broker.host, self._broker.port) url = oslo_messaging.TransportURL.parse(self.conf, addr) driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) rc = driver.send(target, {"context": True}, {"method": "echo"}, wait_for_reply=True) self.assertIsNotNone(rc) listener.join(timeout=30) self.assertFalse(listener.isAlive()) driver.cleanup() def test_authentication_failure(self): """Verify that a bad password given in TransportHost is rejected by the broker. 
""" addr = "amqp://joe:badpass@%s:%d" % (self._broker.host, self._broker.port) url = oslo_messaging.TransportURL.parse(self.conf, addr) driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send, target, {"context": True}, {"method": "echo"}, wait_for_reply=True, retry=2) driver.cleanup() @testtools.skipUnless(CYRUS_ENABLED, "Cyrus SASL not supported") class TestCyrusAuthentication(test_utils.BaseTestCase): """Test the driver's Cyrus SASL integration""" _conf_dir = None # Note: don't add ANONYMOUS or EXTERNAL mechs without updating the # test_authentication_bad_mechs test below _mechs = "DIGEST-MD5 SCRAM-SHA-1 CRAM-MD5 PLAIN" @classmethod def setUpClass(cls): # The Cyrus library can only be initialized once per _process_ # Create a SASL configuration and user database, # add a user 'joe' with password 'secret': cls._conf_dir = "/tmp/amqp1_tests_%s" % os.getpid() # no, we cannot use tempfile.mkdtemp() as it will 'helpfully' remove # the temp dir after the first test is run os.makedirs(cls._conf_dir) db = os.path.join(cls._conf_dir, 'openstack.sasldb') _t = "echo secret | saslpasswd2 -c -p -f ${db} -u myrealm joe" cmd = Template(_t).substitute(db=db) try: subprocess.check_call(args=cmd, shell=True) except Exception: shutil.rmtree(cls._conf_dir, ignore_errors=True) cls._conf_dir = None return # configure the SASL server: conf = os.path.join(cls._conf_dir, 'openstack.conf') t = Template("""sasldb_path: ${db} pwcheck_method: auxprop auxprop_plugin: sasldb mech_list: ${mechs} """) with open(conf, 'w') as f: f.write(t.substitute(db=db, mechs=cls._mechs)) @classmethod def tearDownClass(cls): if cls._conf_dir: shutil.rmtree(cls._conf_dir, ignore_errors=True) def setUp(self): # fire up a test broker with the SASL config: super(TestCyrusAuthentication, self).setUp() if TestCyrusAuthentication._conf_dir is None: self.skipTest("Cyrus SASL tools not installed") _mechs = TestCyrusAuthentication._mechs _dir = TestCyrusAuthentication._conf_dir self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sasl_mechanisms=_mechs, user_credentials=["\0joe@myrealm\0secret"], sasl_config_dir=_dir, sasl_config_name="openstack") self._broker.start() self.messaging_conf.transport_driver = 'amqp' self.conf = self.messaging_conf.conf def tearDown(self): if self._broker: self._broker.stop() self._broker = None super(TestCyrusAuthentication, self).tearDown() def _authentication_test(self, addr, retry=None): url = oslo_messaging.TransportURL.parse(self.conf, addr) driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) try: rc = driver.send(target, {"context": True}, {"method": "echo"}, wait_for_reply=True, retry=retry) self.assertIsNotNone(rc) listener.join(timeout=30) self.assertFalse(listener.isAlive()) finally: driver.cleanup() def test_authentication_ok(self): """Verify that username and password given in TransportHost are accepted by the broker. """ addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, self._broker.port) self._authentication_test(addr) def test_authentication_failure(self): """Verify that a bad password given in TransportHost is rejected by the broker. 
""" addr = "amqp://joe@myrealm:badpass@%s:%d" % (self._broker.host, self._broker.port) try: self._authentication_test(addr, retry=2) except oslo_messaging.MessageDeliveryFailure as e: # verify the exception indicates the failure was an authentication # error self.assertTrue('amqp:unauthorized-access' in str(e)) else: self.assertIsNone("Expected authentication failure") def test_authentication_bad_mechs(self): """Verify that the connection fails if the client's SASL mechanisms do not match the broker's. """ self.config(sasl_mechanisms="EXTERNAL ANONYMOUS", group="oslo_messaging_amqp") addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, self._broker.port) self.assertRaises(oslo_messaging.MessageDeliveryFailure, self._authentication_test, addr, retry=0) def test_authentication_default_username(self): """Verify that a configured username/password is used if none appears in the URL. Deprecated: username password deprecated in favor of transport_url """ addr = "amqp://%s:%d" % (self._broker.host, self._broker.port) self.config(username="joe@myrealm", password="secret", group="oslo_messaging_amqp") self._authentication_test(addr) def test_authentication_default_realm(self): """Verify that default realm is used if none present in username""" addr = "amqp://joe:secret@%s:%d" % (self._broker.host, self._broker.port) self.config(sasl_default_realm="myrealm", group="oslo_messaging_amqp") self._authentication_test(addr) def test_authentication_ignore_default_realm(self): """Verify that default realm is not used if realm present in username """ addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, self._broker.port) self.config(sasl_default_realm="bad-realm", group="oslo_messaging_amqp") self._authentication_test(addr) @testtools.skipUnless(pyngus, "proton modules not present") class TestFailover(test_utils.BaseTestCase): def setUp(self): super(TestFailover, self).setUp() # configure different addressing modes on the brokers to test failing # over from one type of backend to another self.config(addressing_mode='dynamic', group="oslo_messaging_amqp") self._brokers = [FakeBroker(self.conf.oslo_messaging_amqp, product="qpid-cpp"), FakeBroker(self.conf.oslo_messaging_amqp, product="routable")] self._primary = 0 self._backup = 1 hosts = [] for broker in self._brokers: hosts.append(oslo_messaging.TransportHost(hostname=broker.host, port=broker.port)) self._broker_url = oslo_messaging.TransportURL(self.conf, transport="amqp", hosts=hosts) def tearDown(self): super(TestFailover, self).tearDown() for broker in self._brokers: if broker.isAlive(): broker.stop() def _failover(self, fail_broker): self._brokers[0].start() self._brokers[1].start() # self.config(trace=True, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="my-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 2) # wait for listener links to come up on either broker # 4 == 3 links per listener + 1 for the global reply queue predicate = lambda: ((self._brokers[0].sender_link_count == 4) or (self._brokers[1].sender_link_count == 4)) _wait_until(predicate, 30) self.assertTrue(predicate()) if self._brokers[1].sender_link_count == 4: self._primary = 1 self._backup = 0 rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "echo-1"}, wait_for_reply=True, timeout=30) self.assertIsNotNone(rc) self.assertEqual('echo-1', rc.get('correlation-id')) # 1 request msg, 1 response: self.assertEqual(1, 
self._brokers[self._primary].topic_count) self.assertEqual(1, self._brokers[self._primary].direct_count) # invoke failover method fail_broker(self._brokers[self._primary]) # wait for listener links to re-establish on broker 1 # 4 = 3 links per listener + 1 for the global reply queue predicate = lambda: self._brokers[self._backup].sender_link_count == 4 _wait_until(predicate, 30) self.assertTrue(predicate()) rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "echo-2"}, wait_for_reply=True, timeout=2) self.assertIsNotNone(rc) self.assertEqual('echo-2', rc.get('correlation-id')) # 1 request msg, 1 response: self.assertEqual(1, self._brokers[self._backup].topic_count) self.assertEqual(1, self._brokers[self._backup].direct_count) listener.join(timeout=30) self.assertFalse(listener.isAlive()) # note: stopping the broker first tests cleaning up driver without a # connection active self._brokers[self._backup].stop() driver.cleanup() def test_broker_crash(self): """Simulate a failure of one broker.""" def _meth(broker): # fail broker: broker.stop() time.sleep(0.5) self._failover(_meth) def test_broker_shutdown(self): """Simulate a normal shutdown of a broker.""" def _meth(broker): broker.stop(clean=True) time.sleep(0.5) self._failover(_meth) def test_heartbeat_failover(self): """Simulate broker heartbeat timeout.""" def _meth(broker): # keep alive heartbeat from primary broker will stop, which should # force failover to backup broker in about two seconds broker.pause() self.config(idle_timeout=2, group="oslo_messaging_amqp") self._failover(_meth) self._brokers[self._primary].stop() def test_listener_failover(self): """Verify that Listeners sharing the same topic are re-established after failover. """ self._brokers[0].start() # self.config(trace=True, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="my-topic") bcast = oslo_messaging.Target(topic="my-topic", fanout=True) listener1 = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 2) listener2 = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 2) # wait for 7 sending links to become active on the broker. 
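        # (two listeners share the topic here, so 2 x 3 per-listener links
        #  + 1 reply link = 7; the three per-listener links correspond to
        #  the unicast, broadcast and exclusive/server subscriptions seen
        #  in TestLinkRecovery.test_listener_recovery)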
# 7 = 3 links per Listener + 1 global reply link predicate = lambda: self._brokers[0].sender_link_count == 7 _wait_until(predicate, 30) self.assertTrue(predicate()) driver.send(bcast, {"context": "whatever"}, {"method": "ignore", "id": "echo-1"}) # 1 message per listener predicate = lambda: self._brokers[0].fanout_sent_count == 2 _wait_until(predicate, 30) self.assertTrue(predicate()) # start broker 1 then shutdown broker 0: self._brokers[1].start() self._brokers[0].stop(clean=True) # wait again for 7 sending links to re-establish on broker 1 predicate = lambda: self._brokers[1].sender_link_count == 7 _wait_until(predicate, 30) self.assertTrue(predicate()) driver.send(bcast, {"context": "whatever"}, {"method": "ignore", "id": "echo-2"}) # 1 message per listener predicate = lambda: self._brokers[1].fanout_sent_count == 2 _wait_until(predicate, 30) self.assertTrue(predicate()) listener1.join(timeout=30) listener2.join(timeout=30) self.assertFalse(listener1.isAlive() or listener2.isAlive()) driver.cleanup() self._brokers[1].stop() @testtools.skipUnless(pyngus, "proton modules not present") class TestLinkRecovery(_AmqpBrokerTestCase): def _send_retry(self, reject, retries): self._reject = reject def on_active(link): if self._reject > 0: link.close() self._reject -= 1 else: link.add_capacity(10) self._broker.on_receiver_active = on_active self._broker.start() self.config(link_retry_delay=1, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread(driver.listen(target, None, None)._poll_style_listener, 1) try: rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "e1"}, wait_for_reply=True, retry=retries) self.assertIsNotNone(rc) self.assertEqual(rc.get('correlation-id'), 'e1') except Exception: listener.kill() driver.cleanup() raise listener.join(timeout=30) self.assertFalse(listener.isAlive()) self.assertEqual(listener.messages.get().message.get('method'), "echo") driver.cleanup() def test_send_retry_ok(self): # verify sender with retry=3 survives 2 link failures: self._send_retry(reject=2, retries=3) def test_send_retry_fail(self): # verify sender fails if retries exhausted self.assertRaises(oslo_messaging.MessageDeliveryFailure, self._send_retry, reject=3, retries=2) def test_listener_recovery(self): # verify a listener recovers if all links fail: self._addrs = {'unicast.test-topic': 2, 'broadcast.test-topic.all': 2, 'exclusive.test-topic.server': 2} self._recovered = threading.Event() self._count = 0 def _on_active(link): t = link.target_address if t in self._addrs: if self._addrs[t] > 0: link.close() self._addrs[t] -= 1 else: self._count += 1 if self._count == len(self._addrs): self._recovered.set() self._broker.on_sender_active = _on_active self._broker.start() self.config(link_retry_delay=1, group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic", server="server") listener = _ListenerThread(driver.listen(target, None, None)._poll_style_listener, 3) # wait for recovery self.assertTrue(self._recovered.wait(timeout=30)) # verify server RPC: rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "e1"}, wait_for_reply=True) self.assertIsNotNone(rc) self.assertEqual(rc.get('correlation-id'), 'e1') # verify balanced RPC: target.server = None rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "e2"}, wait_for_reply=True) 
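        # NOTE(editor): with server=None the broker may hand this anycast
        # call to either recovered listener; one reply carrying the
        # matching correlation-id is all that is required.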
self.assertIsNotNone(rc) self.assertEqual(rc.get('correlation-id'), 'e2') # verify fanout: target.fanout = True driver.send(target, {"context": "whatever"}, {"msg": "value"}, wait_for_reply=False) listener.join(timeout=30) self.assertTrue(self._broker.fanout_count == 1) self.assertFalse(listener.isAlive()) self.assertEqual(listener.messages.get().message.get('method'), "echo") driver.cleanup() def test_sender_credit_blocked(self): # ensure send requests resume once credit is provided self._blocked_links = set() def _on_active(link): # refuse granting credit for the broadcast link if self._broker._addresser._is_multicast(link.source_address): self._blocked_links.add(link) else: # unblock all link when RPC call is made link.add_capacity(10) for l in self._blocked_links: l.add_capacity(10) self._broker.on_receiver_active = _on_active self._broker.on_credit_exhausted = lambda link: None self._broker.start() driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic", server="server") listener = _ListenerThread(driver.listen(target, None, None)._poll_style_listener, 4) target.fanout = True target.server = None # these threads will share the same link for i in range(3): t = threading.Thread(target=driver.send, args=(target, {"context": "whatever"}, {"msg": "n=%d" % i}), kwargs={'wait_for_reply': False}) t.start() # casts return once message is put on active link t.join(timeout=30) time.sleep(1) # ensure messages are going nowhere self.assertEqual(self._broker.fanout_sent_count, 0) # this will trigger the release of credit for the previous links target.fanout = False rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "e1"}, wait_for_reply=True) self.assertIsNotNone(rc) self.assertEqual(rc.get('correlation-id'), 'e1') listener.join(timeout=30) self.assertTrue(self._broker.fanout_count == 3) self.assertFalse(listener.isAlive()) driver.cleanup() @testtools.skipUnless(pyngus, "proton modules not present") class TestAddressing(test_utils.BaseTestCase): # Verify the addressing modes supported by the driver def _address_test(self, rpc_target, targets_priorities): # verify proper messaging semantics for a given addressing mode broker = FakeBroker(self.conf.oslo_messaging_amqp) broker.start() url = oslo_messaging.TransportURL.parse(self.conf, "amqp://%s:%d" % (broker.host, broker.port)) driver = amqp_driver.ProtonDriver(self.conf, url) rl = [] for server in ["Server1", "Server2"]: _ = driver.listen(rpc_target(server=server), None, None)._poll_style_listener # 3 == 1 msg to server + 1 fanout msg + 1 anycast msg rl.append(_ListenerThread(_, 3)) nl = [] for n in range(2): _ = driver.listen_for_notifications(targets_priorities, None, None, None)._poll_style_listener nl.append(_ListenerThread(_, len(targets_priorities))) driver.send(rpc_target(server="Server1"), {"context": "whatever"}, {"msg": "Server1"}) driver.send(rpc_target(server="Server2"), {"context": "whatever"}, {"msg": "Server2"}) driver.send(rpc_target(fanout=True), {"context": "whatever"}, {"msg": "Fanout"}) # FakeBroker should evenly distribute these across the servers driver.send(rpc_target(server=None), {"context": "whatever"}, {"msg": "Anycast1"}) driver.send(rpc_target(server=None), {"context": "whatever"}, {"msg": "Anycast2"}) expected = [] for n in targets_priorities: # this is how the notifier creates an address: topic = "%s.%s" % (n[0].topic, n[1]) target = oslo_messaging.Target(topic=topic) driver.send_notification(target, {"context": "whatever"}, {"msg": 
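                                 # payload echoes the 'topic.priority'
                                 # address so received messages can be
                                 # matched against 'expected' below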
topic}, 2.0) expected.append(topic) for l in rl: l.join(timeout=30) # anycast will not evenly distribute an odd number of msgs predicate = lambda: len(expected) == (nl[0].messages.qsize() + nl[1].messages.qsize()) _wait_until(predicate, 30) for l in nl: l.kill(timeout=30) s1_payload = [m.message.get('msg') for m in rl[0].get_messages()] s2_payload = [m.message.get('msg') for m in rl[1].get_messages()] self.assertTrue("Server1" in s1_payload and "Server2" not in s1_payload) self.assertTrue("Server2" in s2_payload and "Server1" not in s2_payload) self.assertEqual(s1_payload.count("Fanout"), 1) self.assertEqual(s2_payload.count("Fanout"), 1) self.assertEqual((s1_payload + s2_payload).count("Anycast1"), 1) self.assertEqual((s1_payload + s2_payload).count("Anycast2"), 1) n1_payload = [m.message.get('msg') for m in nl[0].get_messages()] n2_payload = [m.message.get('msg') for m in nl[1].get_messages()] self.assertEqual((n1_payload + n2_payload).sort(), expected.sort()) driver.cleanup() broker.stop() return broker.message_log def test_routable_address(self): # verify routable address mode self.config(addressing_mode='routable', group="oslo_messaging_amqp") _opts = self.conf.oslo_messaging_amqp notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'), (oslo_messaging.Target(topic="test-topic"), 'error'), (oslo_messaging.Target(topic="test-topic"), 'debug')] msgs = self._address_test(oslo_messaging.Target(exchange="ex", topic="test-topic"), notifications) addrs = [m.address for m in msgs] notify_addrs = [a for a in addrs if a.startswith(_opts.notify_address_prefix)] self.assertEqual(len(notify_addrs), len(notifications)) # expect all notifications to be 'anycast' self.assertEqual(len(notifications), len([a for a in notify_addrs if _opts.anycast_address in a])) rpc_addrs = [a for a in addrs if a.startswith(_opts.rpc_address_prefix)] # 2 anycast messages self.assertEqual(2, len([a for a in rpc_addrs if _opts.anycast_address in a])) # 1 fanout sent self.assertEqual(1, len([a for a in rpc_addrs if _opts.multicast_address in a])) # 2 unicast messages (1 for each server) self.assertEqual(2, len([a for a in rpc_addrs if _opts.unicast_address in a])) def test_legacy_address(self): # verify legacy address mode self.config(addressing_mode='legacy', group="oslo_messaging_amqp") _opts = self.conf.oslo_messaging_amqp notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'), (oslo_messaging.Target(topic="test-topic"), 'error'), (oslo_messaging.Target(topic="test-topic"), 'debug')] msgs = self._address_test(oslo_messaging.Target(exchange="ex", topic="test-topic"), notifications) addrs = [m.address for m in msgs] server_addrs = [a for a in addrs if a.startswith(_opts.server_request_prefix)] broadcast_addrs = [a for a in addrs if a.startswith(_opts.broadcast_prefix)] group_addrs = [a for a in addrs if a.startswith(_opts.group_request_prefix)] # 2 server address messages sent self.assertEqual(len(server_addrs), 2) # 1 fanout address message sent self.assertEqual(len(broadcast_addrs), 1) # group messages: 2 rpc + all notifications self.assertEqual(len(group_addrs), 2 + len(notifications)) def test_address_options(self): # verify addressing configuration options self.config(addressing_mode='routable', group="oslo_messaging_amqp") self.config(rpc_address_prefix="RPC-PREFIX", group="oslo_messaging_amqp") self.config(notify_address_prefix="NOTIFY-PREFIX", group="oslo_messaging_amqp") self.config(multicast_address="MULTI-CAST", group="oslo_messaging_amqp") self.config(unicast_address="UNI-CAST", 
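                    # NOTE(editor): every overridden token configured here
                    # must appear in the addresses the FakeBroker records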
group="oslo_messaging_amqp") self.config(anycast_address="ANY-CAST", group="oslo_messaging_amqp") self.config(default_notification_exchange="NOTIFY-EXCHANGE", group="oslo_messaging_amqp") self.config(default_rpc_exchange="RPC-EXCHANGE", group="oslo_messaging_amqp") notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'), (oslo_messaging.Target(topic="test-topic"), 'error'), (oslo_messaging.Target(topic="test-topic"), 'debug')] msgs = self._address_test(oslo_messaging.Target(exchange=None, topic="test-topic"), notifications) addrs = [m.address for m in msgs] notify_addrs = [a for a in addrs if a.startswith("NOTIFY-PREFIX")] self.assertEqual(len(notify_addrs), len(notifications)) # expect all notifications to be 'anycast' self.assertEqual(len(notifications), len([a for a in notify_addrs if "ANY-CAST" in a])) # and all should contain the default exchange: self.assertEqual(len(notifications), len([a for a in notify_addrs if "NOTIFY-EXCHANGE" in a])) rpc_addrs = [a for a in addrs if a.startswith("RPC-PREFIX")] # 2 RPC anycast messages self.assertEqual(2, len([a for a in rpc_addrs if "ANY-CAST" in a])) # 1 RPC fanout sent self.assertEqual(1, len([a for a in rpc_addrs if "MULTI-CAST" in a])) # 2 RPC unicast messages (1 for each server) self.assertEqual(2, len([a for a in rpc_addrs if "UNI-CAST" in a])) self.assertEqual(len(rpc_addrs), len([a for a in rpc_addrs if "RPC-EXCHANGE" in a])) def _dynamic_test(self, product): # return the addresser used when connected to 'product' broker = FakeBroker(self.conf.oslo_messaging_amqp, product=product) broker.start() url = oslo_messaging.TransportURL.parse(self.conf, "amqp://%s:%d" % (broker.host, broker.port)) driver = amqp_driver.ProtonDriver(self.conf, url) # need to send a message to initate the connection to the broker target = oslo_messaging.Target(topic="test-topic", server="Server") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) driver.send(target, {"context": True}, {"msg": "value"}, wait_for_reply=False) listener.join(timeout=30) addresser = driver._ctrl.addresser driver.cleanup() broker.stop() # clears the driver's addresser return addresser def test_dynamic_addressing(self): # simply check that the correct addresser is provided based on the # identity of the messaging back-end self.config(addressing_mode='dynamic', group="oslo_messaging_amqp") self.assertIsInstance(self._dynamic_test("router"), RoutableAddresser) self.assertIsInstance(self._dynamic_test("qpid-cpp"), LegacyAddresser) @testtools.skipUnless(pyngus, "proton modules not present") class TestMessageRetransmit(_AmqpBrokerTestCase): # test message is retransmitted if safe to do so def _test_retransmit(self, nack_method): self._nack_count = 2 def _on_message(message, handle, link): if self._nack_count: self._nack_count -= 1 nack_method(link, handle) else: self._broker.forward_message(message, handle, link) self._broker.on_message = _on_message self._broker.start() self.config(link_retry_delay=1, pre_settled=[], group="oslo_messaging_amqp") driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread(driver.listen(target, None, None)._poll_style_listener, 1) try: rc = driver.send(target, {"context": "whatever"}, {"method": "echo", "id": "blah"}, wait_for_reply=True, retry=2) # initial send + up to 2 resends except Exception: # Some test runs are expected to raise an exception, # clean up the listener since no message was received listener.kill(timeout=30) 
raise else: self.assertIsNotNone(rc) self.assertEqual(0, self._nack_count) self.assertEqual(rc.get('correlation-id'), 'blah') listener.join(timeout=30) finally: self.assertFalse(listener.isAlive()) driver.cleanup() def test_released(self): # should retry and succeed self._test_retransmit(lambda l, h: l.message_released(h)) def test_modified(self): # should retry and succeed self._test_retransmit(lambda l, h: l.message_modified(h, False, False, {})) def test_modified_failed(self): # since delivery_failed is set to True, should fail self.assertRaises(oslo_messaging.MessageDeliveryFailure, self._test_retransmit, lambda l, h: l.message_modified(h, True, False, {})) def test_rejected(self): # rejected - should fail self.assertRaises(oslo_messaging.MessageDeliveryFailure, self._test_retransmit, lambda l, h: l.message_rejected(h, {})) @testtools.skipUnless(SSL_ENABLED, "OpenSSL not supported") class TestSSL(test_utils.BaseTestCase): """Test the driver's OpenSSL integration""" def setUp(self): super(TestSSL, self).setUp() # Create the CA, server, and client SSL certificates: self._tmpdir = tempfile.mkdtemp(prefix='amqp1') files = ['ca_key', 'ca_cert', 's_key', 's_req', 's_cert', 'c_key', 'c_req', 'c_cert', 'bad_cert', 'bad_req', 'bad_key'] conf = dict(zip(files, [os.path.join(self._tmpdir, "%s.pem" % f) for f in files])) conf['pw'] = 'password' conf['s_name'] = '127.0.0.1' conf['c_name'] = 'client.com' self._ssl_config = conf ssl_setup = [ # create self-signed CA certificate: Template('openssl req -x509 -nodes -newkey rsa:2048' ' -subj "/CN=Trusted.CA.com" -keyout ${ca_key}' ' -out ${ca_cert}').substitute(conf), # create Server key and certificate: Template('openssl genrsa -out ${s_key} 2048').substitute(conf), Template('openssl req -new -key ${s_key} -subj /CN=${s_name}' ' -passin pass:${pw} -out ${s_req}').substitute(conf), Template('openssl x509 -req -in ${s_req} -CA ${ca_cert}' ' -CAkey ${ca_key} -CAcreateserial -out' ' ${s_cert}').substitute(conf), # create a "bad" Server cert for testing CN validation: Template('openssl genrsa -out ${bad_key} 2048').substitute(conf), Template('openssl req -new -key ${bad_key} -subj /CN=Invalid' ' -passin pass:${pw} -out ${bad_req}').substitute(conf), Template('openssl x509 -req -in ${bad_req} -CA ${ca_cert}' ' -CAkey ${ca_key} -CAcreateserial -out' ' ${bad_cert}').substitute(conf), # create Client key and certificate for client authentication: Template('openssl genrsa -out ${c_key} 2048').substitute(conf), Template('openssl req -new -key ${c_key} -subj /CN=${c_name}' ' -passin pass:${pw} -out' ' ${c_req}').substitute(conf), Template('openssl x509 -req -in ${c_req} -CA ${ca_cert}' ' -CAkey ${ca_key} -CAcreateserial -out' ' ${c_cert}').substitute(conf) ] for cmd in ssl_setup: try: subprocess.check_call(args=shlex.split(cmd)) except Exception: shutil.rmtree(self._tmpdir, ignore_errors=True) self._tmpdir = None self.skipTest("OpenSSL tools not installed - skipping") def _ssl_server_ok(self, url): self._broker.start() self.config(ssl_ca_file=self._ssl_config['ca_cert'], group='oslo_messaging_amqp') tport_url = oslo_messaging.TransportURL.parse(self.conf, url) driver = amqp_driver.ProtonDriver(self.conf, tport_url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) driver.send(target, {"context": "whatever"}, {"method": "echo", "a": "b"}, wait_for_reply=True, timeout=30) listener.join(timeout=30) self.assertFalse(listener.isAlive()) driver.cleanup() def test_server_ok(self): # 
test client authenticates server self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sock_addr=self._ssl_config['s_name'], ssl_config=self._ssl_config) url = "amqp://%s:%d" % (self._broker.host, self._broker.port) self._ssl_server_ok(url) def test_server_ignore_vhost_ok(self): # test client authenticates server and ignores vhost self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sock_addr=self._ssl_config['s_name'], ssl_config=self._ssl_config) url = "amqp://%s:%d/my-vhost" % (self._broker.host, self._broker.port) self._ssl_server_ok(url) def test_server_check_vhost_ok(self): # test client authenticates server using vhost as CN # Use 'Invalid' from bad_cert CN self.config(ssl_verify_vhost=True, group='oslo_messaging_amqp') self._ssl_config['s_cert'] = self._ssl_config['bad_cert'] self._ssl_config['s_key'] = self._ssl_config['bad_key'] self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sock_addr=self._ssl_config['s_name'], ssl_config=self._ssl_config) url = "amqp://%s:%d/Invalid" % (self._broker.host, self._broker.port) self._ssl_server_ok(url) @mock.patch('ssl.get_default_verify_paths') def test_server_ok_with_ssl_set_in_transport_url(self, mock_verify_paths): # test client authenticates server self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sock_addr=self._ssl_config['s_name'], ssl_config=self._ssl_config) url = oslo_messaging.TransportURL.parse( self.conf, "amqp://%s:%d?ssl=1" % (self._broker.host, self._broker.port)) self._broker.start() mock_verify_paths.return_value = mock.Mock( cafile=self._ssl_config['ca_cert']) driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) driver.send(target, {"context": "whatever"}, {"method": "echo", "a": "b"}, wait_for_reply=True, timeout=30) listener.join(timeout=30) self.assertFalse(listener.isAlive()) driver.cleanup() def test_bad_server_fail(self): # test client does not connect to invalid server self._ssl_config['s_cert'] = self._ssl_config['bad_cert'] self._ssl_config['s_key'] = self._ssl_config['bad_key'] self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sock_addr=self._ssl_config['s_name'], ssl_config=self._ssl_config) url = oslo_messaging.TransportURL.parse(self.conf, "amqp://%s:%d" % (self._broker.host, self._broker.port)) self._broker.start() self.config(ssl_ca_file=self._ssl_config['ca_cert'], group='oslo_messaging_amqp') driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") self.assertRaises(oslo_messaging.MessageDeliveryFailure, driver.send, target, {"context": "whatever"}, {"method": "echo", "a": "b"}, wait_for_reply=False, retry=1) driver.cleanup() def test_client_auth_ok(self): # test server authenticates client self._ssl_config['authenticate_client'] = True self._broker = FakeBroker(self.conf.oslo_messaging_amqp, sock_addr=self._ssl_config['s_name'], ssl_config=self._ssl_config) url = oslo_messaging.TransportURL.parse(self.conf, "amqp://%s:%d" % (self._broker.host, self._broker.port)) self._broker.start() self.config(ssl_ca_file=self._ssl_config['ca_cert'], ssl_cert_file=self._ssl_config['c_cert'], ssl_key_file=self._ssl_config['c_key'], ssl_key_password=self._ssl_config['pw'], group='oslo_messaging_amqp') driver = amqp_driver.ProtonDriver(self.conf, url) target = oslo_messaging.Target(topic="test-topic") listener = _ListenerThread( driver.listen(target, None, None)._poll_style_listener, 1) driver.send(target, {"context": 
"whatever"}, {"method": "echo", "a": "b"}, wait_for_reply=True, timeout=30) listener.join(timeout=30) self.assertFalse(listener.isAlive()) driver.cleanup() def tearDown(self): if self._broker: self._broker.stop() self._broker = None if self._tmpdir: shutil.rmtree(self._tmpdir, ignore_errors=True) super(TestSSL, self).tearDown() @testtools.skipUnless(pyngus, "proton modules not present") class TestVHost(_AmqpBrokerTestCaseAuto): """Verify the pseudo virtual host behavior""" def _vhost_test(self): """Verify that all messaging for a particular vhost stays on that vhost """ self.config(pseudo_vhost=True, group="oslo_messaging_amqp") vhosts = ["None", "HOSTA", "HOSTB", "HOSTC"] target = oslo_messaging.Target(topic="test-topic") fanout = oslo_messaging.Target(topic="test-topic", fanout=True) listeners = {} ldrivers = {} sdrivers = {} replies = {} msgs = {} for vhost in vhosts: url = copy.copy(self._broker_url) url.virtual_host = vhost if vhost != "None" else None ldriver = amqp_driver.ProtonDriver(self.conf, url) listeners[vhost] = _ListenerThread( ldriver.listen(target, None, None)._poll_style_listener, 10) ldrivers[vhost] = ldriver sdrivers[vhost] = amqp_driver.ProtonDriver(self.conf, url) replies[vhost] = [] msgs[vhost] = [] # send a fanout and a single rpc call to each listener for vhost in vhosts: if vhost == "HOSTC": # expect no messages to HOSTC continue sdrivers[vhost].send(fanout, {"context": vhost}, {"vhost": vhost, "fanout": True, "id": vhost}) replies[vhost].append(sdrivers[vhost].send(target, {"context": vhost}, {"method": "echo", "id": vhost}, wait_for_reply=True)) time.sleep(1) for vhost in vhosts: msgs[vhost] += listeners[vhost].get_messages() if vhost == "HOSTC": # HOSTC should get nothing self.assertEqual(0, len(msgs[vhost])) self.assertEqual(0, len(replies[vhost])) continue self.assertEqual(2, len(msgs[vhost])) for m in msgs[vhost]: # the id must match the vhost self.assertEqual(vhost, m.message.get("id")) self.assertEqual(1, len(replies[vhost])) for m in replies[vhost]: # same for correlation id self.assertEqual(vhost, m.get("correlation-id")) for vhost in vhosts: listeners[vhost].kill() ldrivers[vhost].cleanup sdrivers[vhost].cleanup() def test_vhost_routing(self): """Test vhost using routable addresses """ self.config(addressing_mode='routable', group="oslo_messaging_amqp") self._vhost_test() def test_vhost_legacy(self): """Test vhost using legacy addresses """ self.config(addressing_mode='legacy', group="oslo_messaging_amqp") self._vhost_test() class FakeBroker(threading.Thread): """A test AMQP message 'broker'.""" if pyngus: class Connection(pyngus.ConnectionEventHandler): """A single AMQP connection.""" def __init__(self, server, socket_, name, product, sasl_mechanisms, user_credentials, sasl_config_dir, sasl_config_name): """Create a Connection using socket_.""" self.socket = socket_ self.name = name self.server = server self.sasl_mechanisms = sasl_mechanisms self.user_credentials = user_credentials properties = {'x-server': True} # setup SASL: if self.sasl_mechanisms: properties['x-sasl-mechs'] = self.sasl_mechanisms if "ANONYMOUS" not in self.sasl_mechanisms: properties['x-require-auth'] = True if sasl_config_dir: properties['x-sasl-config-dir'] = sasl_config_dir if sasl_config_name: properties['x-sasl-config-name'] = sasl_config_name # setup SSL if self.server._ssl_config: ssl = self.server._ssl_config properties['x-ssl-server'] = True properties['x-ssl-identity'] = (ssl['s_cert'], ssl['s_key'], ssl['pw']) # check for client authentication if 
ssl.get('authenticate_client'): properties['x-ssl-ca-file'] = ssl['ca_cert'] properties['x-ssl-verify-mode'] = 'verify-peer' properties['x-ssl-peer-name'] = ssl['c_name'] # misc connection properties if product: properties['properties'] = {'product': product} self.connection = server.container.create_connection( name, self, properties) self.connection.user_context = self if pyngus.VERSION < (2, 0, 0): # older versions of pyngus don't recognize the sasl # connection properties, so configure them manually: if sasl_mechanisms: self.connection.pn_sasl.mechanisms(sasl_mechanisms) self.connection.pn_sasl.server() self.connection.open() self.sender_links = set() self.receiver_links = set() self.dead_links = set() def destroy(self): """Destroy the test connection.""" for link in self.sender_links | self.receiver_links: link.destroy() self.sender_links.clear() self.receiver_links.clear() self.dead_links.clear() self.connection.destroy() self.connection = None self.socket.close() self.socket = None def fileno(self): """Allows use of this in a select() call.""" return self.socket.fileno() def process_input(self): """Called when socket is read-ready.""" try: pyngus.read_socket_input(self.connection, self.socket) self.connection.process(time.time()) except socket.error: self._socket_error() def send_output(self): """Called when socket is write-ready.""" try: pyngus.write_socket_output(self.connection, self.socket) self.connection.process(time.time()) except socket.error: self._socket_error() def _socket_error(self): self.connection.close_input() self.connection.close_output() # the broker will clean up in its main loop # Pyngus ConnectionEventHandler callbacks: def connection_active(self, connection): self.server.connection_count += 1 def connection_remote_closed(self, connection, reason): """Peer has closed the connection.""" self.connection.close() def connection_closed(self, connection): """Connection close completed.""" self.server.connection_count -= 1 def connection_failed(self, connection, error): """Connection failure detected.""" self.connection_closed(connection) def sender_requested(self, connection, link_handle, name, requested_source, properties): """Create a new message source.""" addr = requested_source or "source-" + uuid.uuid4().hex link = FakeBroker.SenderLink(self.server, self, link_handle, addr) self.sender_links.add(link) def receiver_requested(self, connection, link_handle, name, requested_target, properties): """Create a new message consumer.""" addr = requested_target or "target-" + uuid.uuid4().hex FakeBroker.ReceiverLink(self.server, self, link_handle, addr) def sasl_step(self, connection, pn_sasl): # only called if not using Cyrus SASL if 'PLAIN' in self.sasl_mechanisms: credentials = pn_sasl.recv() if not credentials: return # wait until some arrives if credentials not in self.user_credentials: # failed return pn_sasl.done(pn_sasl.AUTH) pn_sasl.done(pn_sasl.OK) class SenderLink(pyngus.SenderEventHandler): """An AMQP sending link.""" def __init__(self, server, conn, handle, src_addr=None): self.server = server self.conn = conn cnn = conn.connection self.link = cnn.accept_sender(handle, source_override=src_addr, event_handler=self) conn.sender_links.add(self) self.link.open() self.routed = False def destroy(self): """Destroy the link.""" conn = self.conn self.conn = None conn.sender_links.remove(self) conn.dead_links.discard(self) if self.link: self.link.destroy() self.link = None def send_message(self, message): """Send a message over this link.""" def pyngus_callback(link, 
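                                    # delivery-state callback: ACCEPTED
                                    # bumps the broker's ack counter,
                                    # RELEASED the requeue counter (below)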
handle, state, info): if state == pyngus.SenderLink.ACCEPTED: self.server.sender_link_ack_count += 1 elif state == pyngus.SenderLink.RELEASED: self.server.sender_link_requeue_count += 1 self.link.send(message, delivery_callback=pyngus_callback) def _cleanup(self): if self.routed: self.server.remove_route(self.link.source_address, self) self.routed = False self.conn.dead_links.add(self) # Pyngus SenderEventHandler callbacks: def sender_active(self, sender_link): self.server.sender_link_count += 1 self.server.add_route(self.link.source_address, self) self.routed = True self.server.on_sender_active(sender_link) def sender_remote_closed(self, sender_link, error): self.link.close() def sender_closed(self, sender_link): self.server.sender_link_count -= 1 self._cleanup() def sender_failed(self, sender_link, error): self.sender_closed(sender_link) class ReceiverLink(pyngus.ReceiverEventHandler): """An AMQP Receiving link.""" def __init__(self, server, conn, handle, addr=None): self.server = server self.conn = conn cnn = conn.connection self.link = cnn.accept_receiver(handle, target_override=addr, event_handler=self) conn.receiver_links.add(self) self.link.open() def destroy(self): """Destroy the link.""" conn = self.conn self.conn = None conn.receiver_links.remove(self) conn.dead_links.discard(self) if self.link: self.link.destroy() self.link = None # ReceiverEventHandler callbacks: def receiver_active(self, receiver_link): self.server.receiver_link_count += 1 self.server.on_receiver_active(receiver_link) def receiver_remote_closed(self, receiver_link, error): self.link.close() def receiver_closed(self, receiver_link): self.server.receiver_link_count -= 1 self.conn.dead_links.add(self) def receiver_failed(self, receiver_link, error): self.receiver_closed(receiver_link) def message_received(self, receiver_link, message, handle): """Forward this message out the proper sending link.""" self.server.on_message(message, handle, receiver_link) if self.link.capacity < 1: self.server.on_credit_exhausted(self.link) def __init__(self, cfg, sock_addr="", sock_port=0, product=None, default_exchange="Test-Exchange", sasl_mechanisms="ANONYMOUS", user_credentials=None, sasl_config_dir=None, sasl_config_name=None, ssl_config=None): """Create a fake broker listening on sock_addr:sock_port.""" if not pyngus: raise AssertionError("pyngus module not present") threading.Thread.__init__(self) self._product = product self._sasl_mechanisms = sasl_mechanisms self._sasl_config_dir = sasl_config_dir self._sasl_config_name = sasl_config_name self._user_credentials = user_credentials self._ssl_config = ssl_config self._wakeup_pipe = os.pipe() self._my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._my_socket.bind((sock_addr, sock_port)) self.host, self.port = self._my_socket.getsockname() self.container = pyngus.Container("test_server_%s:%d" % (self.host, self.port)) # create an addresser using the test client's config and expected # message bus so the broker can parse the message addresses af = AddresserFactory(default_exchange, cfg.addressing_mode, legacy_server_prefix=cfg.server_request_prefix, legacy_broadcast_prefix=cfg.broadcast_prefix, legacy_group_prefix=cfg.group_request_prefix, rpc_prefix=cfg.rpc_address_prefix, notify_prefix=cfg.notify_address_prefix, multicast=cfg.multicast_address, unicast=cfg.unicast_address, anycast=cfg.anycast_address) props = {'product': product} if product else {} self._addresser = af(props) self._connections = {} self._sources = {} self._pause = threading.Event() # count of 
messages forwarded, by messaging pattern self.direct_count = 0 self.topic_count = 0 self.fanout_count = 0 self.fanout_sent_count = 0 self.dropped_count = 0 # counts for active links and connections: self.connection_count = 0 self.sender_link_count = 0 self.receiver_link_count = 0 self.sender_link_ack_count = 0 self.sender_link_requeue_count = 0 # log of all messages received by the broker self.message_log = [] # callback hooks self.on_sender_active = lambda link: None self.on_receiver_active = lambda link: link.add_capacity(10) self.on_credit_exhausted = lambda link: link.add_capacity(10) self.on_message = lambda m, h, l: self.forward_message(m, h, l) def start(self): """Start the server.""" LOG.debug("Starting Test Broker on %s:%d", self.host, self.port) self._shutdown = False self._closing = False self.daemon = True self._pause.set() self._my_socket.listen(10) super(FakeBroker, self).start() def pause(self): self._pause.clear() os.write(self._wakeup_pipe[1], b'!') def unpause(self): self._pause.set() def stop(self, clean=False): """Stop the server.""" # If clean is True, attempt a clean shutdown by closing all open # links/connections first. Otherwise force an immediate disconnect LOG.debug("Stopping test Broker %s:%d", self.host, self.port) if clean: self._closing = 1 else: self._shutdown = True self._pause.set() os.write(self._wakeup_pipe[1], b'!') self.join() LOG.debug("Test Broker %s:%d stopped", self.host, self.port) def run(self): """Process I/O and timer events until the broker is stopped.""" LOG.debug("Test Broker on %s:%d started", self.host, self.port) while not self._shutdown: self._pause.wait() readers, writers, timers = self.container.need_processing() # map pyngus Connections back to _TestConnections: readfd = [c.user_context for c in readers] readfd.extend([self._my_socket, self._wakeup_pipe[0]]) writefd = [c.user_context for c in writers] timeout = None if timers: # [0] == next expiring timer deadline = timers[0].next_tick now = time.time() timeout = 0 if deadline <= now else deadline - now readable, writable, ignore = select.select(readfd, writefd, [], timeout) worked = set() for r in readable: if r is self._my_socket: # new inbound connection request received sock, addr = self._my_socket.accept() if not self._closing: # create a new Connection for it: name = str(addr) conn = FakeBroker.Connection(self, sock, name, self._product, self._sasl_mechanisms, self._user_credentials, self._sasl_config_dir, self._sasl_config_name) self._connections[conn.name] = conn else: sock.close() # drop it elif r is self._wakeup_pipe[0]: os.read(self._wakeup_pipe[0], 512) else: r.process_input() worked.add(r) for t in timers: now = time.time() if t.next_tick > now: break t.process(now) conn = t.user_context worked.add(conn) for w in writable: w.send_output() worked.add(w) # clean up any closed connections or links: while worked: conn = worked.pop() if conn.connection.closed: del self._connections[conn.name] conn.destroy() else: while conn.dead_links: conn.dead_links.pop().destroy() if self._closing and not self._connections: self._shutdown = True elif self._closing == 1: # start closing connections self._closing = 2 for conn in self._connections.values(): conn.connection.close() # Shutting down. Any open links are just disconnected - the peer will # see a socket close. 
self._my_socket.close() for conn in self._connections.values(): conn.destroy() self._connections = None self.container.destroy() self.container = None return 0 def add_route(self, address, link): # route from address -> link[, link ...] if address not in self._sources: self._sources[address] = [link] elif link not in self._sources[address]: self._sources[address].append(link) def remove_route(self, address, link): if address in self._sources: if link in self._sources[address]: self._sources[address].remove(link) if not self._sources[address]: del self._sources[address] def forward_message(self, message, handle, rlink): # returns True if message was routed self.message_log.append(message) dest = message.address if dest not in self._sources: # can't forward self.dropped_count += 1 # observe magic "don't ack" address if '!no-ack!' not in dest: rlink.message_released(handle) return LOG.debug("Forwarding [%s]", dest) # route "behavior" determined by address prefix: if self._addresser._is_multicast(dest): self.fanout_count += 1 for link in self._sources[dest]: self.fanout_sent_count += 1 LOG.debug("Broadcast to %s", dest) link.send_message(message) elif self._addresser._is_anycast(dest): # round-robin: self.topic_count += 1 link = self._sources[dest].pop(0) link.send_message(message) LOG.debug("Send to %s", dest) self._sources[dest].append(link) else: # unicast: self.direct_count += 1 LOG.debug("Unicast to %s", dest) self._sources[dest][0].send_message(message) rlink.message_accepted(handle) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/__init__.py0000666000175100017510000000000013224676046024524 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/test_impl_rabbit.py0000666000175100017510000012104013224676046026320 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
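# The tests below exercise the kombu-based RabbitMQ driver
# (oslo_messaging._drivers.impl_rabbit). They run against kombu's in-process
# 'memory' transport (selected by transport URLs such as 'kombu+memory:////'),
# so no live RabbitMQ broker is required.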
import datetime import ssl import sys import threading import time import uuid import fixtures import kombu import kombu.transport.memory from oslo_config import cfg from oslo_serialization import jsonutils import testscenarios import oslo_messaging from oslo_messaging._drivers import amqpdriver from oslo_messaging._drivers import common as driver_common from oslo_messaging._drivers import impl_rabbit as rabbit_driver from oslo_messaging.exceptions import MessageDeliveryFailure from oslo_messaging.tests import utils as test_utils from six.moves import mock load_tests = testscenarios.load_tests_apply_scenarios class TestDeprecatedRabbitDriverLoad(test_utils.BaseTestCase): def setUp(self): super(TestDeprecatedRabbitDriverLoad, self).setUp( conf=cfg.ConfigOpts()) self.messaging_conf.transport_driver = 'rabbit' self.config(fake_rabbit=True, group="oslo_messaging_rabbit") def test_driver_load(self): transport = oslo_messaging.get_transport(self.conf) self.addCleanup(transport.cleanup) driver = transport._driver url = driver._get_connection()._url self.assertIsInstance(driver, rabbit_driver.RabbitDriver) self.assertEqual('memory:////', url) class TestHeartbeat(test_utils.BaseTestCase): @mock.patch('oslo_messaging._drivers.impl_rabbit.LOG') @mock.patch('kombu.connection.Connection.heartbeat_check') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.' '_heartbeat_supported_and_enabled', return_value=True) @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.' 'ensure_connection') def _do_test_heartbeat_sent(self, fake_ensure_connection, fake_heartbeat_support, fake_heartbeat, fake_logger, heartbeat_side_effect=None, info=None): event = threading.Event() def heartbeat_check(rate=2): event.set() if heartbeat_side_effect: raise heartbeat_side_effect fake_heartbeat.side_effect = heartbeat_check transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) conn = transport._driver._get_connection() conn.ensure(method=lambda: True) event.wait() conn._heartbeat_stop() # check that the heartbeat has been called self.assertLess(0, fake_heartbeat.call_count) if not heartbeat_side_effect: self.assertEqual(1, fake_ensure_connection.call_count) self.assertEqual(2, fake_logger.debug.call_count) self.assertEqual(0, fake_logger.info.call_count) else: self.assertEqual(2, fake_ensure_connection.call_count) self.assertEqual(2, fake_logger.debug.call_count) self.assertEqual(1, fake_logger.info.call_count) self.assertIn(mock.call(info, mock.ANY), fake_logger.info.mock_calls) def test_heartbeat_sent_default(self): self._do_test_heartbeat_sent() def test_heartbeat_sent_connection_fail(self): self._do_test_heartbeat_sent( heartbeat_side_effect=kombu.exceptions.OperationalError, info='A recoverable connection/channel error occurred, ' 'trying to reconnect: %s') class TestRabbitQos(test_utils.BaseTestCase): def connection_with(self, prefetch, purpose): self.config(rabbit_qos_prefetch_count=prefetch, group="oslo_messaging_rabbit") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') transport._driver._get_connection(purpose) @mock.patch('kombu.transport.memory.Channel.basic_qos') def test_qos_sent_on_listen_connection(self, fake_basic_qos): self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_LISTEN) fake_basic_qos.assert_called_once_with(0, 1, False) @mock.patch('kombu.transport.memory.Channel.basic_qos') def test_qos_not_sent_when_cfg_zero(self, fake_basic_qos): self.connection_with(prefetch=0, 
purpose=driver_common.PURPOSE_LISTEN) fake_basic_qos.assert_not_called() @mock.patch('kombu.transport.memory.Channel.basic_qos') def test_qos_not_sent_on_send_connection(self, fake_basic_qos): self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_SEND) fake_basic_qos.assert_not_called() class TestRabbitDriverLoad(test_utils.BaseTestCase): scenarios = [ ('rabbit', dict(transport_driver='rabbit', url='amqp://guest:guest@localhost:5672//')), ('kombu', dict(transport_driver='kombu', url='amqp://guest:guest@localhost:5672//')), ('rabbit+memory', dict(transport_driver='kombu+memory', url='memory:///')) ] @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset') def test_driver_load(self, fake_ensure, fake_reset): self.config(heartbeat_timeout_threshold=60, group='oslo_messaging_rabbit') self.messaging_conf.transport_driver = self.transport_driver transport = oslo_messaging.get_transport(self.conf) self.addCleanup(transport.cleanup) driver = transport._driver url = driver._get_connection()._url self.assertIsInstance(driver, rabbit_driver.RabbitDriver) self.assertEqual(self.url, url) class TestRabbitDriverLoadSSL(test_utils.BaseTestCase): scenarios = [ ('no_ssl', dict(options=dict(), expected=False)), ('no_ssl_with_options', dict(options=dict(ssl_version='TLSv1'), expected=False)), ('just_ssl', dict(options=dict(ssl=True), expected=True)), ('ssl_with_options', dict(options=dict(ssl=True, ssl_version='TLSv1', ssl_key_file='foo', ssl_cert_file='bar', ssl_ca_file='foobar'), expected=dict(ssl_version=3, keyfile='foo', certfile='bar', ca_certs='foobar', cert_reqs=ssl.CERT_REQUIRED))), ] @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure') @mock.patch('kombu.connection.Connection') def test_driver_load(self, connection_klass, fake_ensure): self.config(group="oslo_messaging_rabbit", **self.options) transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) connection = transport._driver._get_connection() connection_klass.assert_called_once_with( 'memory:///', transport_options={ 'client_properties': { 'capabilities': { 'connection.blocked': True, 'consumer_cancel_notify': True, 'authentication_failure_close': True, }, 'connection_name': connection.name}, 'confirm_publish': True, 'on_blocked': mock.ANY, 'on_unblocked': mock.ANY}, ssl=self.expected, login_method='AMQPLAIN', heartbeat=60, failover_strategy='round-robin' ) class TestRabbitPublisher(test_utils.BaseTestCase): @mock.patch('kombu.messaging.Producer.publish') def test_send_with_timeout(self, fake_publish): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') exchange_mock = mock.Mock() with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection conn._publish(exchange_mock, 'msg', routing_key='routing_key', timeout=1) fake_publish.assert_called_with( 'msg', expiration=1, exchange=exchange_mock, compression=self.conf.oslo_messaging_rabbit.kombu_compression, routing_key='routing_key') @mock.patch('kombu.messaging.Producer.publish') def test_send_no_timeout(self, fake_publish): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') exchange_mock = mock.Mock() with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection conn._publish(exchange_mock, 'msg', routing_key='routing_key') fake_publish.assert_called_with( 'msg', expiration=None, 
compression=self.conf.oslo_messaging_rabbit.kombu_compression, exchange=exchange_mock, routing_key='routing_key') def test_declared_queue_publisher(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) e_passive = kombu.entity.Exchange( name='foobar', type='topic', passive=True) e_active = kombu.entity.Exchange( name='foobar', type='topic', passive=False) with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection exc = conn.connection.channel_errors[0] def try_send(exchange): conn._ensure_publishing( conn._publish_and_creates_default_queue, exchange, {}, routing_key='foobar') with mock.patch('kombu.transport.virtual.Channel.close'): # Ensure the exchange does not exist self.assertRaises(oslo_messaging.MessageDeliveryFailure, try_send, e_passive) # Create it try_send(e_active) # Ensure it now exists try_send(e_passive) with mock.patch('kombu.messaging.Producer.publish', side_effect=exc): with mock.patch('kombu.transport.virtual.Channel.close'): # Ensure the exchange is already in cache self.assertIn('foobar', conn._declared_exchanges) # Reset connection self.assertRaises(oslo_messaging.MessageDeliveryFailure, try_send, e_passive) # Ensure the cache is empty self.assertEqual(0, len(conn._declared_exchanges)) try_send(e_active) self.assertIn('foobar', conn._declared_exchanges) def test_send_exception_remap(self): bad_exc = Exception("Non-oslo.messaging exception") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') exchange_mock = mock.Mock() with transport._driver._get_connection( driver_common.PURPOSE_SEND) as pool_conn: conn = pool_conn.connection with mock.patch('kombu.messaging.Producer.publish', side_effect=bad_exc): self.assertRaises(MessageDeliveryFailure, conn._ensure_publishing, conn._publish, exchange_mock, 'msg') class TestRabbitConsume(test_utils.BaseTestCase): def test_consume_timeout(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) deadline = time.time() + 6 with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: self.assertRaises(driver_common.Timeout, conn.consume, timeout=3) # the kombu memory transport doesn't really raise errors, # so just simulate a real driver's behavior conn.connection.connection.recoverable_channel_errors = (IOError,) conn.declare_fanout_consumer("notif.info", lambda msg: True) with mock.patch('kombu.connection.Connection.drain_events', side_effect=IOError): self.assertRaises(driver_common.Timeout, conn.consume, timeout=3) self.assertEqual(0, int(deadline - time.time())) def test_consume_from_missing_queue(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://') self.addCleanup(transport.cleanup) with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: with mock.patch('kombu.Queue.consume') as consume, mock.patch( 'kombu.Queue.declare') as declare: conn.declare_topic_consumer(exchange_name='test', topic='test', callback=lambda msg: True) import amqp consume.side_effect = [amqp.NotFound, None] conn.connection.connection.recoverable_connection_errors = () conn.connection.connection.recoverable_channel_errors = () self.assertEqual(1, declare.call_count) conn.connection.connection.drain_events = mock.Mock() # Ensure that a queue will be re-declared if the consume method # of kombu.Queue raises amqp.NotFound conn.consume() self.assertEqual(2, declare.call_count) def 
test_consume_from_missing_queue_with_io_error_on_redeclaration(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://') self.addCleanup(transport.cleanup) with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: with mock.patch('kombu.Queue.consume') as consume, mock.patch( 'kombu.Queue.declare') as declare: conn.declare_topic_consumer(exchange_name='test', topic='test', callback=lambda msg: True) import amqp consume.side_effect = [amqp.NotFound, None] declare.side_effect = [IOError, None] conn.connection.connection.recoverable_connection_errors = ( IOError,) conn.connection.connection.recoverable_channel_errors = () self.assertEqual(1, declare.call_count) conn.connection.connection.drain_events = mock.Mock() # Ensure that the queue will be re-declared after a # 'queue not found' exception, despite the connection error. conn.consume() self.assertEqual(3, declare.call_count) def test_connection_ack_have_disconnected_kombu_connection(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) with transport._driver._get_connection( driver_common.PURPOSE_LISTEN) as conn: channel = conn.connection.channel with mock.patch('kombu.connection.Connection.connected', new_callable=mock.PropertyMock, return_value=False): self.assertRaises(driver_common.Timeout, conn.connection.consume, timeout=0.01) # Ensure a new channel has been set up self.assertNotEqual(channel, conn.connection.channel) class TestRabbitTransportURL(test_utils.BaseTestCase): scenarios = [ ('none', dict(url=None, expected=["amqp://guest:guest@localhost:5672//"])), ('memory', dict(url='kombu+memory:////', expected=["memory:///"])), ('empty', dict(url='rabbit:///', expected=['amqp://guest:guest@localhost:5672/'])), ('localhost', dict(url='rabbit://localhost/', expected=['amqp://:@localhost:5672/'])), ('virtual_host', dict(url='rabbit:///vhost', expected=['amqp://guest:guest@localhost:5672/vhost'])), ('no_creds', dict(url='rabbit://host/virtual_host', expected=['amqp://:@host:5672/virtual_host'])), ('no_port', dict(url='rabbit://user:password@host/virtual_host', expected=['amqp://user:password@host:5672/virtual_host'])), ('full_url', dict(url='rabbit://user:password@host:10/virtual_host', expected=['amqp://user:password@host:10/virtual_host'])), ('full_two_url', dict(url='rabbit://user:password@host:10,' 'user2:password2@host2:12/virtual_host', expected=["amqp://user:password@host:10/virtual_host", "amqp://user2:password2@host2:12/virtual_host"] )), ('rabbit_ipv6', dict(url='rabbit://u:p@[fd00:beef:dead:55::133]:10/vhost', expected=['amqp://u:p@[fd00:beef:dead:55::133]:10/vhost'])), ('rabbit_ipv4', dict(url='rabbit://user:password@10.20.30.40:10/vhost', expected=['amqp://user:password@10.20.30.40:10/vhost'])), ] def setUp(self): super(TestRabbitTransportURL, self).setUp() self.messaging_conf.transport_driver = 'rabbit' self.config(heartbeat_timeout_threshold=0, group='oslo_messaging_rabbit') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure') @mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset') def test_transport_url(self, fake_reset, fake_ensure): transport = oslo_messaging.get_transport(self.conf, self.url) self.addCleanup(transport.cleanup) driver = transport._driver urls = driver._get_connection()._url.split(";") self.assertEqual(sorted(self.expected), sorted(urls)) class TestSendReceive(test_utils.BaseTestCase): _n_senders = [ ('single_sender', dict(n_senders=1)), ('multiple_senders', dict(n_senders=10)), ] 
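    # Each list below is one axis of the test matrix; generate_scenarios()
    # feeds them to testscenarios.multiply_scenarios() so that every
    # combination of sender count, context, reply value, failure mode and
    # timeout runs as its own test case.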
_context = [ ('empty_context', dict(ctxt={})), ('with_context', dict(ctxt={'user': 'mark'})), ] _reply = [ ('rx_id', dict(rx_id=True, reply=None)), ('none', dict(rx_id=False, reply=None)), ('empty_list', dict(rx_id=False, reply=[])), ('empty_dict', dict(rx_id=False, reply={})), ('false', dict(rx_id=False, reply=False)), ('zero', dict(rx_id=False, reply=0)), ] _failure = [ ('success', dict(failure=False)), ('failure', dict(failure=True, expected=False)), ('expected_failure', dict(failure=True, expected=True)), ] _timeout = [ ('no_timeout', dict(timeout=None)), ('timeout', dict(timeout=0.01)), # FIXME(markmc): timeout=0 is broken? ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders, cls._context, cls._reply, cls._failure, cls._timeout) def test_send_receive(self): self.config(kombu_missing_consumer_retry_timeout=0.5, group="oslo_messaging_rabbit") self.config(heartbeat_timeout_threshold=0, group="oslo_messaging_rabbit") transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic') listener = driver.listen(target, None, None)._poll_style_listener senders = [] replies = [] msgs = [] def send_and_wait_for_reply(i): try: timeout = self.timeout replies.append(driver.send(target, self.ctxt, {'tx_id': i}, wait_for_reply=True, timeout=timeout)) self.assertFalse(self.failure) self.assertIsNone(self.timeout) except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e: replies.append(e) self.assertTrue(self.failure or self.timeout is not None) while len(senders) < self.n_senders: senders.append(threading.Thread(target=send_and_wait_for_reply, args=(len(senders), ))) for i in range(len(senders)): senders[i].start() received = listener.poll()[0] self.assertIsNotNone(received) self.assertEqual(self.ctxt, received.ctxt) self.assertEqual({'tx_id': i}, received.message) msgs.append(received) # reply in reverse, except reply to the first guy second from last order = list(range(len(senders) - 1, -1, -1)) if len(order) > 1: order[-1], order[-2] = order[-2], order[-1] for i in order: if self.timeout is None: if self.failure: try: raise ZeroDivisionError except Exception: failure = sys.exc_info() msgs[i].reply(failure=failure) elif self.rx_id: msgs[i].reply({'rx_id': i}) else: msgs[i].reply(self.reply) senders[i].join() self.assertEqual(len(senders), len(replies)) for i, reply in enumerate(replies): if self.timeout is not None: self.assertIsInstance(reply, oslo_messaging.MessagingTimeout) elif self.failure: self.assertIsInstance(reply, ZeroDivisionError) elif self.rx_id: self.assertEqual({'rx_id': order[i]}, reply) else: self.assertEqual(self.reply, reply) TestSendReceive.generate_scenarios() class TestPollAsync(test_utils.BaseTestCase): def test_poll_timeout(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic') listener = driver.listen(target, None, None)._poll_style_listener received = listener.poll(timeout=0.050) self.assertEqual([], received) class TestRacyWaitForReply(test_utils.BaseTestCase): def test_send_receive(self): transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic='testtopic') listener = driver.listen(target, None, None)._poll_style_listener senders = [] replies = 
[] msgs = [] wait_conditions = [] orig_reply_waiter = amqpdriver.ReplyWaiter.wait def reply_waiter(self, msg_id, timeout): if wait_conditions: cond = wait_conditions.pop() with cond: cond.notify() with cond: cond.wait() return orig_reply_waiter(self, msg_id, timeout) self.useFixture(fixtures.MockPatchObject( amqpdriver.ReplyWaiter, 'wait', reply_waiter)) def send_and_wait_for_reply(i, wait_for_reply): replies.append(driver.send(target, {}, {'tx_id': i}, wait_for_reply=wait_for_reply, timeout=None)) while len(senders) < 2: t = threading.Thread(target=send_and_wait_for_reply, args=(len(senders), True)) t.daemon = True senders.append(t) # test the case when msg_id is not set t = threading.Thread(target=send_and_wait_for_reply, args=(len(senders), False)) t.daemon = True senders.append(t) # Start the first guy, receive his message, but delay his polling notify_condition = threading.Condition() wait_conditions.append(notify_condition) with notify_condition: senders[0].start() notify_condition.wait() msgs.extend(listener.poll()) self.assertEqual({'tx_id': 0}, msgs[-1].message) # Start the second guy, receive his message senders[1].start() msgs.extend(listener.poll()) self.assertEqual({'tx_id': 1}, msgs[-1].message) # Reply to both in order, making the second thread queue # the reply meant for the first thread msgs[0].reply({'rx_id': 0}) msgs[1].reply({'rx_id': 1}) # Wait for the second thread to finish senders[1].join() # Start the 3rd guy, receive his message senders[2].start() msgs.extend(listener.poll()) self.assertEqual({'tx_id': 2}, msgs[-1].message) # Verify the _send_reply was not invoked by the driver: with mock.patch.object(msgs[2], '_send_reply') as method: msgs[2].reply({'rx_id': 2}) self.assertEqual(0, method.call_count) # Wait for the 3rd thread to finish senders[2].join() # Let the first thread continue with notify_condition: notify_condition.notify() # Wait for the first thread to finish senders[0].join() # Verify replies were received out of order self.assertEqual(len(senders), len(replies)) self.assertEqual({'rx_id': 1}, replies[0]) self.assertIsNone(replies[1]) self.assertEqual({'rx_id': 0}, replies[2]) def _declare_queue(target): connection = kombu.connection.BrokerConnection(transport='memory') # Kludge to speed up tests. 
connection.transport.polling_interval = 0.0 connection.connect() channel = connection.channel() # work around 'memory' transport bug in 1.1.3 channel._new_queue('ae.undeliver') if target.fanout: exchange = kombu.entity.Exchange(name=target.topic + '_fanout', type='fanout', durable=False, auto_delete=True) queue = kombu.entity.Queue(name=target.topic + '_fanout_12345', channel=channel, exchange=exchange, routing_key=target.topic) elif target.server: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) topic = '%s.%s' % (target.topic, target.server) queue = kombu.entity.Queue(name=topic, channel=channel, exchange=exchange, routing_key=topic) else: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) queue = kombu.entity.Queue(name=target.topic, channel=channel, exchange=exchange, routing_key=target.topic) queue.declare() return connection, channel, queue class TestRequestWireFormat(test_utils.BaseTestCase): _target = [ ('topic_target', dict(topic='testtopic', server=None, fanout=False)), ('server_target', dict(topic='testtopic', server='testserver', fanout=False)), ('fanout_target', dict(topic='testtopic', server=None, fanout=True)), ] _msg = [ ('empty_msg', dict(msg={}, expected={})), ('primitive_msg', dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), ('complex_msg', dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}}, expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), ] _context = [ ('empty_ctxt', dict(ctxt={}, expected_ctxt={})), ('user_project_ctxt', dict(ctxt={'user': 'mark', 'project': 'snarkybunch'}, expected_ctxt={'_context_user': 'mark', '_context_project': 'snarkybunch'})), ] _compression = [ ('gzip_compression', dict(compression='gzip')), ('without_compression', dict(compression=None)) ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._msg, cls._context, cls._target, cls._compression) def setUp(self): super(TestRequestWireFormat, self).setUp() self.uuids = [] self.orig_uuid4 = uuid.uuid4 self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4)) def mock_uuid4(self): self.uuids.append(self.orig_uuid4()) return self.uuids[-1] def test_request_wire_format(self): self.conf.oslo_messaging_rabbit.kombu_compression = self.compression transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic=self.topic, server=self.server, fanout=self.fanout) connection, channel, queue = _declare_queue(target) self.addCleanup(connection.release) driver.send(target, self.ctxt, self.msg) msgs = [] def callback(msg): msg = channel.message_to_python(msg) msg.ack() msgs.append(msg.payload) queue.consume(callback=callback, consumer_tag='1', nowait=False) connection.drain_events() self.assertEqual(1, len(msgs)) self.assertIn('oslo.message', msgs[0]) received = msgs[0] received['oslo.message'] = jsonutils.loads(received['oslo.message']) # FIXME(markmc): add _msg_id and _reply_q check expected_msg = { '_unique_id': self.uuids[0].hex, } expected_msg.update(self.expected) expected_msg.update(self.expected_ctxt) expected = { 'oslo.version': '2.0', 'oslo.message': expected_msg, } self.assertEqual(expected, received) TestRequestWireFormat.generate_scenarios() def _create_producer(target): connection = kombu.connection.BrokerConnection(transport='memory') # Kludge to speed up tests. 
connection.transport.polling_interval = 0.0 connection.connect() channel = connection.channel() # work around 'memory' transport bug in 1.1.3 channel._new_queue('ae.undeliver') if target.fanout: exchange = kombu.entity.Exchange(name=target.topic + '_fanout', type='fanout', durable=False, auto_delete=True) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=target.topic) elif target.server: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) topic = '%s.%s' % (target.topic, target.server) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=topic) else: exchange = kombu.entity.Exchange(name='openstack', type='topic', durable=False, auto_delete=False) producer = kombu.messaging.Producer(exchange=exchange, channel=channel, routing_key=target.topic) return connection, producer class TestReplyWireFormat(test_utils.BaseTestCase): _target = [ ('topic_target', dict(topic='testtopic', server=None, fanout=False)), ('server_target', dict(topic='testtopic', server='testserver', fanout=False)), ('fanout_target', dict(topic='testtopic', server=None, fanout=True)), ] _msg = [ ('empty_msg', dict(msg={}, expected={})), ('primitive_msg', dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})), ('complex_msg', dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}}, expected={'a': {'b': '1920-02-03T04:05:06.000007'}})), ] _context = [ ('empty_ctxt', dict(ctxt={}, expected_ctxt={})), ('user_project_ctxt', dict(ctxt={'_context_user': 'mark', '_context_project': 'snarkybunch'}, expected_ctxt={'user': 'mark', 'project': 'snarkybunch'})), ] _compression = [ ('gzip_compression', dict(compression='gzip')), ('without_compression', dict(compression=None)) ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._msg, cls._context, cls._target, cls._compression) def test_reply_wire_format(self): self.conf.oslo_messaging_rabbit.kombu_compression = self.compression transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////') self.addCleanup(transport.cleanup) driver = transport._driver target = oslo_messaging.Target(topic=self.topic, server=self.server, fanout=self.fanout) listener = driver.listen(target, None, None)._poll_style_listener connection, producer = _create_producer(target) self.addCleanup(connection.release) msg = { 'oslo.version': '2.0', 'oslo.message': {} } msg['oslo.message'].update(self.msg) msg['oslo.message'].update(self.ctxt) msg['oslo.message'].update({ '_msg_id': uuid.uuid4().hex, '_unique_id': uuid.uuid4().hex, '_reply_q': 'reply_' + uuid.uuid4().hex, }) msg['oslo.message'] = jsonutils.dumps(msg['oslo.message']) producer.publish(msg) received = listener.poll()[0] self.assertIsNotNone(received) self.assertEqual(self.expected_ctxt, received.ctxt) self.assertEqual(self.expected, received.message) TestReplyWireFormat.generate_scenarios() class RpcKombuHATestCase(test_utils.BaseTestCase): def setUp(self): super(RpcKombuHATestCase, self).setUp() self.brokers = ['host1', 'host2', 'host3', 'host4', 'host5'] self.config(rabbit_hosts=self.brokers, rabbit_retry_interval=0.01, rabbit_retry_backoff=0.01, kombu_reconnect_delay=0, heartbeat_timeout_threshold=0, group="oslo_messaging_rabbit") self.useFixture(fixtures.MockPatch( 'kombu.connection.Connection.connection')) self.useFixture(fixtures.MockPatch( 'kombu.connection.Connection.channel')) # starting from the first broker in the list url = oslo_messaging.TransportURL.parse(self.conf, None) self.connection = 
rabbit_driver.Connection(self.conf, url, driver_common.PURPOSE_SEND) self.useFixture(fixtures.MockPatch( 'kombu.connection.Connection.connect')) self.addCleanup(self.connection.close) def test_ensure_four_retry(self): mock_callback = mock.Mock(side_effect=IOError) self.assertRaises(oslo_messaging.MessageDeliveryFailure, self.connection.ensure, mock_callback, retry=4) self.assertEqual(6, mock_callback.call_count) def test_ensure_one_retry(self): mock_callback = mock.Mock(side_effect=IOError) self.assertRaises(oslo_messaging.MessageDeliveryFailure, self.connection.ensure, mock_callback, retry=1) self.assertEqual(3, mock_callback.call_count) def test_ensure_no_retry(self): mock_callback = mock.Mock(side_effect=IOError) self.assertRaises(oslo_messaging.MessageDeliveryFailure, self.connection.ensure, mock_callback, retry=0) self.assertEqual(2, mock_callback.call_count) class ConnectionLockTestCase(test_utils.BaseTestCase): def _thread(self, lock, sleep, heartbeat=False): def thread_task(): if heartbeat: with lock.for_heartbeat(): time.sleep(sleep) else: with lock: time.sleep(sleep) t = threading.Thread(target=thread_task) t.daemon = True t.start() start = time.time() def get_elapsed_time(): t.join() return time.time() - start return get_elapsed_time def test_workers_only(self): l = rabbit_driver.ConnectionLock() t1 = self._thread(l, 1) t2 = self._thread(l, 1) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t2(), places=0) def test_worker_and_heartbeat(self): l = rabbit_driver.ConnectionLock() t1 = self._thread(l, 1) t2 = self._thread(l, 1, heartbeat=True) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t2(), places=0) def test_workers_and_heartbeat(self): l = rabbit_driver.ConnectionLock() t1 = self._thread(l, 1) t2 = self._thread(l, 1) t3 = self._thread(l, 1) t4 = self._thread(l, 1, heartbeat=True) t5 = self._thread(l, 1) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t4(), places=0) self.assertAlmostEqual(3, t2(), places=0) self.assertAlmostEqual(4, t3(), places=0) self.assertAlmostEqual(5, t5(), places=0) def test_heartbeat(self): l = rabbit_driver.ConnectionLock() t1 = self._thread(l, 1, heartbeat=True) t2 = self._thread(l, 1) self.assertAlmostEqual(1, t1(), places=0) self.assertAlmostEqual(2, t2(), places=0) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/test_pool.py0000666000175100017510000000703613224676046025015 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
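# The tests below cover the generic connection pool shared by the drivers
# (oslo_messaging._drivers.pool): pool sizing, create() failures, and the
# blocking behavior of get() once the pool is exhausted. A minimal usage
# sketch, assuming only the Pool API exercised by these tests (subclass with
# a create() method, then get()/put()):
#
#     class ConnPool(pool.Pool):
#         def create(self):
#             return make_connection()  # hypothetical connection factory
#
#     p = ConnPool(max_size=10)
#     obj = p.get()   # blocks once max_size objects are checked out
#     p.put(obj)      # returns the object to the pool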
import threading import uuid import fixtures import testscenarios from oslo_messaging._drivers import pool from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class PoolTestCase(test_utils.BaseTestCase): _max_size = [ ('default_size', dict(max_size=None, n_iters=4)), ('set_max_size', dict(max_size=10, n_iters=10)), ] _create_error = [ ('no_create_error', dict(create_error=False)), ('create_error', dict(create_error=True)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._max_size, cls._create_error) class TestPool(pool.Pool): def create(self): return uuid.uuid4() class ThreadWaitWaiter(object): """A gross hack. Stub out the condition variable's wait() method and spin until it has been called by each thread. """ def __init__(self, cond, n_threads, test): self.cond = cond self.test = test self.n_threads = n_threads self.n_waits = 0 self.orig_wait = cond.wait def count_waits(**kwargs): self.n_waits += 1 self.orig_wait(**kwargs) self.test.useFixture(fixtures.MockPatchObject( self.cond, 'wait', count_waits)) def wait(self): while self.n_waits < self.n_threads: pass self.test.useFixture(fixtures.MockPatchObject( self.cond, 'wait', self.orig_wait)) def test_pool(self): kwargs = {} if self.max_size is not None: kwargs['max_size'] = self.max_size p = self.TestPool(**kwargs) if self.create_error: def create_error(): raise RuntimeError orig_create = p.create self.useFixture(fixtures.MockPatchObject( p, 'create', create_error)) self.assertRaises(RuntimeError, p.get) self.useFixture(fixtures.MockPatchObject( p, 'create', orig_create)) objs = [] for i in range(self.n_iters): objs.append(p.get()) self.assertIsInstance(objs[i], uuid.UUID) def wait_for_obj(): o = p.get() self.assertIn(o, objs) waiter = self.ThreadWaitWaiter(p._cond, self.n_iters, self) threads = [] for i in range(self.n_iters): t = threading.Thread(target=wait_for_obj) t.start() threads.append(t) waiter.wait() for o in objs: p.put(o) for t in threads: t.join() for o in objs: p.put(o) for o in p.iter_free(): self.assertIn(o, objs) objs.remove(o) self.assertEqual([], objs) PoolTestCase.generate_scenarios() oslo.messaging-5.35.0/oslo_messaging/tests/drivers/test_impl_kafka.py0000666000175100017510000001343213224676077026143 0ustar zuulzuul00000000000000# Copyright (C) 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
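# The tests below cover the Kafka driver
# (oslo_messaging._drivers.impl_kafka). Only the notification API is
# implemented by this driver: send() and listen() are expected to raise
# NotImplementedError, while notifications are published via
# kafka.KafkaProducer and consumed via kafka.KafkaConsumer (both mocked
# in these tests), e.g.:
#
#     transport = oslo_messaging.get_notification_transport(
#         conf, 'kafka://localhost:9092/')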
import kafka import kafka.errors from six.moves import mock import testscenarios import oslo_messaging from oslo_messaging._drivers import impl_kafka as kafka_driver from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TestKafkaDriverLoad(test_utils.BaseTestCase): def setUp(self): super(TestKafkaDriverLoad, self).setUp() self.messaging_conf.transport_driver = 'kafka' def test_driver_load(self): transport = oslo_messaging.get_notification_transport(self.conf) self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver) class TestKafkaTransportURL(test_utils.BaseTestCase): scenarios = [ ('none', dict(url=None, expected=dict(hostaddrs=['localhost:9092'], vhost=None))), ('empty', dict(url='kafka:///', expected=dict(hostaddrs=['localhost:9092'], vhost=''))), ('host', dict(url='kafka://127.0.0.1', expected=dict(hostaddrs=['127.0.0.1:9092'], vhost=None))), ('port', dict(url='kafka://localhost:1234', expected=dict(hostaddrs=['localhost:1234'], vhost=None))), ('vhost', dict(url='kafka://localhost:1234/my_host', expected=dict(hostaddrs=['localhost:1234'], vhost='my_host'))), ('two', dict(url='kafka://localhost:1234,localhost2:1234', expected=dict(hostaddrs=['localhost:1234', 'localhost2:1234'], vhost=None))), ] def setUp(self): super(TestKafkaTransportURL, self).setUp() self.messaging_conf.transport_driver = 'kafka' def test_transport_url(self): transport = oslo_messaging.get_notification_transport(self.conf, self.url) self.addCleanup(transport.cleanup) driver = transport._driver self.assertEqual(self.expected['hostaddrs'], driver.pconn.hostaddrs) self.assertEqual(self.expected['vhost'], driver.virtual_host) class TestKafkaDriver(test_utils.BaseTestCase): """Unit Test cases to test the kafka driver """ def setUp(self): super(TestKafkaDriver, self).setUp() self.messaging_conf.transport_driver = 'kafka' transport = oslo_messaging.get_notification_transport(self.conf) self.driver = transport._driver def test_send(self): target = oslo_messaging.Target(topic="topic_test") self.assertRaises(NotImplementedError, self.driver.send, target, {}, {}) def test_send_notification(self): target = oslo_messaging.Target(topic="topic_test") with mock.patch("kafka.KafkaProducer") as fake_producer_class: fake_producer = fake_producer_class.return_value fake_producer.send.side_effect = kafka.errors.NoBrokersAvailable self.assertRaises(kafka.errors.NoBrokersAvailable, self.driver.send_notification, target, {}, {"payload": ["test_1"]}, None, retry=3) self.assertEqual(3, fake_producer.send.call_count) def test_listen(self): target = oslo_messaging.Target(topic="topic_test") self.assertRaises(NotImplementedError, self.driver.listen, target, None, None) def test_listen_for_notifications(self): targets_and_priorities = [ (oslo_messaging.Target(topic="topic_test_1"), "sample"), ] expected_topics = ["topic_test_1.sample"] with mock.patch("kafka.KafkaConsumer") as consumer: self.driver.listen_for_notifications( targets_and_priorities, "kafka_test", 1000, 10) consumer.assert_called_once_with( *expected_topics, group_id="kafka_test", bootstrap_servers=['localhost:9092'], max_partition_fetch_bytes=mock.ANY, selector=mock.ANY ) def test_cleanup(self): listeners = [mock.MagicMock(), mock.MagicMock()] self.driver.listeners.extend(listeners) self.driver.cleanup() for listener in listeners: listener.close.assert_called_once_with() class TestKafkaConnection(test_utils.BaseTestCase): def setUp(self): super(TestKafkaConnection, self).setUp() 
self.messaging_conf.transport_driver = 'kafka' transport = oslo_messaging.get_notification_transport(self.conf) self.driver = transport._driver def test_notify(self): with mock.patch("kafka.KafkaProducer") as fake_producer_class: fake_producer = fake_producer_class.return_value self.driver.pconn.notify_send("fake_topic", {"fake_ctxt": "fake_param"}, {"fake_text": "fake_message_1"}, 10) self.assertEqual(2, len(fake_producer.send.mock_calls)) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/pika/0000775000175100017510000000000013224676256023352 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/pika/__init__.py0000666000175100017510000000000013224676046025450 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/drivers/pika/test_message.py0000666000175100017510000005440413224676046026415 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import unittest from concurrent import futures from oslo_serialization import jsonutils from oslo_utils import timeutils import pika from six.moves import mock import oslo_messaging from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns from oslo_messaging._drivers.pika_driver import pika_message as pika_drv_msg class PikaIncomingMessageTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.Mock() self._channel = mock.Mock() self._delivery_tag = 12345 self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag) self._properties = pika.BasicProperties( content_type="application/json", headers={"version": "1.0"}, ) self._body = ( b'{"_$_key_context":"context_value",' b'"payload_key": "payload_value"}' ) def test_message_body_parsing(self): message = pika_drv_msg.PikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) self.assertEqual("context_value", message.ctxt.get("key_context", None)) self.assertEqual("payload_value", message.message.get("payload_key", None)) def test_message_acknowledge(self): message = pika_drv_msg.PikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) message.acknowledge() self.assertEqual(1, self._channel.basic_ack.call_count) self.assertEqual({"delivery_tag": self._delivery_tag}, self._channel.basic_ack.call_args[1]) def test_message_acknowledge_no_ack(self): message = pika_drv_msg.PikaIncomingMessage( self._pika_engine, None, self._method, self._properties, self._body ) message.acknowledge() self.assertEqual(0, self._channel.basic_ack.call_count) def test_message_requeue(self): message = pika_drv_msg.PikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) message.requeue() self.assertEqual(1, self._channel.basic_nack.call_count) self.assertEqual({"delivery_tag": self._delivery_tag, 'requeue': True}, self._channel.basic_nack.call_args[1]) def test_message_requeue_no_ack(self): message = pika_drv_msg.PikaIncomingMessage( self._pika_engine, None, 
self._method, self._properties, self._body ) message.requeue() self.assertEqual(0, self._channel.basic_nack.call_count) class RpcPikaIncomingMessageTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.Mock() self._pika_engine.rpc_reply_retry_attempts = 3 self._pika_engine.rpc_reply_retry_delay = 0.25 self._channel = mock.Mock() self._delivery_tag = 12345 self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag) self._body = ( b'{"_$_key_context":"context_value",' b'"payload_key":"payload_value"}' ) self._properties = pika.BasicProperties( content_type="application/json", headers={"version": "1.0"}, ) def test_call_message_body_parsing(self): self._properties.correlation_id = 123456789 self._properties.reply_to = "reply_queue" message = pika_drv_msg.RpcPikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) self.assertEqual("context_value", message.ctxt.get("key_context", None)) self.assertEqual(123456789, message.msg_id) self.assertEqual("reply_queue", message.reply_q) self.assertEqual("payload_value", message.message.get("payload_key", None)) def test_cast_message_body_parsing(self): message = pika_drv_msg.RpcPikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) self.assertEqual("context_value", message.ctxt.get("key_context", None)) self.assertIsNone(message.msg_id) self.assertIsNone(message.reply_q) self.assertEqual("payload_value", message.message.get("payload_key", None)) @mock.patch(("oslo_messaging._drivers.pika_driver.pika_message." "PikaOutgoingMessage.send")) def test_reply_for_cast_message(self, send_reply_mock): message = pika_drv_msg.RpcPikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) self.assertEqual("context_value", message.ctxt.get("key_context", None)) self.assertIsNone(message.msg_id) self.assertIsNone(message.reply_q) self.assertEqual("payload_value", message.message.get("payload_key", None)) message.reply(reply=object()) self.assertEqual(0, send_reply_mock.call_count) @mock.patch("oslo_messaging._drivers.pika_driver.pika_message." "RpcReplyPikaOutgoingMessage") @mock.patch("tenacity.retry") def test_positive_reply_for_call_message(self, retry_mock, outgoing_message_mock): self._properties.correlation_id = 123456789 self._properties.reply_to = "reply_queue" message = pika_drv_msg.RpcPikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) self.assertEqual("context_value", message.ctxt.get("key_context", None)) self.assertEqual(123456789, message.msg_id) self.assertEqual("reply_queue", message.reply_q) self.assertEqual("payload_value", message.message.get("payload_key", None)) reply = "all_fine" message.reply(reply=reply) outgoing_message_mock.assert_called_once_with( self._pika_engine, 123456789, failure_info=None, reply='all_fine', content_type='application/json' ) outgoing_message_mock().send.assert_called_once_with( reply_q='reply_queue', stopwatch=mock.ANY, retrier=mock.ANY ) retry_mock.assert_called_once_with( stop=mock.ANY, retry=mock.ANY, wait=mock.ANY ) @mock.patch("oslo_messaging._drivers.pika_driver.pika_message." 
"RpcReplyPikaOutgoingMessage") @mock.patch("tenacity.retry") def test_negative_reply_for_call_message(self, retry_mock, outgoing_message_mock): self._properties.correlation_id = 123456789 self._properties.reply_to = "reply_queue" message = pika_drv_msg.RpcPikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, self._body ) self.assertEqual("context_value", message.ctxt.get("key_context", None)) self.assertEqual(123456789, message.msg_id) self.assertEqual("reply_queue", message.reply_q) self.assertEqual("payload_value", message.message.get("payload_key", None)) failure_info = object() message.reply(failure=failure_info) outgoing_message_mock.assert_called_once_with( self._pika_engine, 123456789, failure_info=failure_info, reply=None, content_type='application/json' ) outgoing_message_mock().send.assert_called_once_with( reply_q='reply_queue', stopwatch=mock.ANY, retrier=mock.ANY ) retry_mock.assert_called_once_with( stop=mock.ANY, retry=mock.ANY, wait=mock.ANY ) class RpcReplyPikaIncomingMessageTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.Mock() self._pika_engine.allowed_remote_exmods = [ pika_drv_cmns.EXCEPTIONS_MODULE, "oslo_messaging.exceptions" ] self._channel = mock.Mock() self._delivery_tag = 12345 self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag) self._properties = pika.BasicProperties( content_type="application/json", headers={"version": "1.0"}, correlation_id=123456789 ) def test_positive_reply_message_body_parsing(self): body = b'{"s": "all fine"}' message = pika_drv_msg.RpcReplyPikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, body ) self.assertEqual(123456789, message.msg_id) self.assertIsNone(message.failure) self.assertEqual("all fine", message.result) def test_negative_reply_message_body_parsing(self): body = (b'{' b' "e": {' b' "s": "Error message",' b' "t": ["TRACE HERE"],' b' "c": "MessagingException",' b' "m": "oslo_messaging.exceptions"' b' }' b'}') message = pika_drv_msg.RpcReplyPikaIncomingMessage( self._pika_engine, self._channel, self._method, self._properties, body ) self.assertEqual(123456789, message.msg_id) self.assertIsNone(message.result) self.assertEqual( 'Error message\n' 'TRACE HERE', str(message.failure) ) self.assertIsInstance(message.failure, oslo_messaging.MessagingException) class PikaOutgoingMessageTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.MagicMock() self._pika_engine.default_content_type = "application/json" self._exchange = "it is exchange" self._routing_key = "it is routing key" self._expiration = 1 self._stopwatch = ( timeutils.StopWatch(duration=self._expiration).start() ) self._mandatory = object() self._message = {"msg_type": 1, "msg_str": "hello"} self._context = {"request_id": 555, "token": "it is a token"} @mock.patch("oslo_serialization.jsonutils.dump_as_bytes", new=functools.partial(jsonutils.dump_as_bytes, sort_keys=True)) def test_send_with_confirmation(self): message = pika_drv_msg.PikaOutgoingMessage( self._pika_engine, self._message, self._context ) message.send( exchange=self._exchange, routing_key=self._routing_key, confirm=True, mandatory=self._mandatory, persistent=True, stopwatch=self._stopwatch, retrier=None ) self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.assert_called_once_with( body=mock.ANY, exchange=self._exchange, mandatory=self._mandatory, properties=mock.ANY, routing_key=self._routing_key ) body = 
self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["body"] self.assertEqual( b'{"_$_request_id": 555, "_$_token": "it is a token", ' b'"msg_str": "hello", "msg_type": 1}', body ) props = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["properties"] self.assertEqual('application/json', props.content_type) self.assertEqual(2, props.delivery_mode) self.assertTrue(self._expiration * 1000 - float(props.expiration) < 100) self.assertEqual({'version': '1.0'}, props.headers) self.assertTrue(props.message_id) @mock.patch("oslo_serialization.jsonutils.dump_as_bytes", new=functools.partial(jsonutils.dump_as_bytes, sort_keys=True)) def test_send_without_confirmation(self): message = pika_drv_msg.PikaOutgoingMessage( self._pika_engine, self._message, self._context ) message.send( exchange=self._exchange, routing_key=self._routing_key, confirm=False, mandatory=self._mandatory, persistent=False, stopwatch=self._stopwatch, retrier=None ) self._pika_engine.connection_without_confirmation_pool.acquire( ).__enter__().channel.publish.assert_called_once_with( body=mock.ANY, exchange=self._exchange, mandatory=self._mandatory, properties=mock.ANY, routing_key=self._routing_key ) body = self._pika_engine.connection_without_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["body"] self.assertEqual( b'{"_$_request_id": 555, "_$_token": "it is a token", ' b'"msg_str": "hello", "msg_type": 1}', body ) props = self._pika_engine.connection_without_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["properties"] self.assertEqual('application/json', props.content_type) self.assertEqual(1, props.delivery_mode) self.assertTrue(self._expiration * 1000 - float(props.expiration) < 100) self.assertEqual({'version': '1.0'}, props.headers) self.assertTrue(props.message_id) class RpcPikaOutgoingMessageTestCase(unittest.TestCase): def setUp(self): self._exchange = "it is exchange" self._routing_key = "it is routing key" self._pika_engine = mock.MagicMock() self._pika_engine.get_rpc_exchange_name.return_value = self._exchange self._pika_engine.get_rpc_queue_name.return_value = self._routing_key self._pika_engine.default_content_type = "application/json" self._message = {"msg_type": 1, "msg_str": "hello"} self._context = {"request_id": 555, "token": "it is a token"} @mock.patch("oslo_serialization.jsonutils.dump_as_bytes", new=functools.partial(jsonutils.dump_as_bytes, sort_keys=True)) def test_send_cast_message(self): message = pika_drv_msg.RpcPikaOutgoingMessage( self._pika_engine, self._message, self._context ) expiration = 1 stopwatch = timeutils.StopWatch(duration=expiration).start() message.send( exchange=self._exchange, routing_key=self._routing_key, reply_listener=None, stopwatch=stopwatch, retrier=None ) self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.assert_called_once_with( body=mock.ANY, exchange=self._exchange, mandatory=True, properties=mock.ANY, routing_key=self._routing_key ) body = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["body"] self.assertEqual( b'{"_$_request_id": 555, "_$_token": "it is a token", ' b'"msg_str": "hello", "msg_type": 1}', body ) props = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["properties"] self.assertEqual('application/json', props.content_type) self.assertEqual(1, props.delivery_mode) 
self.assertTrue(expiration * 1000 - float(props.expiration) < 100) self.assertEqual({'version': '1.0'}, props.headers) self.assertIsNone(props.correlation_id) self.assertIsNone(props.reply_to) self.assertTrue(props.message_id) @mock.patch("oslo_serialization.jsonutils.dump_as_bytes", new=functools.partial(jsonutils.dump_as_bytes, sort_keys=True)) def test_send_call_message(self): message = pika_drv_msg.RpcPikaOutgoingMessage( self._pika_engine, self._message, self._context ) expiration = 1 stopwatch = timeutils.StopWatch(duration=expiration).start() result = "it is a result" reply_queue_name = "reply_queue_name" future = futures.Future() future.set_result(result) reply_listener = mock.Mock() reply_listener.register_reply_waiter.return_value = future reply_listener.get_reply_qname.return_value = reply_queue_name res = message.send( exchange=self._exchange, routing_key=self._routing_key, reply_listener=reply_listener, stopwatch=stopwatch, retrier=None ) self.assertEqual(result, res) self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.assert_called_once_with( body=mock.ANY, exchange=self._exchange, mandatory=True, properties=mock.ANY, routing_key=self._routing_key ) body = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["body"] self.assertEqual( b'{"_$_request_id": 555, "_$_token": "it is a token", ' b'"msg_str": "hello", "msg_type": 1}', body ) props = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["properties"] self.assertEqual('application/json', props.content_type) self.assertEqual(1, props.delivery_mode) self.assertTrue(expiration * 1000 - float(props.expiration) < 100) self.assertEqual({'version': '1.0'}, props.headers) self.assertEqual(message.msg_id, props.correlation_id) self.assertEqual(reply_queue_name, props.reply_to) self.assertTrue(props.message_id) class RpcReplyPikaOutgoingMessageTestCase(unittest.TestCase): def setUp(self): self._reply_q = "reply_queue_name" self._expiration = 1 self._stopwatch = ( timeutils.StopWatch(duration=self._expiration).start() ) self._pika_engine = mock.MagicMock() self._rpc_reply_exchange = "rpc_reply_exchange" self._pika_engine.rpc_reply_exchange = self._rpc_reply_exchange self._pika_engine.default_content_type = "application/json" self._msg_id = 12345567 @mock.patch("oslo_serialization.jsonutils.dump_as_bytes", new=functools.partial(jsonutils.dump_as_bytes, sort_keys=True)) def test_success_message_send(self): message = pika_drv_msg.RpcReplyPikaOutgoingMessage( self._pika_engine, self._msg_id, reply="all_fine" ) message.send(self._reply_q, stopwatch=self._stopwatch, retrier=None) self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.assert_called_once_with( body=b'{"s": "all_fine"}', exchange=self._rpc_reply_exchange, mandatory=True, properties=mock.ANY, routing_key=self._reply_q ) props = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["properties"] self.assertEqual('application/json', props.content_type) self.assertEqual(1, props.delivery_mode) self.assertTrue(self._expiration * 1000 - float(props.expiration) < 100) self.assertEqual({'version': '1.0'}, props.headers) self.assertEqual(message.msg_id, props.correlation_id) self.assertIsNone(props.reply_to) self.assertTrue(props.message_id) @mock.patch("traceback.format_exception", new=lambda x, y, z: z) 
@mock.patch("oslo_serialization.jsonutils.dump_as_bytes", new=functools.partial(jsonutils.dump_as_bytes, sort_keys=True)) def test_failure_message_send(self): failure_info = (oslo_messaging.MessagingException, oslo_messaging.MessagingException("Error message"), ['It is a trace']) message = pika_drv_msg.RpcReplyPikaOutgoingMessage( self._pika_engine, self._msg_id, failure_info=failure_info ) message.send(self._reply_q, stopwatch=self._stopwatch, retrier=None) self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.assert_called_once_with( body=mock.ANY, exchange=self._rpc_reply_exchange, mandatory=True, properties=mock.ANY, routing_key=self._reply_q ) body = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["body"] self.assertEqual( b'{"e": {"c": "MessagingException", ' b'"m": "oslo_messaging.exceptions", "s": "Error message", ' b'"t": ["It is a trace"]}}', body ) props = self._pika_engine.connection_with_confirmation_pool.acquire( ).__enter__().channel.publish.call_args[1]["properties"] self.assertEqual('application/json', props.content_type) self.assertEqual(1, props.delivery_mode) self.assertTrue(self._expiration * 1000 - float(props.expiration) < 100) self.assertEqual({'version': '1.0'}, props.headers) self.assertEqual(message.msg_id, props.correlation_id) self.assertIsNone(props.reply_to) self.assertTrue(props.message_id) oslo.messaging-5.35.0/oslo_messaging/tests/drivers/pika/test_poller.py0000666000175100017510000004065313224676046026267 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import time import unittest from concurrent import futures from six.moves import mock from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc from oslo_messaging._drivers.pika_driver import pika_poller class PikaPollerTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.Mock() self._poller_connection_mock = mock.Mock() self._poller_channel_mock = mock.Mock() self._poller_connection_mock.channel.return_value = ( self._poller_channel_mock ) self._pika_engine.create_connection.return_value = ( self._poller_connection_mock ) self._executor = futures.ThreadPoolExecutor(1) def timer_task(timeout, callback): time.sleep(timeout) callback() self._poller_connection_mock.add_timeout.side_effect = ( lambda *args: self._executor.submit(timer_task, *args) ) self._prefetch_count = 123 @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." 
"_declare_queue_binding") def test_start(self, declare_queue_binding_mock): poller = pika_poller.PikaPoller( self._pika_engine, 1, None, self._prefetch_count, None ) poller.start(None) self.assertTrue(self._pika_engine.create_connection.called) self.assertTrue(self._poller_connection_mock.channel.called) self.assertTrue(declare_queue_binding_mock.called) def test_start_when_connection_unavailable(self): poller = pika_poller.PikaPoller( self._pika_engine, 1, None, self._prefetch_count, None ) self._pika_engine.create_connection.side_effect = ( pika_drv_exc.EstablishConnectionException ) # start() should not raise socket.timeout exception poller.start(None) # stop is needed to stop reconnection background job poller.stop() @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." "_declare_queue_binding") def test_message_processing(self, declare_queue_binding_mock): res = [] def on_incoming_callback(incoming): res.append(incoming) incoming_message_class_mock = mock.Mock() poller = pika_poller.PikaPoller( self._pika_engine, 1, None, self._prefetch_count, incoming_message_class=incoming_message_class_mock ) unused = object() method = object() properties = object() body = object() poller.start(on_incoming_callback) poller._on_message_with_ack_callback( unused, method, properties, body ) self.assertEqual(1, len(res)) self.assertEqual([incoming_message_class_mock.return_value], res[0]) incoming_message_class_mock.assert_called_once_with( self._pika_engine, self._poller_channel_mock, method, properties, body ) self.assertTrue(self._pika_engine.create_connection.called) self.assertTrue(self._poller_connection_mock.channel.called) self.assertTrue(declare_queue_binding_mock.called) @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." "_declare_queue_binding") def test_message_processing_batch(self, declare_queue_binding_mock): incoming_message_class_mock = mock.Mock() n = 10 params = [] res = [] def on_incoming_callback(incoming): res.append(incoming) poller = pika_poller.PikaPoller( self._pika_engine, n, None, self._prefetch_count, incoming_message_class=incoming_message_class_mock ) for i in range(n): params.append((object(), object(), object(), object())) poller.start(on_incoming_callback) for i in range(n): poller._on_message_with_ack_callback( *params[i] ) self.assertEqual(1, len(res)) self.assertEqual(10, len(res[0])) self.assertEqual(n, incoming_message_class_mock.call_count) for i in range(n): self.assertEqual(incoming_message_class_mock.return_value, res[0][i]) self.assertEqual( (self._pika_engine, self._poller_channel_mock) + params[i][1:], incoming_message_class_mock.call_args_list[i][0] ) self.assertTrue(self._pika_engine.create_connection.called) self.assertTrue(self._poller_connection_mock.channel.called) self.assertTrue(declare_queue_binding_mock.called) @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller." 
"_declare_queue_binding") def test_message_processing_batch_with_timeout(self, declare_queue_binding_mock): incoming_message_class_mock = mock.Mock() n = 10 timeout = 1 res = [] evt = threading.Event() def on_incoming_callback(incoming): res.append(incoming) evt.set() poller = pika_poller.PikaPoller( self._pika_engine, n, timeout, self._prefetch_count, incoming_message_class=incoming_message_class_mock ) params = [] success_count = 5 poller.start(on_incoming_callback) for i in range(n): params.append((object(), object(), object(), object())) for i in range(success_count): poller._on_message_with_ack_callback( *params[i] ) self.assertTrue(evt.wait(timeout * 2)) self.assertEqual(1, len(res)) self.assertEqual(success_count, len(res[0])) self.assertEqual(success_count, incoming_message_class_mock.call_count) for i in range(success_count): self.assertEqual(incoming_message_class_mock.return_value, res[0][i]) self.assertEqual( (self._pika_engine, self._poller_channel_mock) + params[i][1:], incoming_message_class_mock.call_args_list[i][0] ) self.assertTrue(self._pika_engine.create_connection.called) self.assertTrue(self._poller_connection_mock.channel.called) self.assertTrue(declare_queue_binding_mock.called) class RpcServicePikaPollerTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.Mock() self._poller_connection_mock = mock.Mock() self._poller_channel_mock = mock.Mock() self._poller_connection_mock.channel.return_value = ( self._poller_channel_mock ) self._pika_engine.create_connection.return_value = ( self._poller_connection_mock ) self._pika_engine.get_rpc_queue_name.side_effect = ( lambda topic, server, no_ack, worker=False: "_".join([topic, str(server), str(no_ack), str(worker)]) ) self._pika_engine.get_rpc_exchange_name.side_effect = ( lambda exchange: exchange ) self._prefetch_count = 123 self._target = mock.Mock(exchange="exchange", topic="topic", server="server") self._pika_engine.rpc_queue_expiration = 12345 @mock.patch("oslo_messaging._drivers.pika_driver.pika_message." 
"RpcPikaIncomingMessage") def test_declare_rpc_queue_bindings(self, rpc_pika_incoming_message_mock): poller = pika_poller.RpcServicePikaPoller( self._pika_engine, self._target, 1, None, self._prefetch_count ) poller.start(None) self.assertTrue(self._pika_engine.create_connection.called) self.assertTrue(self._poller_connection_mock.channel.called) declare_queue_binding_by_channel_mock = ( self._pika_engine.declare_queue_binding_by_channel ) self.assertEqual( 6, declare_queue_binding_by_channel_mock.call_count ) declare_queue_binding_by_channel_mock.assert_has_calls(( mock.call( channel=self._poller_channel_mock, durable=False, exchange="exchange", exchange_type='direct', queue="topic_None_True_False", queue_expiration=12345, routing_key="topic_None_True_False" ), mock.call( channel=self._poller_channel_mock, durable=False, exchange="exchange", exchange_type='direct', queue="topic_server_True_False", queue_expiration=12345, routing_key="topic_server_True_False" ), mock.call( channel=self._poller_channel_mock, durable=False, exchange="exchange", exchange_type='direct', queue="topic_server_True_True", queue_expiration=12345, routing_key="topic_all_workers_True_False" ), mock.call( channel=self._poller_channel_mock, durable=False, exchange="exchange", exchange_type='direct', queue="topic_None_False_False", queue_expiration=12345, routing_key="topic_None_False_False" ), mock.call( channel=self._poller_channel_mock, durable=False, exchange="exchange", exchange_type='direct', queue="topic_server_False_False", queue_expiration=12345, routing_key='topic_server_False_False' ), mock.call( channel=self._poller_channel_mock, durable=False, exchange="exchange", exchange_type='direct', queue="topic_server_False_True", queue_expiration=12345, routing_key='topic_all_workers_False_False' ) )) class RpcReplyServicePikaPollerTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.Mock() self._poller_connection_mock = mock.Mock() self._poller_channel_mock = mock.Mock() self._poller_connection_mock.channel.return_value = ( self._poller_channel_mock ) self._pika_engine.create_connection.return_value = ( self._poller_connection_mock ) self._prefetch_count = 123 self._exchange = "rpc_reply_exchange" self._queue = "rpc_reply_queue" self._pika_engine.rpc_reply_retry_delay = 12132543456 self._pika_engine.rpc_queue_expiration = 12345 self._pika_engine.rpc_reply_retry_attempts = 3 def test_declare_rpc_reply_queue_binding(self): poller = pika_poller.RpcReplyPikaPoller( self._pika_engine, self._exchange, self._queue, 1, None, self._prefetch_count, ) poller.start(None) poller.stop() declare_queue_binding_by_channel_mock = ( self._pika_engine.declare_queue_binding_by_channel ) self.assertEqual( 1, declare_queue_binding_by_channel_mock.call_count ) declare_queue_binding_by_channel_mock.assert_called_once_with( channel=self._poller_channel_mock, durable=False, exchange='rpc_reply_exchange', exchange_type='direct', queue='rpc_reply_queue', queue_expiration=12345, routing_key='rpc_reply_queue' ) class NotificationPikaPollerTestCase(unittest.TestCase): def setUp(self): self._pika_engine = mock.Mock() self._poller_connection_mock = mock.Mock() self._poller_channel_mock = mock.Mock() self._poller_connection_mock.channel.return_value = ( self._poller_channel_mock ) self._pika_engine.create_connection.return_value = ( self._poller_connection_mock ) self._prefetch_count = 123 self._target_and_priorities = ( ( mock.Mock(exchange="exchange1", topic="topic1", server="server1"), 1 ), ( mock.Mock(exchange="exchange1", 
topic="topic1"), 2 ), ( mock.Mock(exchange="exchange2", topic="topic2",), 1 ), ) self._pika_engine.notification_persistence = object() def test_declare_notification_queue_bindings_default_queue(self): poller = pika_poller.NotificationPikaPoller( self._pika_engine, self._target_and_priorities, 1, None, self._prefetch_count, None ) poller.start(None) self.assertTrue(self._pika_engine.create_connection.called) self.assertTrue(self._poller_connection_mock.channel.called) declare_queue_binding_by_channel_mock = ( self._pika_engine.declare_queue_binding_by_channel ) self.assertEqual( 3, declare_queue_binding_by_channel_mock.call_count ) declare_queue_binding_by_channel_mock.assert_has_calls(( mock.call( channel=self._poller_channel_mock, durable=self._pika_engine.notification_persistence, exchange="exchange1", exchange_type='direct', queue="topic1.1", queue_expiration=None, routing_key="topic1.1" ), mock.call( channel=self._poller_channel_mock, durable=self._pika_engine.notification_persistence, exchange="exchange1", exchange_type='direct', queue="topic1.2", queue_expiration=None, routing_key="topic1.2" ), mock.call( channel=self._poller_channel_mock, durable=self._pika_engine.notification_persistence, exchange="exchange2", exchange_type='direct', queue="topic2.1", queue_expiration=None, routing_key="topic2.1" ) )) def test_declare_notification_queue_bindings_custom_queue(self): poller = pika_poller.NotificationPikaPoller( self._pika_engine, self._target_and_priorities, 1, None, self._prefetch_count, "custom_queue_name" ) poller.start(None) self.assertTrue(self._pika_engine.create_connection.called) self.assertTrue(self._poller_connection_mock.channel.called) declare_queue_binding_by_channel_mock = ( self._pika_engine.declare_queue_binding_by_channel ) self.assertEqual( 3, declare_queue_binding_by_channel_mock.call_count ) declare_queue_binding_by_channel_mock.assert_has_calls(( mock.call( channel=self._poller_channel_mock, durable=self._pika_engine.notification_persistence, exchange="exchange1", exchange_type='direct', queue="custom_queue_name", queue_expiration=None, routing_key="topic1.1" ), mock.call( channel=self._poller_channel_mock, durable=self._pika_engine.notification_persistence, exchange="exchange1", exchange_type='direct', queue="custom_queue_name", queue_expiration=None, routing_key="topic1.2" ), mock.call( channel=self._poller_channel_mock, durable=self._pika_engine.notification_persistence, exchange="exchange2", exchange_type='direct', queue="custom_queue_name", queue_expiration=None, routing_key="topic2.1" ) )) oslo.messaging-5.35.0/oslo_messaging/tests/test_utils.py0000666000175100017510000000672313224676046023530 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_messaging._drivers import common from oslo_messaging import _utils as utils from oslo_messaging.tests import utils as test_utils from six.moves import mock class VersionIsCompatibleTestCase(test_utils.BaseTestCase): def test_version_is_compatible_same(self): self.assertTrue(utils.version_is_compatible('1.23', '1.23')) def test_version_is_compatible_newer_minor(self): self.assertTrue(utils.version_is_compatible('1.24', '1.23')) def test_version_is_compatible_older_minor(self): self.assertFalse(utils.version_is_compatible('1.22', '1.23')) def test_version_is_compatible_major_difference1(self): self.assertFalse(utils.version_is_compatible('2.23', '1.23')) def test_version_is_compatible_major_difference2(self): self.assertFalse(utils.version_is_compatible('1.23', '2.23')) def test_version_is_compatible_newer_rev(self): self.assertFalse(utils.version_is_compatible('1.23', '1.23.1')) def test_version_is_compatible_newer_rev_both(self): self.assertFalse(utils.version_is_compatible('1.23.1', '1.23.2')) def test_version_is_compatible_older_rev_both(self): self.assertTrue(utils.version_is_compatible('1.23.2', '1.23.1')) def test_version_is_compatible_older_rev(self): self.assertTrue(utils.version_is_compatible('1.24', '1.23.1')) def test_version_is_compatible_no_rev_is_zero(self): self.assertTrue(utils.version_is_compatible('1.23.0', '1.23')) class TimerTestCase(test_utils.BaseTestCase): def test_no_duration_no_callback(self): t = common.DecayingTimer() t.start() remaining = t.check_return() self.assertIsNone(remaining) def test_no_duration_but_maximum(self): t = common.DecayingTimer() t.start() remaining = t.check_return(maximum=2) self.assertEqual(2, remaining) @mock.patch('oslo_utils.timeutils.now') def test_duration_expired_no_callback(self, now): now.return_value = 0 t = common.DecayingTimer(2) t.start() now.return_value = 3 remaining = t.check_return() self.assertEqual(0, remaining) @mock.patch('oslo_utils.timeutils.now') def test_duration_callback(self, now): now.return_value = 0 t = common.DecayingTimer(2) t.start() now.return_value = 3 callback = mock.Mock() remaining = t.check_return(callback) self.assertEqual(0, remaining) callback.assert_called_once_with() @mock.patch('oslo_utils.timeutils.now') def test_duration_callback_with_args(self, now): now.return_value = 0 t = common.DecayingTimer(2) t.start() now.return_value = 3 callback = mock.Mock() remaining = t.check_return(callback, 1, a='b') self.assertEqual(0, remaining) callback.assert_called_once_with(1, a='b') oslo.messaging-5.35.0/oslo_messaging/tests/test_target.py0000666000175100017510000001546213224676046023656 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
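# [Editor's note: the sketch below is an editorial illustration, not part of
# the original test module.] The test cases that follow treat
# oslo_messaging.Target as a simple value object: the constructor stores the
# given attributes, and calling a target is expected to return a copy with
# the overrides applied (see TargetCallableTestCase). A minimal sketch:
def _target_usage_sketch():
    import oslo_messaging
    base = oslo_messaging.Target(topic='testtopic', version='3.4')
    narrowed = base(server='testserver')  # __call__ returns a modified copy
    assert narrowed.topic == 'testtopic'
    assert narrowed.server == 'testserver'
    assert base.server is None            # the original target is unchanged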
import testscenarios import oslo_messaging from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TargetConstructorTestCase(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(kwargs=dict())), ('exchange', dict(kwargs=dict(exchange='testexchange'))), ('topic', dict(kwargs=dict(topic='testtopic'))), ('namespace', dict(kwargs=dict(namespace='testnamespace'))), ('version', dict(kwargs=dict(version='3.4'))), ('server', dict(kwargs=dict(server='testserver'))), ('fanout', dict(kwargs=dict(fanout=True))), ] def test_constructor(self): target = oslo_messaging.Target(**self.kwargs) for k in self.kwargs: self.assertEqual(self.kwargs[k], getattr(target, k)) for k in ['exchange', 'topic', 'namespace', 'version', 'server', 'fanout']: if k in self.kwargs: continue self.assertIsNone(getattr(target, k)) class TargetCallableTestCase(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(attrs=dict(), kwargs=dict(), vals=dict())), ('exchange_attr', dict(attrs=dict(exchange='testexchange'), kwargs=dict(), vals=dict(exchange='testexchange'))), ('exchange_arg', dict(attrs=dict(), kwargs=dict(exchange='testexchange'), vals=dict(exchange='testexchange'))), ('topic_attr', dict(attrs=dict(topic='testtopic'), kwargs=dict(), vals=dict(topic='testtopic'))), ('topic_arg', dict(attrs=dict(), kwargs=dict(topic='testtopic'), vals=dict(topic='testtopic'))), ('namespace_attr', dict(attrs=dict(namespace='testnamespace'), kwargs=dict(), vals=dict(namespace='testnamespace'))), ('namespace_arg', dict(attrs=dict(), kwargs=dict(namespace='testnamespace'), vals=dict(namespace='testnamespace'))), ('version_attr', dict(attrs=dict(version='3.4'), kwargs=dict(), vals=dict(version='3.4'))), ('version_arg', dict(attrs=dict(), kwargs=dict(version='3.4'), vals=dict(version='3.4'))), ('server_attr', dict(attrs=dict(server='testserver'), kwargs=dict(), vals=dict(server='testserver'))), ('server_arg', dict(attrs=dict(), kwargs=dict(server='testserver'), vals=dict(server='testserver'))), ('fanout_attr', dict(attrs=dict(fanout=True), kwargs=dict(), vals=dict(fanout=True))), ('fanout_arg', dict(attrs=dict(), kwargs=dict(fanout=True), vals=dict(fanout=True))), ] def test_callable(self): target = oslo_messaging.Target(**self.attrs) target = target(**self.kwargs) for k in self.vals: self.assertEqual(self.vals[k], getattr(target, k)) for k in ['exchange', 'topic', 'namespace', 'version', 'server', 'fanout']: if k in self.vals: continue self.assertIsNone(getattr(target, k)) class TargetReprTestCase(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(kwargs=dict(), repr='')), ('exchange', dict(kwargs=dict(exchange='testexchange'), repr='exchange=testexchange')), ('topic', dict(kwargs=dict(topic='testtopic'), repr='topic=testtopic')), ('namespace', dict(kwargs=dict(namespace='testnamespace'), repr='namespace=testnamespace')), ('version', dict(kwargs=dict(version='3.4'), repr='version=3.4')), ('server', dict(kwargs=dict(server='testserver'), repr='server=testserver')), ('fanout', dict(kwargs=dict(fanout=True), repr='fanout=True')), ('exchange_and_fanout', dict(kwargs=dict(exchange='testexchange', fanout=True), repr='exchange=testexchange, ' 'fanout=True')), ] def test_repr(self): target = oslo_messaging.Target(**self.kwargs) self.assertEqual('<Target ' + self.repr + '>', str(target)) _notset = object() class EqualityTestCase(test_utils.BaseTestCase): @classmethod def generate_scenarios(cls): attr = [ ('exchange', dict(attr='exchange')), ('topic', dict(attr='topic')), ('namespace', dict(attr='namespace')),
('version', dict(attr='version')), ('server', dict(attr='server')), ('fanout', dict(attr='fanout')), ] a = [ ('a_notset', dict(a_value=_notset)), ('a_none', dict(a_value=None)), ('a_empty', dict(a_value='')), ('a_foo', dict(a_value='foo')), ('a_bar', dict(a_value='bar')), ] b = [ ('b_notset', dict(b_value=_notset)), ('b_none', dict(b_value=None)), ('b_empty', dict(b_value='')), ('b_foo', dict(b_value='foo')), ('b_bar', dict(b_value='bar')), ] cls.scenarios = testscenarios.multiply_scenarios(attr, a, b) for s in cls.scenarios: s[1]['equals'] = (s[1]['a_value'] == s[1]['b_value']) def test_equality(self): a_kwargs = {self.attr: self.a_value} b_kwargs = {self.attr: self.b_value} a = oslo_messaging.Target(**a_kwargs) b = oslo_messaging.Target(**b_kwargs) if self.equals: self.assertEqual(a, b) self.assertFalse(a != b) else: self.assertNotEqual(a, b) self.assertFalse(a == b) EqualityTestCase.generate_scenarios() oslo.messaging-5.35.0/oslo_messaging/tests/__init__.py0000666000175100017510000000170213224676046023060 0ustar zuulzuul00000000000000# Copyright 2014 eNovance # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet eventlet.monkey_patch() # oslotest prepares mock for six in oslotest/__init__.py as follows: # six.add_move(six.MovedModule('mock', 'mock', 'unittest.mock')) and # oslo.messaging imports oslotest before importing test submodules to # set up six.moves for mock, then "from six.moves import mock" works well. import oslotest oslo.messaging-5.35.0/oslo_messaging/tests/functional/0000775000175100017510000000000013224676256023112 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/functional/test_rabbitmq.py0000666000175100017510000001243213224676046026325 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
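# [Editor's note: an editorial illustration, not part of the original test
# module.] The failover test below drives a three-node pifpaf RabbitMQ
# cluster; its recurring step is: stop or kill a node, verify the RPC path
# still works, then assert that every connection moved by checking its TCP
# peer port, roughly:
#
#     self.pifpaf.kill_node(self.n2, signal=signal.SIGKILL)
#     self.assertEqual("callback done", self.client.just_process())
#     self._check_ports(self.pifpaf.get_port(self.n3))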
import os import signal import time import fixtures from pifpaf.drivers import rabbitmq from oslo_messaging.tests.functional import utils from oslo_messaging.tests import utils as test_utils class ConnectedPortMatcher(object): def __init__(self, port): self.port = port def __eq__(self, data): return data.get("port") == self.port def __repr__(self): return "<ConnectedPortMatcher port=%d>" % self.port class RabbitMQFailoverTests(test_utils.BaseTestCase): DRIVERS = [ "rabbit", ] def test_failover_scenario(self): # NOTE(sileht): run this test only if the functional suite runs with a # driver that uses rabbitmq as backend self.driver = os.environ.get('TRANSPORT_DRIVER') if self.driver not in self.DRIVERS: self.skipTest("TRANSPORT_DRIVER is not set to a rabbit driver") # NOTE(sileht): Allow only one response at a time, to # have only one tcp connection for reply and ensure it will failover # correctly self.config(heartbeat_timeout_threshold=1, rpc_conn_pool_size=1, kombu_reconnect_delay=0, rabbit_retry_interval=0, rabbit_retry_backoff=0, group='oslo_messaging_rabbit') self.pifpaf = self.useFixture(rabbitmq.RabbitMQDriver(cluster=True, port=5692)) self.url = self.pifpaf.env["PIFPAF_URL"] self.n1 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME1"] self.n2 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME2"] self.n3 = self.pifpaf.env["PIFPAF_RABBITMQ_NODENAME3"] # NOTE(gdavoian): additional tweak for pika driver if self.driver == "pika": self.url = self.url.replace("rabbit", "pika") # ensure connections will be established to the first node self.pifpaf.stop_node(self.n2) self.pifpaf.stop_node(self.n3) self.servers = self.useFixture(utils.RpcServerGroupFixture( self.conf, self.url, endpoint=self, names=["server"])) # Don't randomize rabbit hosts self.useFixture(fixtures.MockPatch( 'oslo_messaging._drivers.impl_rabbit.random', side_effect=lambda x: x)) # NOTE(sileht): this connects the server connections and the reply # connection to nodename n1 self.client = self.servers.client(0) self.client.ping() self._check_ports(self.pifpaf.port) # Switch to node n2 self.pifpaf.start_node(self.n2) self.assertEqual("callback done", self.client.kill_and_process()) self.assertEqual("callback done", self.client.just_process()) self._check_ports(self.pifpaf.get_port(self.n2)) # Switch to node n3 self.pifpaf.start_node(self.n3) time.sleep(0.1) self.pifpaf.kill_node(self.n2, signal=signal.SIGKILL) time.sleep(0.1) self.assertEqual("callback done", self.client.just_process()) self._check_ports(self.pifpaf.get_port(self.n3)) self.pifpaf.start_node(self.n1) time.sleep(0.1) self.pifpaf.kill_node(self.n3, signal=signal.SIGKILL) time.sleep(0.1) self.assertEqual("callback done", self.client.just_process()) self._check_ports(self.pifpaf.get_port(self.n1)) def kill_and_process(self, *args, **kargs): self.pifpaf.kill_node(self.n1, signal=signal.SIGKILL) time.sleep(0.1) return "callback done" def just_process(self, *args, **kargs): return "callback done" def _check_ports(self, port): getattr(self, '_check_ports_%s_driver' % self.driver)(port) def _check_ports_pika_driver(self, port): rpc_server = self.servers.servers[0].server # FIXME(sileht): Check other connections connections = [ rpc_server.listener._connection ] for conn in connections: self.assertEqual( port, conn._impl.socket.getpeername()[1]) def _check_ports_rabbit_driver(self, port): rpc_server = self.servers.servers[0].server connection_contexts = [ # rpc server rpc_server.listener._poll_style_listener.conn, # rpc client self.client.client.transport._driver._get_connection(), # rpc client replies waiter
self.client.client.transport._driver._reply_q_conn, ] ports = [cctxt.connection.channel.connection.sock.getpeername()[1] for cctxt in connection_contexts] self.assertEqual([port] * len(ports), ports, "expected: %s, rpc-server: %s, rpc-client: %s, " "rpc-replies: %s" % tuple([port] + ports)) oslo.messaging-5.35.0/oslo_messaging/tests/functional/zmq/0000775000175100017510000000000013224676256023721 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/functional/zmq/multiproc_utils.py0000666000175100017510000001643213224676046027536 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import logging.handlers import multiprocessing import os import sys import threading import time import uuid from oslo_config import cfg import oslo_messaging from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging.tests.functional import utils LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class QueueHandler(logging.Handler): """This is a logging handler which sends events to a multiprocessing queue. The plan is to add it to Python 3.2, but this can be copy pasted into user code for use with earlier Python versions. """ def __init__(self, queue): """Initialise an instance, using the passed queue.""" logging.Handler.__init__(self) self.queue = queue def emit(self, record): """Emit a record. Writes the LogRecord to the queue. 
""" try: ei = record.exc_info if ei: # just to get traceback text into record.exc_text dummy = self.format(record) # noqa record.exc_info = None # not needed any more self.queue.put_nowait(record) except (KeyboardInterrupt, SystemExit): raise except Exception: self.handleError(record) def listener_configurer(conf): root = logging.getLogger() h = logging.StreamHandler(sys.stdout) f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s ' '%(levelname)-8s %(message)s') h.setFormatter(f) root.addHandler(h) log_path = conf.oslo_messaging_zmq.rpc_zmq_ipc_dir + \ "/" + "zmq_multiproc.log" file_handler = logging.StreamHandler(open(log_path, 'w')) file_handler.setFormatter(f) root.addHandler(file_handler) def server_configurer(queue): h = QueueHandler(queue) root = logging.getLogger() root.addHandler(h) root.setLevel(logging.DEBUG) def listener_thread(queue, configurer, conf): configurer(conf) while True: time.sleep(0.3) try: record = queue.get() if record is None: break logger = logging.getLogger(record.name) logger.handle(record) except (KeyboardInterrupt, SystemExit): raise class Client(oslo_messaging.RPCClient): def __init__(self, transport, topic): super(Client, self).__init__( transport=transport, target=oslo_messaging.Target(topic=topic)) self.replies = [] def call_a(self): LOG.warning("call_a - client side") rep = self.call({}, 'call_a') LOG.warning("after call_a - client side") self.replies.append(rep) return rep class ReplyServerEndpoint(object): def call_a(self, *args, **kwargs): LOG.warning("call_a - Server endpoint reached!") return "OK" class Server(object): def __init__(self, conf, log_queue, transport_url, name, topic=None): self.conf = conf self.log_queue = log_queue self.transport_url = transport_url self.name = name self.topic = topic or str(uuid.uuid4()) self.ready = multiprocessing.Value('b', False) self._stop = multiprocessing.Event() def start(self): self.process = multiprocessing.Process(target=self._run_server, name=self.name, args=(self.conf, self.transport_url, self.log_queue, self.ready)) self.process.start() LOG.debug("Server process started: pid: %d", self.process.pid) def _run_server(self, conf, url, log_queue, ready): server_configurer(log_queue) LOG.debug("Starting RPC server") transport = oslo_messaging.get_transport(conf, url=url) target = oslo_messaging.Target(topic=self.topic, server=self.name) self.rpc_server = oslo_messaging.get_rpc_server( transport=transport, target=target, endpoints=[ReplyServerEndpoint()], executor='eventlet') self.rpc_server.start() ready.value = True LOG.debug("RPC server being started") while not self._stop.is_set(): LOG.debug("Waiting for the stop signal ...") time.sleep(1) self.rpc_server.stop() self.rpc_server.wait() LOG.debug("Leaving process T:%s Pid:%d", str(target), os.getpid()) def cleanup(self): LOG.debug("Stopping server") self.shutdown() def shutdown(self): self._stop.set() def restart(self, time_for_restart=1): pass def hang(self): pass def crash(self): pass def ping(self): pass class MultiprocTestCase(utils.SkipIfNoTransportURL): def setUp(self): super(MultiprocTestCase, self).setUp(conf=cfg.ConfigOpts()) if not self.url.startswith("zmq"): self.skipTest("ZeroMQ specific skipped...") self.transport = oslo_messaging.get_transport(self.conf, url=self.url) LOG.debug("Start log queue") self.log_queue = multiprocessing.Queue() self.log_listener = threading.Thread(target=listener_thread, args=(self.log_queue, listener_configurer, self.conf)) self.log_listener.start() self.spawned = [] self.conf.prog = "test_prog" 
self.conf.project = "test_project" def tearDown(self): for process in self.spawned: process.cleanup() super(MultiprocTestCase, self).tearDown() def get_client(self, topic): return Client(self.transport, topic) def spawn_server(self, wait_for_server=False, topic=None): name = "server_%d_%s" % (len(self.spawned), str(uuid.uuid4())[:8]) server = Server(self.conf, self.log_queue, self.url, name, topic) LOG.debug("[SPAWN] %s (starting)...", server.name) server.start() if wait_for_server: while not server.ready.value: LOG.debug("[SPAWN] %s (waiting for server ready)...", server.name) time.sleep(1) LOG.debug("[SPAWN] Server %s:%d started.", server.name, server.process.pid) self.spawned.append(server) return server def spawn_servers(self, number, wait_for_server=False, common_topic=True): topic = str(uuid.uuid4()) if common_topic else None for _ in range(number): self.spawn_server(wait_for_server, topic) oslo.messaging-5.35.0/oslo_messaging/tests/functional/zmq/__init__.py0000666000175100017510000000000013224676046026017 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/functional/zmq/test_startup.py0000666000175100017510000000335113224676046027035 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys from oslo_messaging.tests.functional.zmq import multiproc_utils class StartupOrderTestCase(multiproc_utils.MultiprocTestCase): def setUp(self): super(StartupOrderTestCase, self).setUp() self.conf.prog = "test_prog" self.conf.project = "test_project" self.config(rpc_response_timeout=10) log_path = os.path.join(self.conf.oslo_messaging_zmq.rpc_zmq_ipc_dir, str(os.getpid()) + ".log") sys.stdout = open(log_path, "wb", buffering=0) def test_call_client_wait_for_server(self): server = self.spawn_server(wait_for_server=True) client = self.get_client(server.topic) for _ in range(3): reply = client.call_a() self.assertIsNotNone(reply) self.assertEqual(3, len(client.replies)) def test_call_client_dont_wait_for_server(self): server = self.spawn_server(wait_for_server=False) client = self.get_client(server.topic) for _ in range(3): reply = client.call_a() self.assertIsNotNone(reply) self.assertEqual(3, len(client.replies)) oslo.messaging-5.35.0/oslo_messaging/tests/functional/__init__.py0000666000175100017510000000000013224676046025210 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/functional/notify/0000775000175100017510000000000013224676256024422 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/functional/notify/__init__.py0000666000175100017510000000000013224676046026520 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/functional/notify/test_logger.py0000666000175100017510000000600213224676046027307 0ustar zuulzuul00000000000000# Copyright 2015 NetEase Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import uuid import testscenarios import oslo_messaging from oslo_messaging.tests.functional import utils load_tests = testscenarios.load_tests_apply_scenarios class LoggingNotificationHandlerTestCase(utils.SkipIfNoTransportURL): """Test case for `oslo_messaging.LoggingNotificationHandler` Build up a logger using this handler, then test logging under the messaging and messagingv2 drivers. Make sure the expected logging notifications are received. """ _priority = [ ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('critical', dict(priority='critical')), ] _driver = [ ('messaging', dict(driver='messaging')), ('messagingv2', dict(driver='messagingv2')), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._priority, cls._driver) def test_logging(self): # NOTE(gtt): Using different topic to make tests run in parallel topic = 'test_logging_%s_driver_%s' % (self.priority, self.driver) if self.url.startswith("kafka://"): self.conf.set_override('consumer_group', str(uuid.uuid4()), group='oslo_messaging_kafka') self.config(driver=[self.driver], topics=[topic], group='oslo_messaging_notifications') listener = self.useFixture( utils.NotificationFixture(self.conf, self.url, [topic])) log_notify = oslo_messaging.LoggingNotificationHandler(self.url) log = logging.getLogger(topic) log.setLevel(logging.DEBUG) log.addHandler(log_notify) log_method = getattr(log, self.priority) log_method('Test logging at priority: %s' % self.priority) events = listener.get_events(timeout=5) self.assertEqual(1, len(events)) info_event = events[0] self.assertEqual(self.priority, info_event[0]) self.assertEqual('logrecord', info_event[1]) for key in ['name', 'thread', 'extra', 'process', 'funcName', 'levelno', 'processName', 'pathname', 'lineno', 'msg', 'exc_info', 'levelname']: self.assertIn(key, info_event[2]) LoggingNotificationHandlerTestCase.generate_scenarios() oslo.messaging-5.35.0/oslo_messaging/tests/functional/utils.py0000666000175100017510000003723713224676046024633 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
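# [Editor's note: the sketch below is an editorial illustration, not part of
# the original module.] A typical composition of the fixtures defined below,
# mirroring CallTestCase.test_specific_server in test_functional.py:
def _fixture_usage_sketch(test_case):
    # test_case is assumed to be a SkipIfNoTransportURL-based test instance
    group = test_case.useFixture(
        RpcServerGroupFixture(test_case.conf, test_case.url))
    client = group.client(1)    # ClientStub bound to the second server
    client.append(text='open')  # attribute access builds a synchronous RpcCall
    assert 'openstack' == client.append(text='stack')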
import os import time import uuid import fixtures from oslo_config import cfg from six import moves import oslo_messaging from oslo_messaging._drivers.kafka_driver import kafka_options from oslo_messaging._drivers.zmq_driver import zmq_options from oslo_messaging.notify import notifier from oslo_messaging.tests import utils as test_utils class TestServerEndpoint(object): """This MessagingServer will be used during functional testing.""" def __init__(self): self.ival = 0 self.sval = '' def add(self, ctxt, increment): self.ival += increment return self.ival def subtract(self, ctxt, increment): if self.ival < increment: raise ValueError("ival can't go negative!") self.ival -= increment return self.ival def append(self, ctxt, text): self.sval += text return self.sval def long_running_task(self, ctxt, seconds): time.sleep(seconds) class TransportFixture(fixtures.Fixture): """Fixture defined to set up the oslo_messaging transport.""" def __init__(self, conf, url): self.conf = conf self.url = url def setUp(self): super(TransportFixture, self).setUp() self.transport = oslo_messaging.get_transport(self.conf, url=self.url) def cleanUp(self): try: self.transport.cleanup() except fixtures.TimeoutException: pass super(TransportFixture, self).cleanUp() def wait(self): # allow time for the server to connect to the broker time.sleep(0.5) class RPCTransportFixture(TransportFixture): """Fixture defined to set up RPC transport.""" def setUp(self): super(RPCTransportFixture, self).setUp() self.transport = oslo_messaging.get_rpc_transport(self.conf, url=self.url) class NotificationTransportFixture(TransportFixture): """Fixture defined to set up notification transport.""" def setUp(self): super(NotificationTransportFixture, self).setUp() self.transport = oslo_messaging.get_notification_transport( self.conf, url=self.url) class RpcServerFixture(fixtures.Fixture): """Fixture to set up the TestServerEndpoint.""" def __init__(self, conf, url, target, endpoint=None, ctrl_target=None, executor='eventlet'): super(RpcServerFixture, self).__init__() self.conf = conf self.url = url self.target = target self.endpoint = endpoint or TestServerEndpoint() self.executor = executor self.syncq = moves.queue.Queue() self.ctrl_target = ctrl_target or self.target def setUp(self): super(RpcServerFixture, self).setUp() endpoints = [self.endpoint, self] transport = self.useFixture(RPCTransportFixture(self.conf, self.url)) self.server = oslo_messaging.get_rpc_server( transport=transport.transport, target=self.target, endpoints=endpoints, executor=self.executor) self._ctrl = oslo_messaging.RPCClient(transport.transport, self.ctrl_target) self._start() transport.wait() def cleanUp(self): self._stop() super(RpcServerFixture, self).cleanUp() def _start(self): self.thread = test_utils.ServerThreadHelper(self.server) self.thread.start() def _stop(self): self.thread.stop() self.thread.join(timeout=30) if self.thread.isAlive(): raise Exception("Server did not shutdown correctly") def ping(self, ctxt): pass def sync(self, ctxt): self.syncq.put('x') class RpcServerGroupFixture(fixtures.Fixture): def __init__(self, conf, url, topic=None, names=None, exchange=None, use_fanout_ctrl=False, endpoint=None): self.conf = conf self.url = url # NOTE(sileht): topic and server_name must be unique # to be able to run all tests in parallel self.topic = topic or str(uuid.uuid4()) self.names = names or ["server_%i_%s" % (i, str(uuid.uuid4())[:8]) for i in range(3)] self.exchange = exchange self.targets = [self._target(server=n) for n in self.names]
self.use_fanout_ctrl = use_fanout_ctrl self.endpoint = endpoint def setUp(self): super(RpcServerGroupFixture, self).setUp() self.servers = [self.useFixture(self._server(t)) for t in self.targets] def _target(self, server=None, fanout=False): t = oslo_messaging.Target(exchange=self.exchange, topic=self.topic) t.server = server t.fanout = fanout return t def _server(self, target): ctrl = None if self.use_fanout_ctrl: ctrl = self._target(fanout=True) server = RpcServerFixture(self.conf, self.url, target, endpoint=self.endpoint, ctrl_target=ctrl) return server def client(self, server=None, cast=False): if server is None: target = self._target() else: if server == 'all': target = self._target(fanout=True) elif 0 <= server < len(self.targets): target = self.targets[server] else: raise ValueError("Invalid value for server: %r" % server) transport = self.useFixture(RPCTransportFixture(self.conf, self.url)) client = ClientStub(transport.transport, target, cast=cast, timeout=5) transport.wait() return client def sync(self, server=None): if server is None: for i in range(len(self.servers)): self.client(i).ping() else: if server == 'all': for s in self.servers: s.syncq.get(timeout=5) elif 0 <= server < len(self.targets): self.servers[server].syncq.get(timeout=5) else: raise ValueError("Invalid value for server: %r" % server) class RpcCall(object): def __init__(self, client, method, context): self.client = client self.method = method self.context = context def __call__(self, **kwargs): self.context['time'] = time.ctime() self.context['cast'] = False result = self.client.call(self.context, self.method, **kwargs) return result class RpcCast(RpcCall): def __call__(self, **kwargs): self.context['time'] = time.ctime() self.context['cast'] = True self.client.cast(self.context, self.method, **kwargs) class ClientStub(object): def __init__(self, transport, target, cast=False, name=None, **kwargs): self.name = name or "functional-tests" self.cast = cast self.client = oslo_messaging.RPCClient(transport, target, **kwargs) def __getattr__(self, name): context = {"application": self.name} if self.cast: return RpcCast(self.client, name, context) else: return RpcCall(self.client, name, context) class InvalidDistribution(object): def __init__(self, original, received): self.original = original self.received = received self.missing = [] self.extra = [] self.wrong_order = [] def describe(self): text = "Sent %s, got %s; " % (self.original, self.received) e1 = ["%r was missing" % m for m in self.missing] e2 = ["%r was not expected" % m for m in self.extra] e3 = ["%r expected before %r" % (m[0], m[1]) for m in self.wrong_order] return text + ", ".join(e1 + e2 + e3) def __len__(self): return len(self.extra) + len(self.missing) + len(self.wrong_order) def get_details(self): return {} class IsValidDistributionOf(object): """Test whether a given list can be split into particular sub-lists. All items in the original list must be in exactly one sub-list, and must appear in that sub-list in the same order with respect to any other items as in the original list. 
""" def __init__(self, original): self.original = original def __str__(self): return 'IsValidDistribution(%s)' % self.original def match(self, actual): errors = InvalidDistribution(self.original, actual) received = [[i for i in l] for l in actual] def _remove(obj, lists): for l in lists: if obj in l: front = l[0] l.remove(obj) return front return None for item in self.original: o = _remove(item, received) if not o: errors.missing += item elif item != o: errors.wrong_order.append([item, o]) for l in received: errors.extra += l return errors or None class SkipIfNoTransportURL(test_utils.BaseTestCase): def setUp(self, conf=cfg.CONF): super(SkipIfNoTransportURL, self).setUp(conf=conf) driver = os.environ.get("TRANSPORT_DRIVER") if driver: self.url = os.environ.get('PIFPAF_URL') if driver == "pika" and self.url: self.url = self.url.replace("rabbit://", "pika://") else: self.url = os.environ.get('TRANSPORT_URL') if not self.url: self.skipTest("No transport url configured") transport_url = oslo_messaging.TransportURL.parse(conf, self.url) zmq_options.register_opts(conf, transport_url) zmq_matchmaker = os.environ.get('ZMQ_MATCHMAKER') if zmq_matchmaker: self.config(rpc_zmq_matchmaker=zmq_matchmaker, group="oslo_messaging_zmq") zmq_ipc_dir = os.environ.get('ZMQ_IPC_DIR') if zmq_ipc_dir: self.config(group="oslo_messaging_zmq", rpc_zmq_ipc_dir=zmq_ipc_dir) zmq_redis_port = os.environ.get('ZMQ_REDIS_PORT') if zmq_redis_port: self.config(port=zmq_redis_port, check_timeout=10000, wait_timeout=1000, group="matchmaker_redis") zmq_use_pub_sub = os.environ.get('ZMQ_USE_PUB_SUB') zmq_use_router_proxy = os.environ.get('ZMQ_USE_ROUTER_PROXY') zmq_use_acks = os.environ.get('ZMQ_USE_ACKS') self.config(use_pub_sub=zmq_use_pub_sub, use_router_proxy=zmq_use_router_proxy, rpc_use_acks=zmq_use_acks, group='oslo_messaging_zmq') zmq_use_dynamic_connections = \ os.environ.get('ZMQ_USE_DYNAMIC_CONNECTIONS') self.config(use_dynamic_connections=zmq_use_dynamic_connections, group='oslo_messaging_zmq') kafka_options.register_opts(conf, transport_url) self.config(producer_batch_size=0, group='oslo_messaging_kafka') class NotificationFixture(fixtures.Fixture): def __init__(self, conf, url, topics, batch=None): super(NotificationFixture, self).__init__() self.conf = conf self.url = url self.topics = topics self.events = moves.queue.Queue() self.name = str(id(self)) self.batch = batch def setUp(self): super(NotificationFixture, self).setUp() targets = [oslo_messaging.Target(topic=t) for t in self.topics] # add a special topic for internal notifications targets.append(oslo_messaging.Target(topic=self.name)) transport = self.useFixture(NotificationTransportFixture(self.conf, self.url)) self.server = self._get_server(transport, targets) self._ctrl = self.notifier('internal', topics=[self.name]) self._start() transport.wait() def cleanUp(self): self._stop() super(NotificationFixture, self).cleanUp() def _get_server(self, transport, targets): return oslo_messaging.get_notification_listener( transport.transport, targets, [self], 'eventlet') def _start(self): self.thread = test_utils.ServerThreadHelper(self.server) self.thread.start() def _stop(self): self.thread.stop() self.thread.join(timeout=30) if self.thread.isAlive(): raise Exception("Server did not shutdown properly") def notifier(self, publisher, topics=None): transport = self.useFixture(NotificationTransportFixture(self.conf, self.url)) n = notifier.Notifier(transport.transport, publisher, driver='messaging', topics=topics or self.topics) transport.wait() return n def 
debug(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['debug', event_type, payload, publisher]) def audit(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['audit', event_type, payload, publisher]) def info(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['info', event_type, payload, publisher]) def warn(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['warn', event_type, payload, publisher]) def error(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['error', event_type, payload, publisher]) def critical(self, ctxt, publisher, event_type, payload, metadata): self.events.put(['critical', event_type, payload, publisher]) def sample(self, ctxt, publisher, event_type, payload, metadata): pass # Just used for internal shutdown control def get_events(self, timeout=0.5): results = [] try: while True: results.append(self.events.get(timeout=timeout)) except moves.queue.Empty: pass return results class BatchNotificationFixture(NotificationFixture): def __init__(self, conf, url, topics, batch_size=5, batch_timeout=2): super(BatchNotificationFixture, self).__init__(conf, url, topics) self.batch_size = batch_size self.batch_timeout = batch_timeout def _get_server(self, transport, targets): return oslo_messaging.get_batch_notification_listener( transport.transport, targets, [self], 'eventlet', batch_timeout=self.batch_timeout, batch_size=self.batch_size) def debug(self, messages): self.events.put(['debug', messages]) def audit(self, messages): self.events.put(['audit', messages]) def info(self, messages): self.events.put(['info', messages]) def warn(self, messages): self.events.put(['warn', messages]) def error(self, messages): self.events.put(['error', messages]) def critical(self, messages): self.events.put(['critical', messages]) def sample(self, messages): pass # Just used for internal shutdown control oslo.messaging-5.35.0/oslo_messaging/tests/functional/test_functional.py0000666000175100017510000004274713224676046026702 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
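# [Editor's note: the sketch below is an editorial illustration, not part of
# the original module.] Several tests in this file assert message
# distribution with utils.IsValidDistributionOf; its match() returns None
# when every sent item appears exactly once across the received sub-lists,
# in its original relative order:
def _distribution_matcher_sketch():
    from oslo_messaging.tests.functional import utils
    data = [c for c in 'abcd']
    received = [['a', 'c'], ['b', 'd']]  # a valid split of 'abcd'
    assert utils.IsValidDistributionOf(data).match(received) is None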
import os import time import uuid import concurrent.futures from oslo_config import cfg import six.moves from testtools import matchers import oslo_messaging from oslo_messaging.tests.functional import utils class CallTestCase(utils.SkipIfNoTransportURL): def setUp(self): super(CallTestCase, self).setUp(conf=cfg.ConfigOpts()) if self.url.startswith("kafka://"): self.skipTest("kafka does not support RPC API") self.conf.prog = "test_prog" self.conf.project = "test_project" self.config(heartbeat_timeout_threshold=0, group='oslo_messaging_rabbit') def test_specific_server(self): group = self.useFixture(utils.RpcServerGroupFixture( self.conf, self.url) ) client = group.client(1) client.append(text='open') self.assertEqual('openstack', client.append(text='stack')) client.add(increment=2) self.assertEqual(12, client.add(increment=10)) self.assertEqual(9, client.subtract(increment=3)) self.assertEqual('openstack', group.servers[1].endpoint.sval) self.assertEqual(9, group.servers[1].endpoint.ival) for i in [0, 2]: self.assertEqual('', group.servers[i].endpoint.sval) self.assertEqual(0, group.servers[i].endpoint.ival) def test_server_in_group(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client() data = [c for c in 'abcdefghijklmn'] for i in data: client.append(text=i) for s in group.servers: self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) actual = [[c for c in s.endpoint.sval] for s in group.servers] self.assertThat(actual, utils.IsValidDistributionOf(data)) def test_different_exchanges(self): # If the different exchanges are not honoured, then the # teardown may hang unless we broadcast all control messages # to each server group1 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url, use_fanout_ctrl=True)) group2 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url, exchange="a", use_fanout_ctrl=True)) group3 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url, exchange="b", use_fanout_ctrl=True)) client1 = group1.client(1) data1 = [c for c in 'abcdefghijklmn'] for i in data1: client1.append(text=i) client2 = group2.client() data2 = [c for c in 'opqrstuvwxyz'] for i in data2: client2.append(text=i) actual1 = [[c for c in s.endpoint.sval] for s in group1.servers] self.assertThat(actual1, utils.IsValidDistributionOf(data1)) actual1 = [c for c in group1.servers[1].endpoint.sval] self.assertThat([actual1], utils.IsValidDistributionOf(data1)) for s in group1.servers: expected = len(data1) if group1.servers.index(s) == 1 else 0 self.assertEqual(expected, len(s.endpoint.sval)) self.assertEqual(0, s.endpoint.ival) actual2 = [[c for c in s.endpoint.sval] for s in group2.servers] for s in group2.servers: self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) self.assertEqual(0, s.endpoint.ival) self.assertThat(actual2, utils.IsValidDistributionOf(data2)) for s in group3.servers: self.assertEqual(0, len(s.endpoint.sval)) self.assertEqual(0, s.endpoint.ival) def test_timeout(self): transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.url) ) target = oslo_messaging.Target(topic="no_such_topic") c = utils.ClientStub(transport.transport, target, timeout=1) self.assertThat(c.ping, matchers.raises(oslo_messaging.MessagingTimeout)) def test_exception(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client(1) client.add(increment=2) self.assertRaises(ValueError, client.subtract, increment=3) def 
test_timeout_with_concurrently_queues(self): transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.url) ) target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), server="server_" + str(uuid.uuid4())) server = self.useFixture( utils.RpcServerFixture(self.conf, self.url, target, executor="threading")) client = utils.ClientStub(transport.transport, target, cast=False, timeout=5) def short_periodical_tasks(): for i in range(10): client.add(increment=1) time.sleep(1) with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: future = executor.submit(client.long_running_task, seconds=10) executor.submit(short_periodical_tasks) self.assertRaises(oslo_messaging.MessagingTimeout, future.result) self.assertEqual(10, server.endpoint.ival) def test_endpoint_version_namespace(self): # verify endpoint version and namespace are checked target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), server="server_" + str(uuid.uuid4()), namespace="Name1", version="7.5") class _endpoint(object): def __init__(self, target): self.target = target() def test(self, ctxt, echo): return echo transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.url) ) self.useFixture( utils.RpcServerFixture(self.conf, self.url, target, executor="threading", endpoint=_endpoint(target))) client1 = utils.ClientStub(transport.transport, target, cast=False, timeout=5) self.assertEqual("Hi there", client1.test(echo="Hi there")) # unsupported version target2 = target() target2.version = "7.6" client2 = utils.ClientStub(transport.transport, target2, cast=False, timeout=5) self.assertRaises(oslo_messaging.rpc.client.RemoteError, client2.test, echo="Expect failure") # no matching namespace target3 = oslo_messaging.Target(topic=target.topic, server=target.server, version=target.version, namespace="Name2") client3 = utils.ClientStub(transport.transport, target3, cast=False, timeout=5) self.assertRaises(oslo_messaging.rpc.client.RemoteError, client3.test, echo="Expect failure") def test_bad_endpoint(self): # 'target' attribute is reserved and should be of type Target class _endpoint(object): def target(self, ctxt, echo): return echo target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), server="server_" + str(uuid.uuid4())) transport = self.useFixture( utils.RPCTransportFixture(self.conf, self.url) ) self.assertRaises(TypeError, oslo_messaging.get_rpc_server, transport=transport.transport, target=target, endpoints=[_endpoint()], executor="threading") class CastTestCase(utils.SkipIfNoTransportURL): # Note: casts return immediately, so these tests utilise a special # internal sync() cast to ensure prior casts are complete before # making the necessary assertions. 
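    #
    # A hedged sketch of that barrier idiom, using the client/group helpers
    # from RpcServerGroupFixture exactly as the tests below do:
    #
    #     client.add(increment=1)    # cast, returns immediately
    #     client.sync()              # special cast queued behind prior casts
    #     group.sync(server='all')   # block until each server consumed sync()
    #     # only now are assertions about endpoint state reliable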
def setUp(self): super(CastTestCase, self).setUp() if self.url.startswith("kafka://"): self.skipTest("kafka does not support RPC API") def test_specific_server(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client(1, cast=True) client.append(text='open') client.append(text='stack') client.add(increment=2) client.add(increment=10) time.sleep(0.3) client.sync() group.sync(1) self.assertIn(group.servers[1].endpoint.sval, ["openstack", "stackopen"]) self.assertEqual(12, group.servers[1].endpoint.ival) for i in [0, 2]: self.assertEqual('', group.servers[i].endpoint.sval) self.assertEqual(0, group.servers[i].endpoint.ival) def test_server_in_group(self): if self.url.startswith("amqp:"): self.skipTest("QPID-6307") group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client(cast=True) for i in range(20): client.add(increment=1) for i in range(len(group.servers)): # expect each server to get a sync client.sync() group.sync(server="all") total = 0 for s in group.servers: ival = s.endpoint.ival self.assertThat(ival, matchers.GreaterThan(0)) self.assertThat(ival, matchers.LessThan(20)) total += ival self.assertEqual(20, total) def test_fanout(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client('all', cast=True) client.append(text='open') client.append(text='stack') client.add(increment=2) client.add(increment=10) time.sleep(0.3) client.sync() group.sync(server='all') for s in group.servers: self.assertIn(s.endpoint.sval, ["openstack", "stackopen"]) self.assertEqual(12, s.endpoint.ival) class NotifyTestCase(utils.SkipIfNoTransportURL): # NOTE(sileht): Each test must not use the same topics # to be run in parallel def test_simple(self): if self.url.startswith("kafka://"): self.conf.set_override('consumer_group', 'test_simple', group='oslo_messaging_kafka') listener = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['test_simple'])) notifier = listener.notifier('abc') notifier.info({}, 'test', 'Hello World!') event = listener.events.get(timeout=1) self.assertEqual('info', event[0]) self.assertEqual('test', event[1]) self.assertEqual('Hello World!', event[2]) self.assertEqual('abc', event[3]) def test_multiple_topics(self): if self.url.startswith("kafka://"): self.conf.set_override('consumer_group', 'test_multiple_topics', group='oslo_messaging_kafka') listener = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['a', 'b'])) a = listener.notifier('pub-a', topics=['a']) b = listener.notifier('pub-b', topics=['b']) sent = { 'pub-a': [a, 'test-a', 'payload-a'], 'pub-b': [b, 'test-b', 'payload-b'] } for e in sent.values(): e[0].info({}, e[1], e[2]) received = {} while len(received) < len(sent): e = listener.events.get(timeout=1) received[e[3]] = e for key in received: actual = received[key] expected = sent[key] self.assertEqual('info', actual[0]) self.assertEqual(expected[1], actual[1]) self.assertEqual(expected[2], actual[2]) def test_multiple_servers(self): if self.url.startswith("amqp:"): self.skipTest("QPID-6307") if self.url.startswith("zmq"): self.skipTest("ZeroMQ-PUB-SUB") if self.url.startswith("kafka"): self.skipTest("Kafka: Need to be fixed") listener_a = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['test-topic'])) listener_b = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['test-topic'])) n = listener_a.notifier('pub') events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh'] for 
event_type, payload in events_out: n.info({}, event_type, payload) events_in = [[(e[1], e[2]) for e in listener_a.get_events()], [(e[1], e[2]) for e in listener_b.get_events()]] self.assertThat(events_in, utils.IsValidDistributionOf(events_out)) for stream in events_in: self.assertThat(len(stream), matchers.GreaterThan(0)) def test_independent_topics(self): if self.url.startswith("kafka://"): self.conf.set_override('consumer_group', 'test_independent_topics_a', group='oslo_messaging_kafka') listener_a = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['1'])) if self.url.startswith("kafka://"): self.conf.set_override('consumer_group', 'test_independent_topics_b', group='oslo_messaging_kafka') listener_b = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['2'])) a = listener_a.notifier('pub-1', topics=['1']) b = listener_b.notifier('pub-2', topics=['2']) a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh'] for event_type, payload in a_out: a.info({}, event_type, payload) b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop'] for event_type, payload in b_out: b.info({}, event_type, payload) def check_received(listener, publisher, messages): actuals = sorted([listener.events.get(timeout=0.5) for __ in range(len(a_out))]) expected = sorted([['info', m[0], m[1], publisher] for m in messages]) self.assertEqual(expected, actuals) check_received(listener_a, "pub-1", a_out) check_received(listener_b, "pub-2", b_out) def test_all_categories(self): if self.url.startswith("kafka://"): self.conf.set_override('consumer_group', 'test_all_categories', group='oslo_messaging_kafka') listener = self.useFixture(utils.NotificationFixture( self.conf, self.url, ['test_all_categories'])) n = listener.notifier('abc') cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical'] events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats] for e in events: e[0]({}, e[2], e[3]) # order between events with different categories is not guaranteed received = {} for expected in events: e = listener.events.get(timeout=1) received[e[0]] = e for expected in events: actual = received[expected[1]] self.assertEqual(expected[1], actual[0]) self.assertEqual(expected[2], actual[1]) self.assertEqual(expected[3], actual[2]) def test_simple_batch(self): if self.url.startswith("amqp:"): backend = os.environ.get("AMQP1_BACKEND") if backend == "qdrouterd": # end-to-end acknowledgement with router intermediary # sender pends until batch_size or timeout reached self.skipTest("qdrouterd backend") if self.url.startswith("kafka://"): self.conf.set_override('consumer_group', 'test_simple_batch', group='oslo_messaging_kafka') listener = self.useFixture( utils.BatchNotificationFixture(self.conf, self.url, ['test_simple_batch'], batch_size=100, batch_timeout=2)) notifier = listener.notifier('abc') for i in six.moves.range(0, 205): notifier.info({}, 'test%s' % i, 'Hello World!') events = listener.get_events(timeout=3) self.assertEqual(3, len(events)) self.assertEqual(100, len(events[0][1])) self.assertEqual(100, len(events[1][1])) self.assertEqual(5, len(events[2][1])) oslo.messaging-5.35.0/oslo_messaging/tests/test_exception_serialization.py0000666000175100017510000002562613224676046027326 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_serialization import jsonutils import six import testscenarios import oslo_messaging from oslo_messaging._drivers import common as exceptions from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' OTHER_EXCEPTIONS_MODULE = 'builtins' if six.PY2 else 'exceptions' class NovaStyleException(Exception): format = 'I am Nova' def __init__(self, message=None, **kwargs): self.kwargs = kwargs if not message: message = self.format % kwargs super(NovaStyleException, self).__init__(message) class KwargsStyleException(NovaStyleException): format = 'I am %(who)s' def add_remote_postfix(ex): ex_type = type(ex) message = str(ex) str_override = lambda self: message new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), {'__str__': str_override, '__unicode__': str_override}) new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__ try: ex.__class__ = new_ex_type except TypeError: ex.args = (message,) + ex.args[1:] return ex class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase): _add_remote = [ ('add_remote', dict(add_remote=True)), ('do_not_add_remote', dict(add_remote=False)), ] _exception_types = [ ('bog_standard', dict(cls=Exception, args=['test'], kwargs={}, clsname='Exception', modname=EXCEPTIONS_MODULE, msg='test')), ('nova_style', dict(cls=NovaStyleException, args=[], kwargs={}, clsname='NovaStyleException', modname=__name__, msg='I am Nova')), ('nova_style_with_msg', dict(cls=NovaStyleException, args=['testing'], kwargs={}, clsname='NovaStyleException', modname=__name__, msg='testing')), ('kwargs_style', dict(cls=KwargsStyleException, args=[], kwargs={'who': 'Oslo'}, clsname='KwargsStyleException', modname=__name__, msg='I am Oslo')), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._add_remote, cls._exception_types) def test_serialize_remote_exception(self): try: try: raise self.cls(*self.args, **self.kwargs) except Exception as ex: # Note: in Python 3 ex variable will be cleared at the end of # the except clause, so explicitly make an extra copy of it cls_error = ex if self.add_remote: ex = add_remote_postfix(ex) raise ex except Exception: exc_info = sys.exc_info() serialized = exceptions.serialize_remote_exception(exc_info) failure = jsonutils.loads(serialized) self.assertEqual(self.clsname, failure['class'], failure) self.assertEqual(self.modname, failure['module']) self.assertEqual(self.msg, failure['message']) self.assertEqual([self.msg], failure['args']) self.assertEqual(self.kwargs, failure['kwargs']) # Note: _Remote prefix not stripped from tracebacks tb = cls_error.__class__.__name__ + ': ' + self.msg self.assertIn(tb, ''.join(failure['tb'])) SerializeRemoteExceptionTestCase.generate_scenarios() class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase): _standard_allowed = [__name__] scenarios = [ ('bog_standard', dict(allowed=_standard_allowed, clsname='Exception', modname=EXCEPTIONS_MODULE, cls=Exception, args=['test'], kwargs={}, str='test\ntraceback\ntraceback\n', 
remote_name='Exception', remote_args=('test\ntraceback\ntraceback\n', ), remote_kwargs={})), ('different_python_versions', dict(allowed=_standard_allowed, clsname='Exception', modname=OTHER_EXCEPTIONS_MODULE, cls=Exception, args=['test'], kwargs={}, str='test\ntraceback\ntraceback\n', remote_name='Exception', remote_args=('test\ntraceback\ntraceback\n', ), remote_kwargs={})), ('nova_style', dict(allowed=_standard_allowed, clsname='NovaStyleException', modname=__name__, cls=NovaStyleException, args=[], kwargs={}, str='test\ntraceback\ntraceback\n', remote_name='NovaStyleException_Remote', remote_args=('I am Nova', ), remote_kwargs={})), ('nova_style_with_msg', dict(allowed=_standard_allowed, clsname='NovaStyleException', modname=__name__, cls=NovaStyleException, args=['testing'], kwargs={}, str='test\ntraceback\ntraceback\n', remote_name='NovaStyleException_Remote', remote_args=('testing', ), remote_kwargs={})), ('kwargs_style', dict(allowed=_standard_allowed, clsname='KwargsStyleException', modname=__name__, cls=KwargsStyleException, args=[], kwargs={'who': 'Oslo'}, str='test\ntraceback\ntraceback\n', remote_name='KwargsStyleException_Remote', remote_args=('I am Oslo', ), remote_kwargs={})), ('not_allowed', dict(allowed=[], clsname='NovaStyleException', modname=__name__, cls=oslo_messaging.RemoteError, args=[], kwargs={}, str=("Remote error: NovaStyleException test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: NovaStyleException test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'NovaStyleException', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('unknown_module', dict(allowed=['notexist'], clsname='Exception', modname='notexist', cls=oslo_messaging.RemoteError, args=[], kwargs={}, str=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'Exception', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('unknown_exception', dict(allowed=[], clsname='FarcicalError', modname=EXCEPTIONS_MODULE, cls=oslo_messaging.RemoteError, args=[], kwargs={}, str=("Remote error: FarcicalError test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: FarcicalError test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'FarcicalError', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('unknown_kwarg', dict(allowed=[], clsname='Exception', modname=EXCEPTIONS_MODULE, cls=oslo_messaging.RemoteError, args=[], kwargs={'foobar': 'blaa'}, str=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: Exception test\n" "[%r]." % u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'Exception', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ('system_exit', dict(allowed=[], clsname='SystemExit', modname=EXCEPTIONS_MODULE, cls=oslo_messaging.RemoteError, args=[], kwargs={}, str=("Remote error: SystemExit test\n" "[%r]." % u'traceback\ntraceback\n'), msg=("Remote error: SystemExit test\n" "[%r]." 
% u'traceback\ntraceback\n'), remote_name='RemoteError', remote_args=(), remote_kwargs={'exc_type': 'SystemExit', 'value': 'test', 'traceback': 'traceback\ntraceback\n'})), ] def test_deserialize_remote_exception(self): failure = { 'class': self.clsname, 'module': self.modname, 'message': 'test', 'tb': ['traceback\ntraceback\n'], 'args': self.args, 'kwargs': self.kwargs, } serialized = jsonutils.dumps(failure) ex = exceptions.deserialize_remote_exception(serialized, self.allowed) self.assertIsInstance(ex, self.cls) self.assertEqual(self.remote_name, ex.__class__.__name__) self.assertEqual(self.str, six.text_type(ex)) if hasattr(self, 'msg'): self.assertEqual(self.msg, six.text_type(ex)) self.assertEqual((self.msg,) + self.remote_args, ex.args) else: self.assertEqual(self.remote_args, ex.args) oslo.messaging-5.35.0/oslo_messaging/tests/test_config_opts_proxy.py0000666000175100017510000000701613224676046026137 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_config import types from oslo_messaging._drivers import common as drv_cmn from oslo_messaging.tests import utils as test_utils from oslo_messaging import transport class TestConfigOptsProxy(test_utils.BaseTestCase): def test_rabbit(self): group = 'oslo_messaging_rabbit' self.config(rabbit_retry_interval=1, rabbit_qos_prefetch_count=0, rabbit_max_retries=3, group=group) dummy_opts = [cfg.ListOpt('list_str', item_type=types.String(), default=[]), cfg.ListOpt('list_int', item_type=types.Integer(), default=[]), cfg.DictOpt('dict', default={}), cfg.BoolOpt('bool', default=False), cfg.StrOpt('str', default='default')] self.conf.register_opts(dummy_opts, group=group) url = transport.TransportURL.parse( self.conf, "rabbit:///" "?rabbit_qos_prefetch_count=2" "&list_str=1&list_str=2&list_str=3" "&list_int=1&list_int=2&list_int=3" "&dict=x:1&dict=y:2&dict=z:3" "&bool=True" ) conf = drv_cmn.ConfigOptsProxy(self.conf, url, group) self.assertRaises(cfg.NoSuchOptError, conf.__getattr__, 'unknown_group') self.assertIsInstance(getattr(conf, group), conf.GroupAttrProxy) self.assertEqual(1, conf.oslo_messaging_rabbit.rabbit_retry_interval) self.assertEqual(2, conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count) self.assertEqual(3, conf.oslo_messaging_rabbit.rabbit_max_retries) self.assertEqual(['1', '2', '3'], conf.oslo_messaging_rabbit.list_str) self.assertEqual([1, 2, 3], conf.oslo_messaging_rabbit.list_int) self.assertEqual({'x': '1', 'y': '2', 'z': '3'}, conf.oslo_messaging_rabbit.dict) self.assertEqual(True, conf.oslo_messaging_rabbit.bool) self.assertEqual('default', conf.oslo_messaging_rabbit.str) def test_not_in_group(self): group = 'oslo_messaging_rabbit' url = transport.TransportURL.parse( self.conf, "rabbit:///?unknown_opt=4" ) self.assertRaises(cfg.NoSuchOptError, drv_cmn.ConfigOptsProxy, self.conf, url, group) def test_invalid_value(self): group = 'oslo_messaging_rabbit' self.config(kombu_reconnect_delay=5.0, group=group) url = transport.TransportURL.parse( 
self.conf, "rabbit:///?kombu_reconnect_delay=invalid_value" ) self.assertRaises(ValueError, drv_cmn.ConfigOptsProxy, self.conf, url, group) oslo.messaging-5.35.0/oslo_messaging/tests/test_expected_exceptions.py0000666000175100017510000000410713224676046026424 0ustar zuulzuul00000000000000 # Copyright 2012 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging from oslo_messaging.tests import utils as test_utils class TestExpectedExceptions(test_utils.BaseTestCase): def test_exception(self): e = None try: try: raise ValueError() except Exception: raise oslo_messaging.ExpectedException() except oslo_messaging.ExpectedException as e: self.assertIsInstance(e, oslo_messaging.ExpectedException) self.assertTrue(hasattr(e, 'exc_info')) self.assertIsInstance(e.exc_info[1], ValueError) def test_decorator_expected(self): class FooException(Exception): pass @oslo_messaging.expected_exceptions(FooException) def naughty(): raise FooException() self.assertRaises(oslo_messaging.ExpectedException, naughty) def test_decorator_expected_subclass(self): class FooException(Exception): pass class BarException(FooException): pass @oslo_messaging.expected_exceptions(FooException) def naughty(): raise BarException() self.assertRaises(oslo_messaging.ExpectedException, naughty) def test_decorator_unexpected(self): class FooException(Exception): pass @oslo_messaging.expected_exceptions(FooException) def really_naughty(): raise ValueError() self.assertRaises(ValueError, really_naughty) oslo.messaging-5.35.0/oslo_messaging/tests/notify/0000775000175100017510000000000013224676256022260 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/notify/test_listener.py0000666000175100017510000005075713224676046025533 0ustar zuulzuul00000000000000 # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures import threading from oslo_config import cfg import testscenarios import oslo_messaging from oslo_messaging.notify import dispatcher from oslo_messaging.notify import notifier as msg_notifier from oslo_messaging.tests import utils as test_utils from six.moves import mock load_tests = testscenarios.load_tests_apply_scenarios class RestartableServerThread(object): def __init__(self, server): self.server = server self.thread = None def start(self): if self.thread is None: self.thread = test_utils.ServerThreadHelper(self.server) self.thread.start() def stop(self): if self.thread is not None: self.thread.stop() self.thread.join(timeout=15) ret = self.thread.isAlive() self.thread = None return ret return True class ListenerSetupMixin(object): class ThreadTracker(object): def __init__(self): self._received_msgs = 0 self.threads = [] self.lock = threading.Condition() def info(self, *args, **kwargs): # NOTE(sileht): this run into an other thread with self.lock: self._received_msgs += 1 self.lock.notify_all() def wait_for_messages(self, expect_messages): with self.lock: while self._received_msgs < expect_messages: self.lock.wait() def stop(self): for thread in self.threads: thread.stop() self.threads = [] def start(self, thread): self.threads.append(thread) thread.start() def setUp(self): self.trackers = {} self.addCleanup(self._stop_trackers) def _stop_trackers(self): for pool in self.trackers: self.trackers[pool].stop() self.trackers = {} def _setup_listener(self, transport, endpoints, targets=None, pool=None, batch=False): if pool is None: tracker_name = '__default__' else: tracker_name = pool if targets is None: targets = [oslo_messaging.Target(topic='testtopic')] tracker = self.trackers.setdefault( tracker_name, self.ThreadTracker()) if batch: listener = oslo_messaging.get_batch_notification_listener( transport, targets=targets, endpoints=[tracker] + endpoints, allow_requeue=True, pool=pool, executor='eventlet', batch_size=batch[0], batch_timeout=batch[1]) else: listener = oslo_messaging.get_notification_listener( transport, targets=targets, endpoints=[tracker] + endpoints, allow_requeue=True, pool=pool, executor='eventlet') thread = RestartableServerThread(listener) tracker.start(thread) return thread def wait_for_messages(self, expect_messages, tracker_name='__default__'): self.trackers[tracker_name].wait_for_messages(expect_messages) def _setup_notifier(self, transport, topics=['testtopic'], publisher_id='testpublisher'): return oslo_messaging.Notifier(transport, topics=topics, driver='messaging', publisher_id=publisher_id) class TestNotifyListener(test_utils.BaseTestCase, ListenerSetupMixin): def __init__(self, *args): super(TestNotifyListener, self).__init__(*args) ListenerSetupMixin.__init__(self) def setUp(self): super(TestNotifyListener, self).setUp(conf=cfg.ConfigOpts()) ListenerSetupMixin.setUp(self) self.useFixture(fixtures.MonkeyPatch( 'oslo_messaging._drivers.impl_fake.FakeExchangeManager._exchanges', new_value={})) @mock.patch('debtcollector.deprecate') def test_constructor(self, deprecate): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') target = oslo_messaging.Target(topic='foo') endpoints = [object()] listener = oslo_messaging.get_notification_listener( transport, [target], endpoints) self.assertIs(listener.conf, self.conf) self.assertIs(listener.transport, transport) self.assertIsInstance(listener.dispatcher, dispatcher.NotificationDispatcher) self.assertIs(listener.dispatcher.endpoints, endpoints) self.assertEqual('blocking', 
listener.executor_type) deprecate.assert_called_once_with( 'blocking executor is deprecated. Executor default will be ' 'removed. Use explicitly threading or eventlet instead', removal_version='rocky', version='pike', category=FutureWarning) def test_no_target_topic(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') listener = oslo_messaging.get_notification_listener( transport, [oslo_messaging.Target()], [mock.Mock()]) try: listener.start() except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) else: self.assertTrue(False) def test_unknown_executor(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') try: oslo_messaging.get_notification_listener(transport, [], [], executor='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure) self.assertEqual('foo', ex.executor) else: self.assertTrue(False) def test_batch_timeout(self): transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None listener_thread = self._setup_listener(transport, [endpoint], batch=(5, 1)) notifier = self._setup_notifier(transport) for _ in range(12): notifier.info({}, 'an_event.start', 'test message') self.wait_for_messages(3) self.assertFalse(listener_thread.stop()) messages = [dict(ctxt={}, publisher_id='testpublisher', event_type='an_event.start', payload='test message', metadata={'message_id': mock.ANY, 'timestamp': mock.ANY})] endpoint.info.assert_has_calls([mock.call(messages * 5), mock.call(messages * 5), mock.call(messages * 2)]) def test_batch_size(self): transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None listener_thread = self._setup_listener(transport, [endpoint], batch=(5, None)) notifier = self._setup_notifier(transport) for _ in range(10): notifier.info({}, 'an_event.start', 'test message') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) messages = [dict(ctxt={}, publisher_id='testpublisher', event_type='an_event.start', payload='test message', metadata={'message_id': mock.ANY, 'timestamp': mock.ANY})] endpoint.info.assert_has_calls([mock.call(messages * 5), mock.call(messages * 5)]) def test_batch_size_exception_path(self): transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.side_effect = [None, Exception('boom!')] listener_thread = self._setup_listener(transport, [endpoint], batch=(5, None)) notifier = self._setup_notifier(transport) for _ in range(10): notifier.info({}, 'an_event.start', 'test message') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) messages = [dict(ctxt={}, publisher_id='testpublisher', event_type='an_event.start', payload='test message', metadata={'message_id': mock.ANY, 'timestamp': mock.ANY})] endpoint.info.assert_has_calls([mock.call(messages * 5)]) def test_one_topic(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None listener_thread = self._setup_listener(transport, [endpoint]) notifier = self._setup_notifier(transport) notifier.info({}, 'an_event.start', 'test message') self.wait_for_messages(1) self.assertFalse(listener_thread.stop()) endpoint.info.assert_called_once_with( {}, 'testpublisher', 'an_event.start', 'test message', {'message_id': mock.ANY, 'timestamp': mock.ANY}) def test_two_topics(self): 
transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None targets = [oslo_messaging.Target(topic="topic1"), oslo_messaging.Target(topic="topic2")] listener_thread = self._setup_listener(transport, [endpoint], targets=targets) notifier = self._setup_notifier(transport, topics=['topic1']) notifier.info({'ctxt': '1'}, 'an_event.start1', 'test') notifier = self._setup_notifier(transport, topics=['topic2']) notifier.info({'ctxt': '2'}, 'an_event.start2', 'test') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) endpoint.info.assert_has_calls([ mock.call({'ctxt': '1'}, 'testpublisher', 'an_event.start1', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call({'ctxt': '2'}, 'testpublisher', 'an_event.start2', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY})], any_order=True) def test_two_exchanges(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info.return_value = None targets = [oslo_messaging.Target(topic="topic", exchange="exchange1"), oslo_messaging.Target(topic="topic", exchange="exchange2")] listener_thread = self._setup_listener(transport, [endpoint], targets=targets) notifier = self._setup_notifier(transport, topics=["topic"]) def mock_notifier_exchange(name): def side_effect(target, ctxt, message, version, retry): target.exchange = name return transport._driver.send_notification(target, ctxt, message, version, retry=retry) transport._send_notification = mock.MagicMock( side_effect=side_effect) notifier.info({'ctxt': '0'}, 'an_event.start', 'test message default exchange') mock_notifier_exchange('exchange1') notifier.info({'ctxt': '1'}, 'an_event.start', 'test message exchange1') mock_notifier_exchange('exchange2') notifier.info({'ctxt': '2'}, 'an_event.start', 'test message exchange2') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) endpoint.info.assert_has_calls([ mock.call({'ctxt': '1'}, 'testpublisher', 'an_event.start', 'test message exchange1', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call({'ctxt': '2'}, 'testpublisher', 'an_event.start', 'test message exchange2', {'timestamp': mock.ANY, 'message_id': mock.ANY})], any_order=True) def test_two_endpoints(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint1 = mock.Mock() endpoint1.info.return_value = None endpoint2 = mock.Mock() endpoint2.info.return_value = oslo_messaging.NotificationResult.HANDLED listener_thread = self._setup_listener(transport, [endpoint1, endpoint2]) notifier = self._setup_notifier(transport) notifier.info({}, 'an_event.start', 'test') self.wait_for_messages(1) self.assertFalse(listener_thread.stop()) endpoint1.info.assert_called_once_with( {}, 'testpublisher', 'an_event.start', 'test', { 'timestamp': mock.ANY, 'message_id': mock.ANY}) endpoint2.info.assert_called_once_with( {}, 'testpublisher', 'an_event.start', 'test', { 'timestamp': mock.ANY, 'message_id': mock.ANY}) def test_requeue(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint = mock.Mock() endpoint.info = mock.Mock() def side_effect_requeue(*args, **kwargs): if endpoint.info.call_count == 1: return oslo_messaging.NotificationResult.REQUEUE return oslo_messaging.NotificationResult.HANDLED endpoint.info.side_effect = side_effect_requeue listener_thread = self._setup_listener(transport, [endpoint]) notifier = self._setup_notifier(transport) notifier.info({}, 
'an_event.start', 'test') self.wait_for_messages(2) self.assertFalse(listener_thread.stop()) endpoint.info.assert_has_calls([ mock.call({}, 'testpublisher', 'an_event.start', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY}), mock.call({}, 'testpublisher', 'an_event.start', 'test', {'timestamp': mock.ANY, 'message_id': mock.ANY})]) def test_two_pools(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint1 = mock.Mock() endpoint1.info.return_value = None endpoint2 = mock.Mock() endpoint2.info.return_value = None targets = [oslo_messaging.Target(topic="topic")] listener1_thread = self._setup_listener(transport, [endpoint1], targets=targets, pool="pool1") listener2_thread = self._setup_listener(transport, [endpoint2], targets=targets, pool="pool2") notifier = self._setup_notifier(transport, topics=["topic"]) notifier.info({'ctxt': '0'}, 'an_event.start', 'test message0') notifier.info({'ctxt': '1'}, 'an_event.start', 'test message1') self.wait_for_messages(2, "pool1") self.wait_for_messages(2, "pool2") self.assertFalse(listener2_thread.stop()) self.assertFalse(listener1_thread.stop()) def mocked_endpoint_call(i): return mock.call({'ctxt': '%d' % i}, 'testpublisher', 'an_event.start', 'test message%d' % i, {'timestamp': mock.ANY, 'message_id': mock.ANY}) endpoint1.info.assert_has_calls([mocked_endpoint_call(0), mocked_endpoint_call(1)]) endpoint2.info.assert_has_calls([mocked_endpoint_call(0), mocked_endpoint_call(1)]) def test_two_pools_three_listener(self): transport = msg_notifier.get_notification_transport( self.conf, url='fake:') endpoint1 = mock.Mock() endpoint1.info.return_value = None endpoint2 = mock.Mock() endpoint2.info.return_value = None endpoint3 = mock.Mock() endpoint3.info.return_value = None targets = [oslo_messaging.Target(topic="topic")] listener1_thread = self._setup_listener(transport, [endpoint1], targets=targets, pool="pool1") listener2_thread = self._setup_listener(transport, [endpoint2], targets=targets, pool="pool2") listener3_thread = self._setup_listener(transport, [endpoint3], targets=targets, pool="pool2") def mocked_endpoint_call(i): return mock.call({'ctxt': '%d' % i}, 'testpublisher', 'an_event.start', 'test message%d' % i, {'timestamp': mock.ANY, 'message_id': mock.ANY}) notifier = self._setup_notifier(transport, topics=["topic"]) mocked_endpoint1_calls = [] for i in range(0, 25): notifier.info({'ctxt': '%d' % i}, 'an_event.start', 'test message%d' % i) mocked_endpoint1_calls.append(mocked_endpoint_call(i)) self.wait_for_messages(25, 'pool2') listener2_thread.stop() for i in range(0, 25): notifier.info({'ctxt': '%d' % i}, 'an_event.start', 'test message%d' % i) mocked_endpoint1_calls.append(mocked_endpoint_call(i)) self.wait_for_messages(50, 'pool2') listener2_thread.start() listener3_thread.stop() for i in range(0, 25): notifier.info({'ctxt': '%d' % i}, 'an_event.start', 'test message%d' % i) mocked_endpoint1_calls.append(mocked_endpoint_call(i)) self.wait_for_messages(75, 'pool2') listener3_thread.start() for i in range(0, 25): notifier.info({'ctxt': '%d' % i}, 'an_event.start', 'test message%d' % i) mocked_endpoint1_calls.append(mocked_endpoint_call(i)) self.wait_for_messages(100, 'pool1') self.wait_for_messages(100, 'pool2') self.assertFalse(listener3_thread.stop()) self.assertFalse(listener2_thread.stop()) self.assertFalse(listener1_thread.stop()) self.assertEqual(100, endpoint1.info.call_count) endpoint1.info.assert_has_calls(mocked_endpoint1_calls) self.assertLessEqual(25, endpoint2.info.call_count) 
self.assertLessEqual(25, endpoint3.info.call_count) self.assertEqual(100, endpoint2.info.call_count + endpoint3.info.call_count) for call in mocked_endpoint1_calls: self.assertIn(call, endpoint2.info.mock_calls + endpoint3.info.mock_calls) class TestListenerTransportWarning(test_utils.BaseTestCase): @mock.patch('oslo_messaging.notify.listener.LOG') def test_warning_when_rpc_transport(self, log): transport = oslo_messaging.get_rpc_transport(self.conf) target = oslo_messaging.Target(topic='foo') endpoints = [object()] oslo_messaging.get_notification_listener( transport, [target], endpoints) log.warning.assert_called_once_with( "Using RPC transport for notifications. Please use " "get_notification_transport to obtain a " "notification transport instance.") oslo.messaging-5.35.0/oslo_messaging/tests/notify/test_log_handler.py0000666000175100017510000000474613224676046026161 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import fixtures import oslo_messaging from oslo_messaging.notify import log_handler from oslo_messaging.tests import utils as test_utils from six.moves import mock class PublishErrorsHandlerTestCase(test_utils.BaseTestCase): """Tests for log.PublishErrorsHandler""" def setUp(self): super(PublishErrorsHandlerTestCase, self).setUp() self.publisherrorshandler = (log_handler. PublishErrorsHandler(logging.ERROR)) def test_emit_cfg_log_notifier_in_notifier_drivers(self): drivers = ['messaging', 'log'] self.config(driver=drivers, group='oslo_messaging_notifications') self.stub_flg = True transport = oslo_messaging.get_notification_transport(self.conf) notifier = oslo_messaging.Notifier(transport) def fake_notifier(*args, **kwargs): self.stub_flg = False self.useFixture(fixtures.MockPatchObject( notifier, 'error', fake_notifier)) logrecord = logging.LogRecord(name='name', level='WARN', pathname='/tmp', lineno=1, msg='Message', args=None, exc_info=None) self.publisherrorshandler.emit(logrecord) self.assertTrue(self.stub_flg) @mock.patch('oslo_messaging.notify.notifier.Notifier._notify') def test_emit_notification(self, mock_notify): logrecord = logging.LogRecord(name='name', level='ERROR', pathname='/tmp', lineno=1, msg='Message', args=None, exc_info=None) self.publisherrorshandler.emit(logrecord) self.assertEqual('error.publisher', self.publisherrorshandler._notifier.publisher_id) mock_notify.assert_called_with({}, 'error_notification', {'error': 'Message'}, 'ERROR') oslo.messaging-5.35.0/oslo_messaging/tests/notify/__init__.py0000666000175100017510000000000013224676046024356 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/notify/test_dispatcher.py0000666000175100017510000002276713224676046026034 0ustar zuulzuul00000000000000 # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils import testscenarios import oslo_messaging from oslo_messaging.notify import dispatcher as notify_dispatcher from oslo_messaging.tests import utils as test_utils from six.moves import mock load_tests = testscenarios.load_tests_apply_scenarios notification_msg = dict( publisher_id="publisher_id", event_type="compute.start", payload={"info": "fuu"}, message_id="uuid", timestamp=str(timeutils.utcnow()) ) class TestDispatcher(test_utils.BaseTestCase): scenarios = [ ('no_endpoints', dict(endpoints=[], endpoints_expect_calls=[], priority='info', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('one_endpoints', dict(endpoints=[['warn']], endpoints_expect_calls=['warn'], priority='warn', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('two_endpoints_only_one_match', dict(endpoints=[['warn'], ['info']], endpoints_expect_calls=[None, 'info'], priority='info', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('two_endpoints_both_match', dict(endpoints=[['debug', 'info'], ['info', 'debug']], endpoints_expect_calls=['debug', 'debug'], priority='debug', ex=None, return_value=oslo_messaging.NotificationResult.HANDLED)), ('no_return_value', dict(endpoints=[['warn']], endpoints_expect_calls=['warn'], priority='warn', ex=None, return_value=None)), ('requeue', dict(endpoints=[['debug', 'warn']], endpoints_expect_calls=['debug'], priority='debug', msg=notification_msg, ex=None, return_value=oslo_messaging.NotificationResult.REQUEUE)), ('exception', dict(endpoints=[['debug', 'warn']], endpoints_expect_calls=['debug'], priority='debug', msg=notification_msg, ex=Exception, return_value=oslo_messaging.NotificationResult.HANDLED)), ] def test_dispatcher(self): endpoints = [] for endpoint_methods in self.endpoints: e = mock.Mock(spec=endpoint_methods) endpoints.append(e) for m in endpoint_methods: method = getattr(e, m) if self.ex: method.side_effect = self.ex() else: method.return_value = self.return_value msg = notification_msg.copy() msg['priority'] = self.priority dispatcher = notify_dispatcher.NotificationDispatcher(endpoints, None) incoming = mock.Mock(ctxt={}, message=msg) res = dispatcher.dispatch(incoming) expected_res = ( notify_dispatcher.NotificationResult.REQUEUE if (self.return_value == notify_dispatcher.NotificationResult.REQUEUE or self.ex is not None) else notify_dispatcher.NotificationResult.HANDLED ) self.assertEqual(expected_res, res) # check endpoint callbacks are called or not for i, endpoint_methods in enumerate(self.endpoints): for m in endpoint_methods: if m == self.endpoints_expect_calls[i]: method = getattr(endpoints[i], m) method.assert_called_once_with( {}, msg['publisher_id'], msg['event_type'], msg['payload'], { 'timestamp': mock.ANY, 'message_id': mock.ANY }) else: self.assertEqual(0, endpoints[i].call_count) @mock.patch('oslo_messaging.notify.dispatcher.LOG') def test_dispatcher_unknown_prio(self, mylog): msg = notification_msg.copy() msg['priority'] = 'what???' 
dispatcher = notify_dispatcher.NotificationDispatcher( [mock.Mock()], None) res = dispatcher.dispatch(mock.Mock(ctxt={}, message=msg)) self.assertIsNone(res) mylog.warning.assert_called_once_with('Unknown priority "%s"', 'what???') class TestDispatcherFilter(test_utils.BaseTestCase): scenarios = [ ('publisher_id_match', dict(filter_rule=dict(publisher_id='^compute.*'), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('publisher_id_nomatch', dict(filter_rule=dict(publisher_id='^compute.*'), publisher_id='network01.manager', event_type='instance.create.start', context={}, match=False)), ('event_type_match', dict(filter_rule=dict(event_type='^instance\.create'), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('event_type_nomatch', dict(filter_rule=dict(event_type='^instance\.delete'), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), # this is only for simulation ('event_type_not_string', dict(filter_rule=dict(event_type='^instance\.delete'), publisher_id='compute01.manager', event_type=['instance.swim', 'instance.fly'], context={}, match=False)), ('context_match', dict(filter_rule=dict(context={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={'user': 'admin'}, match=True)), ('context_key_missing', dict(filter_rule=dict(context={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={'project': 'admin'}, metadata={}, match=False)), ('metadata_match', dict(filter_rule=dict(metadata={'message_id': '^99'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('metadata_key_missing', dict(filter_rule=dict(metadata={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('payload_match', dict(filter_rule=dict(payload={'state': '^active$'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=True)), ('payload_no_match', dict(filter_rule=dict(payload={'state': '^deleted$'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('payload_key_missing', dict(filter_rule=dict(payload={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('payload_value_none', dict(filter_rule=dict(payload={'virtual_size': '2048'}), publisher_id='compute01.manager', event_type='instance.create.start', context={}, match=False)), ('mix_match', dict(filter_rule=dict(event_type='^instance\.create', publisher_id='^compute', context={'user': '^adm'}), publisher_id='compute01.manager', event_type='instance.create.start', context={'user': 'admin'}, match=True)), ] def test_filters(self): notification_filter = oslo_messaging.NotificationFilter( **self.filter_rule) endpoint = mock.Mock(spec=['info'], filter_rule=notification_filter) dispatcher = notify_dispatcher.NotificationDispatcher( [endpoint], serializer=None) message = {'payload': {'state': 'active', 'virtual_size': None}, 'priority': 'info', 'publisher_id': self.publisher_id, 'event_type': self.event_type, 'timestamp': '2014-03-03 18:21:04.369234', 'message_id': '99863dda-97f0-443a-a0c1-6ed317b7fd45'} incoming = mock.Mock(ctxt=self.context, message=message) dispatcher.dispatch(incoming) if self.match: self.assertEqual(1, endpoint.info.call_count) else: self.assertEqual(0, endpoint.info.call_count) 
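
# A hedged recap of the filter contract exercised above: the dispatcher only
# invokes an endpoint whose filter_rule matches the incoming message. The
# rule values below are lifted from the scenarios; combining them in one
# filter is illustrative.
#
#     endpoint.filter_rule = oslo_messaging.NotificationFilter(
#         publisher_id='^compute.*',
#         event_type=r'^instance\.create',
#         context={'user': '^adm'})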
oslo.messaging-5.35.0/oslo_messaging/tests/notify/test_middleware.py0000666000175100017510000002053613224676046026013 0ustar zuulzuul00000000000000# Copyright 2013-2014 eNovance # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import webob from oslo_messaging.notify import middleware from oslo_messaging.tests import utils from six.moves import mock class FakeApp(object): def __call__(self, env, start_response): body = 'Some response' start_response('200 OK', [ ('Content-Type', 'text/plain'), ('Content-Length', str(sum(map(len, body)))) ]) return [body] class FakeFailingApp(object): def __call__(self, env, start_response): raise Exception("It happens!") class NotifierMiddlewareTest(utils.BaseTestCase): def test_notification(self): m = middleware.RequestNotifier(FakeApp()) req = webob.Request.blank('/foo/bar', environ={'REQUEST_METHOD': 'GET', 'HTTP_X_AUTH_TOKEN': uuid.uuid4()}) with mock.patch( 'oslo_messaging.notify.notifier.Notifier._notify') as notify: m(req) # Check first notification with only 'request' call_args = notify.call_args_list[0][0] self.assertEqual('http.request', call_args[1]) self.assertEqual('INFO', call_args[3]) self.assertEqual(set(['request']), set(call_args[2].keys())) request = call_args[2]['request'] self.assertEqual('/foo/bar', request['PATH_INFO']) self.assertEqual('GET', request['REQUEST_METHOD']) self.assertIn('HTTP_X_SERVICE_NAME', request) self.assertNotIn('HTTP_X_AUTH_TOKEN', request) self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), request.keys())), "WSGI fields are filtered out") # Check second notification with request + response call_args = notify.call_args_list[1][0] self.assertEqual('http.response', call_args[1]) self.assertEqual('INFO', call_args[3]) self.assertEqual(set(['request', 'response']), set(call_args[2].keys())) request = call_args[2]['request'] self.assertEqual('/foo/bar', request['PATH_INFO']) self.assertEqual('GET', request['REQUEST_METHOD']) self.assertIn('HTTP_X_SERVICE_NAME', request) self.assertNotIn('HTTP_X_AUTH_TOKEN', request) self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), request.keys())), "WSGI fields are filtered out") response = call_args[2]['response'] self.assertEqual('200 OK', response['status']) self.assertEqual('13', response['headers']['content-length']) def test_notification_response_failure(self): m = middleware.RequestNotifier(FakeFailingApp()) req = webob.Request.blank('/foo/bar', environ={'REQUEST_METHOD': 'GET', 'HTTP_X_AUTH_TOKEN': uuid.uuid4()}) with mock.patch( 'oslo_messaging.notify.notifier.Notifier._notify') as notify: try: m(req) self.fail("Application exception has not been re-raised") except Exception: pass # Check first notification with only 'request' call_args = notify.call_args_list[0][0] self.assertEqual('http.request', call_args[1]) self.assertEqual('INFO', call_args[3]) self.assertEqual(set(['request']), set(call_args[2].keys())) request = call_args[2]['request'] self.assertEqual('/foo/bar', request['PATH_INFO']) self.assertEqual('GET', request['REQUEST_METHOD']) 
self.assertIn('HTTP_X_SERVICE_NAME', request) self.assertNotIn('HTTP_X_AUTH_TOKEN', request) self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), request.keys())), "WSGI fields are filtered out") # Check second notification with 'request' and 'exception' call_args = notify.call_args_list[1][0] self.assertEqual('http.response', call_args[1]) self.assertEqual('INFO', call_args[3]) self.assertEqual(set(['request', 'exception']), set(call_args[2].keys())) request = call_args[2]['request'] self.assertEqual('/foo/bar', request['PATH_INFO']) self.assertEqual('GET', request['REQUEST_METHOD']) self.assertIn('HTTP_X_SERVICE_NAME', request) self.assertNotIn('HTTP_X_AUTH_TOKEN', request) self.assertFalse(any(map(lambda s: s.startswith('wsgi.'), request.keys())), "WSGI fields are filtered out") exception = call_args[2]['exception'] self.assertIn('middleware.py', exception['traceback'][0]) self.assertIn('It happens!', exception['traceback'][-1]) self.assertEqual("Exception('It happens!',)", exception['value']) def test_process_request_fail(self): def notify_error(context, publisher_id, event_type, priority, payload): raise Exception('error') with mock.patch('oslo_messaging.notify.notifier.Notifier._notify', notify_error): m = middleware.RequestNotifier(FakeApp()) req = webob.Request.blank('/foo/bar', environ={'REQUEST_METHOD': 'GET'}) m.process_request(req) def test_process_response_fail(self): def notify_error(context, publisher_id, event_type, priority, payload): raise Exception('error') with mock.patch('oslo_messaging.notify.notifier.Notifier._notify', notify_error): m = middleware.RequestNotifier(FakeApp()) req = webob.Request.blank('/foo/bar', environ={'REQUEST_METHOD': 'GET'}) m.process_response(req, webob.response.Response()) def test_ignore_req_opt(self): m = middleware.RequestNotifier(FakeApp(), ignore_req_list='get, PUT') req = webob.Request.blank('/skip/foo', environ={'REQUEST_METHOD': 'GET'}) req1 = webob.Request.blank('/skip/foo', environ={'REQUEST_METHOD': 'PUT'}) req2 = webob.Request.blank('/accept/foo', environ={'REQUEST_METHOD': 'POST'}) with mock.patch( 'oslo_messaging.notify.notifier.Notifier._notify') as notify: # Check GET request does not send notification m(req) m(req1) self.assertEqual(0, len(notify.call_args_list)) # Check non-GET request does send notification m(req2) self.assertEqual(2, len(notify.call_args_list)) call_args = notify.call_args_list[0][0] self.assertEqual('http.request', call_args[1]) self.assertEqual('INFO', call_args[3]) self.assertEqual(set(['request']), set(call_args[2].keys())) request = call_args[2]['request'] self.assertEqual('/accept/foo', request['PATH_INFO']) self.assertEqual('POST', request['REQUEST_METHOD']) call_args = notify.call_args_list[1][0] self.assertEqual('http.response', call_args[1]) self.assertEqual('INFO', call_args[3]) self.assertEqual(set(['request', 'response']), set(call_args[2].keys())) oslo.messaging-5.35.0/oslo_messaging/tests/notify/test_notifier.py0000777000175100017510000005550113224676046025520 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import logging import sys import uuid import fixtures from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_utils import timeutils from stevedore import dispatch from stevedore import extension import testscenarios import yaml import oslo_messaging from oslo_messaging.notify import _impl_log from oslo_messaging.notify import _impl_test from oslo_messaging.notify import messaging from oslo_messaging.notify import notifier as msg_notifier from oslo_messaging import serializer as msg_serializer from oslo_messaging.tests import utils as test_utils from six.moves import mock load_tests = testscenarios.load_tests_apply_scenarios class JsonMessageMatcher(object): def __init__(self, message): self.message = message def __eq__(self, other): return self.message == jsonutils.loads(other) class _ReRaiseLoggedExceptionsFixture(fixtures.Fixture): """Record logged exceptions and re-raise in cleanup. The notifier just logs notification send errors so, for the sake of debugging test failures, we record any exceptions logged and re-raise them during cleanup. """ class FakeLogger(object): def __init__(self): self.exceptions = [] def exception(self, msg, *args, **kwargs): self.exceptions.append(sys.exc_info()[1]) def warning(self, msg, *args, **kwargs): return def setUp(self): super(_ReRaiseLoggedExceptionsFixture, self).setUp() self.logger = self.FakeLogger() def reraise_exceptions(): for ex in self.logger.exceptions: raise ex self.addCleanup(reraise_exceptions) class TestMessagingNotifier(test_utils.BaseTestCase): _v1 = [ ('v1', dict(v1=True)), ('not_v1', dict(v1=False)), ] _v2 = [ ('v2', dict(v2=True)), ('not_v2', dict(v2=False)), ] _publisher_id = [ ('ctor_pub_id', dict(ctor_pub_id='test', expected_pub_id='test')), ('prep_pub_id', dict(prep_pub_id='test.localhost', expected_pub_id='test.localhost')), ('override', dict(ctor_pub_id='test', prep_pub_id='test.localhost', expected_pub_id='test.localhost')), ] _topics = [ ('no_topics', dict(topics=[])), ('single_topic', dict(topics=['notifications'])), ('multiple_topic2', dict(topics=['foo', 'bar'])), ] _priority = [ ('audit', dict(priority='audit')), ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('sample', dict(priority='sample')), ('critical', dict(priority='critical')), ] _payload = [ ('payload', dict(payload={'foo': 'bar'})), ] _context = [ ('ctxt', dict(ctxt={'user': 'bob'})), ] _retry = [ ('unconfigured', dict()), ('None', dict(retry=None)), ('0', dict(retry=0)), ('5', dict(retry=5)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._v1, cls._v2, cls._publisher_id, cls._topics, cls._priority, cls._payload, cls._context, cls._retry) def setUp(self): super(TestMessagingNotifier, self).setUp() self.logger = self.useFixture(_ReRaiseLoggedExceptionsFixture()).logger self.useFixture(fixtures.MockPatchObject( messaging, 'LOG', self.logger)) self.useFixture(fixtures.MockPatchObject( msg_notifier, '_LOG', self.logger)) @mock.patch('oslo_utils.timeutils.utcnow') def test_notifier(self, mock_utcnow): drivers = [] if self.v1: drivers.append('messaging') if self.v2: drivers.append('messagingv2') self.config(driver=drivers, topics=self.topics, group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') if hasattr(self, 
'ctor_pub_id'): notifier = oslo_messaging.Notifier(transport, publisher_id=self.ctor_pub_id) else: notifier = oslo_messaging.Notifier(transport) prepare_kwds = {} if hasattr(self, 'retry'): prepare_kwds['retry'] = self.retry if hasattr(self, 'prep_pub_id'): prepare_kwds['publisher_id'] = self.prep_pub_id if prepare_kwds: notifier = notifier.prepare(**prepare_kwds) transport._send_notification = mock.Mock() message_id = uuid.uuid4() uuid.uuid4 = mock.Mock(return_value=message_id) mock_utcnow.return_value = datetime.datetime.utcnow() message = { 'message_id': str(message_id), 'publisher_id': self.expected_pub_id, 'event_type': 'test.notify', 'priority': self.priority.upper(), 'payload': self.payload, 'timestamp': str(timeutils.utcnow()), } sends = [] if self.v1: sends.append(dict(version=1.0)) if self.v2: sends.append(dict(version=2.0)) calls = [] for send_kwargs in sends: for topic in self.topics: if hasattr(self, 'retry'): send_kwargs['retry'] = self.retry else: send_kwargs['retry'] = -1 target = oslo_messaging.Target(topic='%s.%s' % (topic, self.priority)) calls.append(mock.call(target, self.ctxt, message, **send_kwargs)) method = getattr(notifier, self.priority) method(self.ctxt, 'test.notify', self.payload) uuid.uuid4.assert_called_once_with() transport._send_notification.assert_has_calls(calls, any_order=True) self.assertTrue(notifier.is_enabled()) TestMessagingNotifier.generate_scenarios() class TestSerializer(test_utils.BaseTestCase): def setUp(self): super(TestSerializer, self).setUp() self.addCleanup(_impl_test.reset) @mock.patch('oslo_utils.timeutils.utcnow') def test_serializer(self, mock_utcnow): transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') serializer = msg_serializer.NoOpSerializer() notifier = oslo_messaging.Notifier(transport, 'test.localhost', driver='test', topics=['test'], serializer=serializer) message_id = uuid.uuid4() uuid.uuid4 = mock.Mock(return_value=message_id) mock_utcnow.return_value = datetime.datetime.utcnow() serializer.serialize_context = mock.Mock() serializer.serialize_context.return_value = dict(user='alice') serializer.serialize_entity = mock.Mock() serializer.serialize_entity.return_value = 'sbar' notifier.info(dict(user='bob'), 'test.notify', 'bar') message = { 'message_id': str(message_id), 'publisher_id': 'test.localhost', 'event_type': 'test.notify', 'priority': 'INFO', 'payload': 'sbar', 'timestamp': str(timeutils.utcnow()), } self.assertEqual([(dict(user='alice'), message, 'INFO', -1)], _impl_test.NOTIFICATIONS) uuid.uuid4.assert_called_once_with() serializer.serialize_context.assert_called_once_with(dict(user='bob')) serializer.serialize_entity.assert_called_once_with(dict(user='bob'), 'bar') class TestNotifierTopics(test_utils.BaseTestCase): def test_topics_from_config(self): self.config(driver=['log'], group='oslo_messaging_notifications') self.config(topics=['topic1', 'topic2'], group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport, 'test.localhost') self.assertEqual(['topic1', 'topic2'], notifier._topics) def test_topics_from_kwargs(self): self.config(driver=['log'], group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport, 'test.localhost', topics=['topic1', 'topic2']) self.assertEqual(['topic1', 'topic2'], notifier._topics) class TestLogNotifier(test_utils.BaseTestCase): 
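    # A minimal usage sketch (not a definitive recipe; behaviour inferred
    # from the assertions in test_notifier below) of the 'log' driver this
    # class exercises: each notification is JSON-serialized and logged on a
    # per-event-type logger.
    #
    #     self.config(driver=['log'], group='oslo_messaging_notifications')
    #     notifier = oslo_messaging.Notifier(transport, 'test.localhost')
    #     notifier.info({}, 'test.notify', 'bar')
    #     # -> logger 'oslo.messaging.notification.test.notify' receives
    #     #    the JSON-serialized message at INFO level.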
@mock.patch('oslo_utils.timeutils.utcnow') def test_notifier(self, mock_utcnow): self.config(driver=['log'], group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport, 'test.localhost') message_id = uuid.uuid4() uuid.uuid4 = mock.Mock() uuid.uuid4.return_value = message_id mock_utcnow.return_value = datetime.datetime.utcnow() logger = mock.Mock() message = { 'message_id': str(message_id), 'publisher_id': 'test.localhost', 'event_type': 'test.notify', 'priority': 'INFO', 'payload': 'bar', 'timestamp': str(timeutils.utcnow()), } with mock.patch.object(logging, 'getLogger') as gl: gl.return_value = logger notifier.info({}, 'test.notify', 'bar') uuid.uuid4.assert_called_once_with() logging.getLogger.assert_called_once_with( 'oslo.messaging.notification.test.notify') logger.info.assert_called_once_with(JsonMessageMatcher(message)) self.assertTrue(notifier.is_enabled()) def test_sample_priority(self): # Ensure logger drops sample-level notifications. driver = _impl_log.LogDriver(None, None, None) logger = mock.Mock(spec=logging.getLogger('oslo.messaging.' 'notification.foo')) logger.sample = None msg = {'event_type': 'foo'} with mock.patch.object(logging, 'getLogger') as gl: gl.return_value = logger driver.notify(None, msg, "sample", None) logging.getLogger.assert_called_once_with('oslo.messaging.' 'notification.foo') def test_mask_passwords(self): # Ensure that passwords are masked with notifications driver = _impl_log.LogDriver(None, None, None) logger = mock.MagicMock() logger.info = mock.MagicMock() message = {'password': 'passw0rd', 'event_type': 'foo'} mask_str = jsonutils.dumps(strutils.mask_dict_password(message)) with mock.patch.object(logging, 'getLogger') as gl: gl.return_value = logger driver.notify(None, message, 'info', 0) logger.info.assert_called_once_with(mask_str) class TestNotificationConfig(test_utils.BaseTestCase): def test_retry_config(self): conf = self.messaging_conf.conf self.config(driver=['messaging'], group='oslo_messaging_notifications') conf.set_override('retry', 3, group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport) self.assertEqual(3, notifier.retry) def test_notifier_retry_config(self): conf = self.messaging_conf.conf self.config(driver=['messaging'], group='oslo_messaging_notifications') conf.set_override('retry', 3, group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport, retry=5) self.assertEqual(5, notifier.retry) class TestRoutingNotifier(test_utils.BaseTestCase): def setUp(self): super(TestRoutingNotifier, self).setUp() self.config(driver=['routing'], group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') self.notifier = oslo_messaging.Notifier(transport) self.router = self.notifier._driver_mgr['routing'].obj self.assertTrue(self.notifier.is_enabled()) def _fake_extension_manager(self, ext): return extension.ExtensionManager.make_test_instance( [extension.Extension('test', None, None, ext), ]) def _empty_extension_manager(self): return extension.ExtensionManager.make_test_instance([]) def test_should_load_plugin(self): self.router.used_drivers = set(["zoo", "blah"]) ext = mock.MagicMock() ext.name = "foo" self.assertFalse(self.router._should_load_plugin(ext)) ext.name = "zoo" 
self.assertTrue(self.router._should_load_plugin(ext)) def test_load_notifiers_no_config(self): # default routing_config="" self.router._load_notifiers() self.assertEqual({}, self.router.routing_groups) self.assertEqual(0, len(self.router.used_drivers)) def test_load_notifiers_no_extensions(self): self.config(routing_config="routing_notifier.yaml", group='oslo_messaging_notifications') routing_config = r"" config_file = mock.MagicMock() config_file.return_value = routing_config with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=self._empty_extension_manager()): with mock.patch('oslo_messaging.notify.' '_impl_routing.LOG') as mylog: self.router._load_notifiers() self.assertFalse(mylog.debug.called) self.assertEqual({}, self.router.routing_groups) def test_load_notifiers_config(self): self.config(routing_config="routing_notifier.yaml", group='oslo_messaging_notifications') routing_config = r""" group_1: rpc : foo group_2: rpc : blah """ config_file = mock.MagicMock() config_file.return_value = routing_config with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=self._fake_extension_manager( mock.MagicMock())): with mock.patch('oslo_messaging.notify.' '_impl_routing.LOG'): self.router._load_notifiers() groups = list(self.router.routing_groups.keys()) groups.sort() self.assertEqual(['group_1', 'group_2'], groups) def test_get_drivers_for_message_accepted_events(self): config = r""" group_1: rpc: accepted_events: - foo.* - blah.zoo.* - zip """ groups = yaml.safe_load(config) group = groups['group_1'] # No matching event ... self.assertEqual([], self.router._get_drivers_for_message( group, "unknown", "info")) # Child of foo ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, "foo.1", "info")) # Foo itself ... self.assertEqual([], self.router._get_drivers_for_message( group, "foo", "info")) # Child of blah.zoo self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, "blah.zoo.zing", "info")) def test_get_drivers_for_message_accepted_priorities(self): config = r""" group_1: rpc: accepted_priorities: - info - error """ groups = yaml.safe_load(config) group = groups['group_1'] # No matching priority self.assertEqual([], self.router._get_drivers_for_message( group, None, "unknown")) # Info ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, None, "info")) # Error (to make sure the list is getting processed) ... self.assertEqual(['rpc'], self.router._get_drivers_for_message( group, None, "error")) def test_get_drivers_for_message_both(self): config = r""" group_1: rpc: accepted_priorities: - info accepted_events: - foo.* driver_1: accepted_priorities: - info driver_2: accepted_events: - foo.* """ groups = yaml.safe_load(config) group = groups['group_1'] # Valid event, but no matching priority self.assertEqual(['driver_2'], self.router._get_drivers_for_message( group, 'foo.blah', "unknown")) # Valid priority, but no matching event self.assertEqual(['driver_1'], self.router._get_drivers_for_message( group, 'unknown', "info")) # Happy day ... x = self.router._get_drivers_for_message(group, 'foo.blah', "info") x.sort() self.assertEqual(['driver_1', 'driver_2', 'rpc'], x) def test_filter_func(self): ext = mock.MagicMock() ext.name = "rpc" # Good ... 
self.assertTrue(self.router._filter_func(ext, {}, {}, 'info', None, ['foo', 'rpc'])) # Bad self.assertFalse(self.router._filter_func(ext, {}, {}, 'info', None, ['foo'])) def test_notify(self): self.router.routing_groups = {'group_1': None, 'group_2': None} drivers_mock = mock.MagicMock() drivers_mock.side_effect = [['rpc'], ['foo']] with mock.patch.object(self.router, 'plugin_manager') as pm: with mock.patch.object(self.router, '_get_drivers_for_message', drivers_mock): self.notifier.info({}, 'my_event', {}) self.assertEqual(sorted(['rpc', 'foo']), sorted(pm.map.call_args[0][6])) def test_notify_filtered(self): self.config(routing_config="routing_notifier.yaml", group='oslo_messaging_notifications') routing_config = r""" group_1: rpc: accepted_events: - my_event rpc2: accepted_priorities: - info bar: accepted_events: - nothing """ config_file = mock.MagicMock() config_file.return_value = routing_config rpc_driver = mock.Mock() rpc2_driver = mock.Mock() bar_driver = mock.Mock() pm = dispatch.DispatchExtensionManager.make_test_instance( [extension.Extension('rpc', None, None, rpc_driver), extension.Extension('rpc2', None, None, rpc2_driver), extension.Extension('bar', None, None, bar_driver)], ) with mock.patch.object(self.router, '_get_notifier_config_file', config_file): with mock.patch('stevedore.dispatch.DispatchExtensionManager', return_value=pm): with mock.patch('oslo_messaging.notify.' '_impl_routing.LOG'): self.notifier.info({}, 'my_event', {}) self.assertFalse(bar_driver.info.called) rpc_driver.notify.assert_called_once_with( {}, mock.ANY, 'INFO', -1) rpc2_driver.notify.assert_called_once_with( {}, mock.ANY, 'INFO', -1) class TestNoOpNotifier(test_utils.BaseTestCase): def test_notifier(self): self.config(driver=['noop'], group='oslo_messaging_notifications') transport = oslo_messaging.get_notification_transport(self.conf, url='fake:') notifier = oslo_messaging.Notifier(transport, 'test.localhost') self.assertFalse(notifier.is_enabled()) class TestNotifierTransportWarning(test_utils.BaseTestCase): @mock.patch('oslo_messaging.notify.notifier._LOG') def test_warning_when_rpc_transport(self, log): transport = oslo_messaging.get_rpc_transport(self.conf) oslo_messaging.Notifier(transport, 'test.localhost') log.warning.assert_called_once_with( "Using RPC transport for notifications. Please use " "get_notification_transport to obtain a " "notification transport instance.") oslo.messaging-5.35.0/oslo_messaging/tests/notify/test_logger.py0000666000175100017510000001302413224676046025147 0ustar zuulzuul00000000000000# Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
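# The tests below exercise oslo_messaging.LoggingNotificationHandler, a
# standard-library logging handler that forwards every LogRecord it receives
# as a 'logrecord' notification. A minimal usage sketch (hedged: 'myapp' is
# an illustrative logger name; 'test://' selects the in-memory test driver
# as in the tests below):
#
#     import logging
#     import oslo_messaging
#
#     handler = oslo_messaging.LoggingNotificationHandler('test://')
#     logging.getLogger('myapp').addHandler(handler)
#     logging.getLogger('myapp').warning('disk is full')
#     # Each record is emitted with a priority matching the log level and a
#     # payload carrying the record fields (name, levelno, pathname, lineno,
#     # msg, ...), as asserted in TestLogNotifier.test_logger below.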
import datetime import logging import logging.config import os import sys from oslo_utils import timeutils import testscenarios import oslo_messaging from oslo_messaging.tests import utils as test_utils from six.moves import mock load_tests = testscenarios.load_tests_apply_scenarios # Stolen from oslo.log logging.AUDIT = logging.INFO + 1 logging.addLevelName(logging.AUDIT, 'AUDIT') class TestLogNotifier(test_utils.BaseTestCase): scenarios = [ ('debug', dict(priority='debug')), ('info', dict(priority='info')), ('warning', dict(priority='warning', queue='WARN')), ('warn', dict(priority='warn')), ('error', dict(priority='error')), ('critical', dict(priority='critical')), ('audit', dict(priority='audit')), ] def setUp(self): super(TestLogNotifier, self).setUp() self.addCleanup(oslo_messaging.notify._impl_test.reset) self.config(driver=['test'], group='oslo_messaging_notifications') # NOTE(jamespage) disable thread information logging for testing # as this causes test failures when zmq tests monkey_patch via # eventlet logging.logThreads = 0 @mock.patch('oslo_utils.timeutils.utcnow') def test_logger(self, mock_utcnow): fake_transport = oslo_messaging.get_notification_transport(self.conf) with mock.patch('oslo_messaging.transport._get_transport', return_value=fake_transport): self.logger = oslo_messaging.LoggingNotificationHandler('test://') mock_utcnow.return_value = datetime.datetime.utcnow() levelno = getattr(logging, self.priority.upper(), 42) record = logging.LogRecord('foo', levelno, '/foo/bar', 42, 'Something happened', None, None) self.logger.emit(record) context = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][0] self.assertEqual({}, context) n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1] self.assertEqual(getattr(self, 'queue', self.priority.upper()), n['priority']) self.assertEqual('logrecord', n['event_type']) self.assertEqual(str(timeutils.utcnow()), n['timestamp']) self.assertIsNone(n['publisher_id']) self.assertEqual( {'process': os.getpid(), 'funcName': None, 'name': 'foo', 'thread': None, 'levelno': levelno, 'processName': 'MainProcess', 'pathname': '/foo/bar', 'lineno': 42, 'msg': 'Something happened', 'exc_info': None, 'levelname': logging.getLevelName(levelno), 'extra': None}, n['payload']) @mock.patch('oslo_utils.timeutils.utcnow') def test_logging_conf(self, mock_utcnow): fake_transport = oslo_messaging.get_notification_transport(self.conf) with mock.patch('oslo_messaging.transport._get_transport', return_value=fake_transport): logging.config.dictConfig({ 'version': 1, 'handlers': { 'notification': { 'class': 'oslo_messaging.LoggingNotificationHandler', 'level': self.priority.upper(), 'url': 'test://', }, }, 'loggers': { 'default': { 'handlers': ['notification'], 'level': self.priority.upper(), }, }, }) mock_utcnow.return_value = datetime.datetime.utcnow() levelno = getattr(logging, self.priority.upper()) logger = logging.getLogger('default') lineno = sys._getframe().f_lineno + 1 logger.log(levelno, 'foobar') n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1] self.assertEqual(getattr(self, 'queue', self.priority.upper()), n['priority']) self.assertEqual('logrecord', n['event_type']) self.assertEqual(str(timeutils.utcnow()), n['timestamp']) self.assertIsNone(n['publisher_id']) pathname = __file__ if pathname.endswith(('.pyc', '.pyo')): pathname = pathname[:-1] self.assertDictEqual( n['payload'], {'process': os.getpid(), 'funcName': 'test_logging_conf', 'name': 'default', 'thread': None, 'levelno': levelno, 'processName': 'MainProcess', 'pathname': pathname, 
'lineno': lineno, 'msg': 'foobar', 'exc_info': None, 'levelname': logging.getLevelName(levelno), 'extra': None}) oslo.messaging-5.35.0/oslo_messaging/tests/test_transport.py0000777000175100017510000003672113224676046024430 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from oslo_config import cfg import six from six.moves import mock from stevedore import driver import testscenarios import oslo_messaging from oslo_messaging.tests import utils as test_utils from oslo_messaging import transport load_tests = testscenarios.load_tests_apply_scenarios class _FakeDriver(object): def __init__(self, conf): self.conf = conf def send(self, *args, **kwargs): pass def send_notification(self, *args, **kwargs): pass def listen(self, target, batch_size, batch_timeout): pass class _FakeManager(object): def __init__(self, driver): self.driver = driver class GetTransportTestCase(test_utils.BaseTestCase): scenarios = [ ('rpc_backend', dict(url=None, transport_url=None, rpc_backend='testbackend', control_exchange=None, allowed=None, aliases=None, expect=dict(backend='testbackend', exchange=None, url='testbackend:', allowed=[]))), ('transport_url', dict(url=None, transport_url='testtransport:', rpc_backend=None, control_exchange=None, allowed=None, aliases=None, expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ('url_param', dict(url='testtransport:', transport_url=None, rpc_backend=None, control_exchange=None, allowed=None, aliases=None, expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ('control_exchange', dict(url=None, transport_url=None, rpc_backend='testbackend', control_exchange='testexchange', allowed=None, aliases=None, expect=dict(backend='testbackend', exchange='testexchange', url='testbackend:', allowed=[]))), ('allowed_remote_exmods', dict(url=None, transport_url=None, rpc_backend='testbackend', control_exchange=None, allowed=['foo', 'bar'], aliases=None, expect=dict(backend='testbackend', exchange=None, url='testbackend:', allowed=['foo', 'bar']))), ('rpc_backend_aliased', dict(url=None, transport_url=None, rpc_backend='testfoo', control_exchange=None, allowed=None, aliases=dict(testfoo='testbackend'), expect=dict(backend='testbackend', exchange=None, url='testbackend:', allowed=[]))), ('transport_url_aliased', dict(url=None, transport_url='testfoo:', rpc_backend=None, control_exchange=None, allowed=None, aliases=dict(testfoo='testtransport'), expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ('url_param_aliased', dict(url='testfoo:', transport_url=None, rpc_backend=None, control_exchange=None, allowed=None, aliases=dict(testfoo='testtransport'), expect=dict(backend='testtransport', exchange=None, url='testtransport:', allowed=[]))), ] @mock.patch('oslo_messaging.transport.LOG') def test_get_transport(self, fake_logger): self.config(rpc_backend=self.rpc_backend, control_exchange=self.control_exchange, 
transport_url=self.transport_url) driver.DriverManager = mock.Mock() invoke_args = [self.conf, oslo_messaging.TransportURL.parse(self.conf, self.expect['url'])] invoke_kwds = dict(default_exchange=self.expect['exchange'], allowed_remote_exmods=self.expect['allowed']) drvr = _FakeDriver(self.conf) driver.DriverManager.return_value = _FakeManager(drvr) kwargs = dict(url=self.url) if self.allowed is not None: kwargs['allowed_remote_exmods'] = self.allowed if self.aliases is not None: kwargs['aliases'] = self.aliases transport_ = oslo_messaging.get_transport(self.conf, **kwargs) if self.aliases is not None: self.assertEqual( [mock.call('legacy "rpc_backend" is deprecated, ' '"testfoo" must be replaced by ' '"%s"' % self.aliases.get('testfoo'))], fake_logger.warning.mock_calls ) self.assertIsNotNone(transport_) self.assertIs(transport_.conf, self.conf) self.assertIs(transport_._driver, drvr) self.assertTrue(isinstance(transport_, transport.RPCTransport)) driver.DriverManager.assert_called_once_with('oslo.messaging.drivers', self.expect['backend'], invoke_on_load=True, invoke_args=invoke_args, invoke_kwds=invoke_kwds) class GetTransportSadPathTestCase(test_utils.BaseTestCase): scenarios = [ ('invalid_transport_url', dict(url=None, transport_url='invalid', rpc_backend=None, ex=dict(cls=oslo_messaging.InvalidTransportURL, msg_contains='No scheme specified', url='invalid'))), ('invalid_url_param', dict(url='invalid', transport_url=None, rpc_backend=None, ex=dict(cls=oslo_messaging.InvalidTransportURL, msg_contains='No scheme specified', url='invalid'))), ('driver_load_failure', dict(url=None, transport_url=None, rpc_backend='testbackend', ex=dict(cls=oslo_messaging.DriverLoadFailure, msg_contains='Failed to load', driver='testbackend'))), ] def test_get_transport_sad(self): self.config(rpc_backend=self.rpc_backend, transport_url=self.transport_url) if self.rpc_backend: driver.DriverManager = mock.Mock() invoke_args = [self.conf, oslo_messaging.TransportURL.parse(self.conf, self.url)] invoke_kwds = dict(default_exchange='openstack', allowed_remote_exmods=[]) driver.DriverManager.side_effect = RuntimeError() try: oslo_messaging.get_transport(self.conf, url=self.url) self.assertFalse(True) driver.DriverManager.\ assert_called_once_with('oslo.messaging.drivers', self.rpc_backend, invoke_on_load=True, invoke_args=invoke_args, invoke_kwds=invoke_kwds) except Exception as ex: ex_cls = self.ex.pop('cls') ex_msg_contains = self.ex.pop('msg_contains') self.assertIsInstance(ex, oslo_messaging.MessagingException) self.assertIsInstance(ex, ex_cls) self.assertIn(ex_msg_contains, six.text_type(ex)) for k, v in self.ex.items(): self.assertTrue(hasattr(ex, k)) self.assertEqual(v, str(getattr(ex, k))) # FIXME(markmc): this could be used elsewhere class _SetDefaultsFixture(fixtures.Fixture): def __init__(self, set_defaults, opts, *names): super(_SetDefaultsFixture, self).__init__() self.set_defaults = set_defaults self.opts = opts self.names = names def setUp(self): super(_SetDefaultsFixture, self).setUp() # FIXME(markmc): this comes from Id5c1f3ba def first(seq, default=None, key=None): if key is None: key = bool return next(six.moves.filter(key, seq), default) def default(opts, name): return first(opts, key=lambda o: o.name == name).default orig_defaults = {} for n in self.names: orig_defaults[n] = default(self.opts, n) def restore_defaults(): self.set_defaults(**orig_defaults) self.addCleanup(restore_defaults) class TestSetDefaults(test_utils.BaseTestCase): def setUp(self): super(TestSetDefaults, 
self).setUp(conf=cfg.ConfigOpts()) self.useFixture(_SetDefaultsFixture( oslo_messaging.set_transport_defaults, transport._transport_opts, 'control_exchange')) def test_set_default_control_exchange(self): oslo_messaging.set_transport_defaults(control_exchange='foo') driver.DriverManager = mock.Mock() invoke_kwds = dict(default_exchange='foo', allowed_remote_exmods=[]) driver.DriverManager.return_value = \ _FakeManager(_FakeDriver(self.conf)) oslo_messaging.get_transport(self.conf) driver.DriverManager.assert_called_once_with(mock.ANY, mock.ANY, invoke_on_load=mock.ANY, invoke_args=mock.ANY, invoke_kwds=invoke_kwds) class TestTransportMethodArgs(test_utils.BaseTestCase): _target = oslo_messaging.Target(topic='topic', server='server') def test_send_defaults(self): t = transport.Transport(_FakeDriver(cfg.CONF)) t._driver.send = mock.Mock() t._send(self._target, 'ctxt', 'message') t._driver.send.assert_called_once_with(self._target, 'ctxt', 'message', wait_for_reply=None, timeout=None, retry=None) def test_send_all_args(self): t = transport.Transport(_FakeDriver(cfg.CONF)) t._driver.send = mock.Mock() t._send(self._target, 'ctxt', 'message', wait_for_reply='wait_for_reply', timeout='timeout', retry='retry') t._driver.send.\ assert_called_once_with(self._target, 'ctxt', 'message', wait_for_reply='wait_for_reply', timeout='timeout', retry='retry') def test_send_notification(self): t = transport.Transport(_FakeDriver(cfg.CONF)) t._driver.send_notification = mock.Mock() t._send_notification(self._target, 'ctxt', 'message', version=1.0) t._driver.send_notification.assert_called_once_with(self._target, 'ctxt', 'message', 1.0, retry=None) def test_send_notification_all_args(self): t = transport.Transport(_FakeDriver(cfg.CONF)) t._driver.send_notification = mock.Mock() t._send_notification(self._target, 'ctxt', 'message', version=1.0, retry=5) t._driver.send_notification.assert_called_once_with(self._target, 'ctxt', 'message', 1.0, retry=5) def test_listen(self): t = transport.Transport(_FakeDriver(cfg.CONF)) t._driver.listen = mock.Mock() t._listen(self._target, 1, None) t._driver.listen.assert_called_once_with(self._target, 1, None) class TestTransportUrlCustomisation(test_utils.BaseTestCase): def setUp(self): super(TestTransportUrlCustomisation, self).setUp() def transport_url_parse(url): return transport.TransportURL.parse(self.conf, url) self.url1 = transport_url_parse("fake://vhost1?x=1&y=2&z=3") self.url2 = transport_url_parse("fake://vhost2?foo=bar") self.url3 = transport_url_parse("fake://vhost1?l=1&l=2&l=3") self.url4 = transport_url_parse("fake://vhost2?d=x:1&d=y:2&d=z:3") self.url5 = transport_url_parse("fake://noport:/?") def test_hash(self): urls = {} urls[self.url1] = self.url1 urls[self.url2] = self.url2 urls[self.url3] = self.url3 urls[self.url4] = self.url4 urls[self.url5] = self.url5 self.assertEqual(3, len(urls)) def test_eq(self): self.assertEqual(self.url1, self.url3) self.assertEqual(self.url2, self.url4) self.assertNotEqual(self.url1, self.url4) def test_query(self): self.assertEqual({'x': '1', 'y': '2', 'z': '3'}, self.url1.query) self.assertEqual({'foo': 'bar'}, self.url2.query) self.assertEqual({'l': '1,2,3'}, self.url3.query) self.assertEqual({'d': 'x:1,y:2,z:3'}, self.url4.query) def test_noport(self): self.assertIsNone(self.url5.hosts[0].port) class TestTransportHostCustomisation(test_utils.BaseTestCase): def setUp(self): super(TestTransportHostCustomisation, self).setUp() self.host1 = transport.TransportHost("host1", 5662, "user", "pass") self.host2 = 
transport.TransportHost("host1", 5662, "user", "pass") self.host3 = transport.TransportHost("host1", 5663, "user", "pass") self.host4 = transport.TransportHost("host1", 5662, "user2", "pass") self.host5 = transport.TransportHost("host1", 5662, "user", "pass2") self.host6 = transport.TransportHost("host2", 5662, "user", "pass") def test_hash(self): hosts = {} hosts[self.host1] = self.host1 hosts[self.host2] = self.host2 hosts[self.host3] = self.host3 hosts[self.host4] = self.host4 hosts[self.host5] = self.host5 hosts[self.host6] = self.host6 self.assertEqual(5, len(hosts)) def test_eq(self): self.assertEqual(self.host1, self.host2) self.assertNotEqual(self.host1, self.host3) self.assertNotEqual(self.host1, self.host4) self.assertNotEqual(self.host1, self.host5) self.assertNotEqual(self.host1, self.host6) oslo.messaging-5.35.0/oslo_messaging/tests/utils.py0000666000175100017510000000515513224676046022467 0ustar zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities used in testing""" import threading from oslo_config import cfg from oslotest import base TRUE_VALUES = ('true', '1', 'yes') class BaseTestCase(base.BaseTestCase): def setUp(self, conf=cfg.CONF): super(BaseTestCase, self).setUp() from oslo_messaging import conffixture self.messaging_conf = self.useFixture(conffixture.ConfFixture(conf)) self.messaging_conf.transport_driver = 'fake' self.conf = self.messaging_conf.conf self.conf.project = 'project' self.conf.prog = 'prog' def config(self, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the tearDown() method. """ group = kw.pop('group', None) for k, v in kw.items(): self.conf.set_override(k, v, group) class ServerThreadHelper(threading.Thread): def __init__(self, server): super(ServerThreadHelper, self).__init__() self.daemon = True self._server = server self._stop_event = threading.Event() self._start_event = threading.Event() def start(self): super(ServerThreadHelper, self).start() self._start_event.wait() def run(self): self._server.start() self._start_event.set() self._stop_event.wait() # Check start() does nothing with a running listener self._server.start() self._server.stop() self._server.wait() def stop(self): self._stop_event.set() oslo.messaging-5.35.0/oslo_messaging/tests/test_fixture.py0000666000175100017510000000654213224676046024055 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_messaging import conffixture from oslo_messaging.tests import utils as test_utils class TestConfFixture(test_utils.BaseTestCase): def test_fixture_wraps_set_override(self): conf = self.messaging_conf.conf self.assertIsNotNone(conf.set_override.wrapped) self.messaging_conf._teardown_decorator() self.assertFalse(hasattr(conf.set_override, 'wrapped')) def test_fixture_wraps_clear_override(self): conf = self.messaging_conf.conf self.assertIsNotNone(conf.clear_override.wrapped) self.messaging_conf._teardown_decorator() self.assertFalse(hasattr(conf.clear_override, 'wrapped')) def test_fixture_setup_teardown_decorator(self): conf = cfg.ConfigOpts() self.assertFalse(hasattr(conf.set_override, 'wrapped')) self.assertFalse(hasattr(conf.clear_override, 'wrapped')) fixture = conffixture.ConfFixture(conf) self.assertFalse(hasattr(conf.set_override, 'wrapped')) self.assertFalse(hasattr(conf.clear_override, 'wrapped')) self.useFixture(fixture) self.assertTrue(hasattr(conf.set_override, 'wrapped')) self.assertTrue(hasattr(conf.clear_override, 'wrapped')) fixture._teardown_decorator() self.assertFalse(hasattr(conf.set_override, 'wrapped')) self.assertFalse(hasattr(conf.clear_override, 'wrapped')) def test_fixture_properties(self): conf = self.messaging_conf.conf self.messaging_conf.transport_driver = 'fake' self.assertEqual('fake', self.messaging_conf.transport_driver) self.assertEqual('fake', conf.rpc_backend) def test_old_notifications_config_override(self): conf = self.messaging_conf.conf conf.set_override( "notification_driver", ["messaging"]) conf.set_override( "notification_transport_url", "http://xyz") conf.set_override( "notification_topics", ['topic1']) self.assertEqual(["messaging"], conf.oslo_messaging_notifications.driver) self.assertEqual("http://xyz", conf.oslo_messaging_notifications.transport_url) self.assertEqual(['topic1'], conf.oslo_messaging_notifications.topics) conf.clear_override("notification_driver") conf.clear_override("notification_transport_url") conf.clear_override("notification_topics") self.assertEqual([], conf.oslo_messaging_notifications.driver) self.assertIsNone(conf.oslo_messaging_notifications.transport_url) self.assertEqual(['notifications'], conf.oslo_messaging_notifications.topics) oslo.messaging-5.35.0/oslo_messaging/tests/rpc/0000775000175100017510000000000013224676256021534 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/rpc/test_client.py0000777000175100017510000005115313224676046024432 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from six.moves import mock import testscenarios import oslo_messaging from oslo_messaging import exceptions from oslo_messaging import serializer as msg_serializer from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TestCastCall(test_utils.BaseTestCase): scenarios = [ ('cast_no_ctxt_no_args', dict(call=False, ctxt={}, args={})), ('call_no_ctxt_no_args', dict(call=True, ctxt={}, args={})), ('cast_ctxt_and_args', dict(call=False, ctxt=dict(user='testuser', project='testtenant'), args=dict(bar='blaa', foobar=11.01))), ('call_ctxt_and_args', dict(call=True, ctxt=dict(user='testuser', project='testtenant'), args=dict(bar='blaa', foobar=11.01))), ] def test_cast_call(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.RPCClient(transport, oslo_messaging.Target()) transport._send = mock.Mock() msg = dict(method='foo', args=self.args) kwargs = {'retry': None} if self.call: kwargs['wait_for_reply'] = True kwargs['timeout'] = None method = client.call if self.call else client.cast method(self.ctxt, 'foo', **self.args) transport._send.assert_called_once_with(oslo_messaging.Target(), self.ctxt, msg, **kwargs) class TestCastToTarget(test_utils.BaseTestCase): _base = [ ('all_none', dict(ctor={}, prepare={}, expect={})), ('ctor_exchange', dict(ctor=dict(exchange='testexchange'), prepare={}, expect=dict(exchange='testexchange'))), ('prepare_exchange', dict(ctor={}, prepare=dict(exchange='testexchange'), expect=dict(exchange='testexchange'))), ('prepare_exchange_none', dict(ctor=dict(exchange='testexchange'), prepare=dict(exchange=None), expect={})), ('both_exchange', dict(ctor=dict(exchange='ctorexchange'), prepare=dict(exchange='testexchange'), expect=dict(exchange='testexchange'))), ('ctor_topic', dict(ctor=dict(topic='testtopic'), prepare={}, expect=dict(topic='testtopic'))), ('prepare_topic', dict(ctor={}, prepare=dict(topic='testtopic'), expect=dict(topic='testtopic'))), ('prepare_topic_none', dict(ctor=dict(topic='testtopic'), prepare=dict(topic=None), expect={})), ('both_topic', dict(ctor=dict(topic='ctortopic'), prepare=dict(topic='testtopic'), expect=dict(topic='testtopic'))), ('ctor_namespace', dict(ctor=dict(namespace='testnamespace'), prepare={}, expect=dict(namespace='testnamespace'))), ('prepare_namespace', dict(ctor={}, prepare=dict(namespace='testnamespace'), expect=dict(namespace='testnamespace'))), ('prepare_namespace_none', dict(ctor=dict(namespace='testnamespace'), prepare=dict(namespace=None), expect={})), ('both_namespace', dict(ctor=dict(namespace='ctornamespace'), prepare=dict(namespace='testnamespace'), expect=dict(namespace='testnamespace'))), ('ctor_version', dict(ctor=dict(version='1.1'), prepare={}, expect=dict(version='1.1'))), ('prepare_version', dict(ctor={}, prepare=dict(version='1.1'), expect=dict(version='1.1'))), ('prepare_version_none', dict(ctor=dict(version='1.1'), prepare=dict(version=None), expect={})), ('both_version', dict(ctor=dict(version='ctorversion'), prepare=dict(version='1.1'), 
expect=dict(version='1.1'))), ('ctor_server', dict(ctor=dict(server='testserver'), prepare={}, expect=dict(server='testserver'))), ('prepare_server', dict(ctor={}, prepare=dict(server='testserver'), expect=dict(server='testserver'))), ('prepare_server_none', dict(ctor=dict(server='testserver'), prepare=dict(server=None), expect={})), ('both_server', dict(ctor=dict(server='ctorserver'), prepare=dict(server='testserver'), expect=dict(server='testserver'))), ('ctor_fanout', dict(ctor=dict(fanout=True), prepare={}, expect=dict(fanout=True))), ('prepare_fanout', dict(ctor={}, prepare=dict(fanout=True), expect=dict(fanout=True))), ('prepare_fanout_none', dict(ctor=dict(fanout=True), prepare=dict(fanout=None), expect={})), ('both_fanout', dict(ctor=dict(fanout=True), prepare=dict(fanout=False), expect=dict(fanout=False))), ] _prepare = [ ('single_prepare', dict(double_prepare=False)), ('double_prepare', dict(double_prepare=True)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._base, cls._prepare) def setUp(self): super(TestCastToTarget, self).setUp(conf=cfg.ConfigOpts()) def test_cast_to_target(self): target = oslo_messaging.Target(**self.ctor) expect_target = oslo_messaging.Target(**self.expect) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.RPCClient(transport, target) transport._send = mock.Mock() msg = dict(method='foo', args={}) if 'namespace' in self.expect: msg['namespace'] = self.expect['namespace'] if 'version' in self.expect: msg['version'] = self.expect['version'] if self.prepare: client = client.prepare(**self.prepare) if self.double_prepare: client = client.prepare(**self.prepare) client.cast({}, 'foo') transport._send.assert_called_once_with(expect_target, {}, msg, retry=None) TestCastToTarget.generate_scenarios() _notset = object() class TestCallTimeout(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(confval=None, ctor=None, prepare=_notset, expect=None)), ('confval', dict(confval=21, ctor=None, prepare=_notset, expect=21)), ('ctor', dict(confval=None, ctor=21.1, prepare=_notset, expect=21.1)), ('ctor_zero', dict(confval=None, ctor=0, prepare=_notset, expect=0)), ('prepare', dict(confval=None, ctor=None, prepare=21.1, expect=21.1)), ('prepare_override', dict(confval=None, ctor=10.1, prepare=21.1, expect=21.1)), ('prepare_zero', dict(confval=None, ctor=None, prepare=0, expect=0)), ] def test_call_timeout(self): self.config(rpc_response_timeout=self.confval) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(), timeout=self.ctor) transport._send = mock.Mock() msg = dict(method='foo', args={}) kwargs = dict(wait_for_reply=True, timeout=self.expect, retry=None) if self.prepare is not _notset: client = client.prepare(timeout=self.prepare) client.call({}, 'foo') transport._send.assert_called_once_with(oslo_messaging.Target(), {}, msg, **kwargs) class TestCallRetry(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(ctor=None, prepare=_notset, expect=None)), ('ctor', dict(ctor=21, prepare=_notset, expect=21)), ('ctor_zero', dict(ctor=0, prepare=_notset, expect=0)), ('prepare', dict(ctor=None, prepare=21, expect=21)), ('prepare_override', dict(ctor=10, prepare=21, expect=21)), ('prepare_zero', dict(ctor=None, prepare=0, expect=0)), ] def test_call_retry(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.RPCClient(transport, 
oslo_messaging.Target(), retry=self.ctor) transport._send = mock.Mock() msg = dict(method='foo', args={}) kwargs = dict(wait_for_reply=True, timeout=60, retry=self.expect) if self.prepare is not _notset: client = client.prepare(retry=self.prepare) client.call({}, 'foo') transport._send.assert_called_once_with(oslo_messaging.Target(), {}, msg, **kwargs) class TestCallFanout(test_utils.BaseTestCase): scenarios = [ ('target', dict(prepare=_notset, target={'fanout': True})), ('prepare', dict(prepare={'fanout': True}, target={})), ('both', dict(prepare={'fanout': True}, target={'fanout': True})), ] def test_call_fanout(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(**self.target)) if self.prepare is not _notset: client = client.prepare(**self.prepare) self.assertRaises(exceptions.InvalidTarget, client.call, {}, 'foo') class TestSerializer(test_utils.BaseTestCase): scenarios = [ ('cast', dict(call=False, ctxt=dict(user='bob'), args=dict(a='a', b='b', c='c'), retval=None)), ('call', dict(call=True, ctxt=dict(user='bob'), args=dict(a='a', b='b', c='c'), retval='d')), ] def test_call_serializer(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') serializer = msg_serializer.NoOpSerializer() client = oslo_messaging.RPCClient(transport, oslo_messaging.Target(), serializer=serializer) transport._send = mock.Mock() kwargs = dict(wait_for_reply=True, timeout=None) if self.call else {} kwargs['retry'] = None transport._send.return_value = self.retval serializer.serialize_entity = mock.Mock() serializer.deserialize_entity = mock.Mock() serializer.serialize_context = mock.Mock() def _stub(ctxt, arg): return 's' + arg msg = dict(method='foo', args=dict()) for k, v in self.args.items(): msg['args'][k] = 's' + v serializer.serialize_entity.side_effect = _stub if self.call: serializer.deserialize_entity.return_value = 'd' + self.retval serializer.serialize_context.return_value = dict(user='alice') method = client.call if self.call else client.cast retval = method(self.ctxt, 'foo', **self.args) if self.retval is not None: self.assertEqual('d' + self.retval, retval) transport._send.assert_called_once_with(oslo_messaging.Target(), dict(user='alice'), msg, **kwargs) expected_calls = [mock.call(self.ctxt, arg) for arg in self.args] self.assertEqual(expected_calls, serializer.serialize_entity.mock_calls) if self.call: serializer.deserialize_entity.assert_called_once_with(self.ctxt, self.retval) serializer.serialize_context.assert_called_once_with(self.ctxt) class TestVersionCap(test_utils.BaseTestCase): _call_vs_cast = [ ('call', dict(call=True)), ('cast', dict(call=False)), ] _cap_scenarios = [ ('all_none', dict(cap=None, prepare_cap=_notset, version=None, prepare_version=_notset, success=True)), ('ctor_cap_ok', dict(cap='1.1', prepare_cap=_notset, version='1.0', prepare_version=_notset, success=True)), ('ctor_cap_override_ok', dict(cap='2.0', prepare_cap='1.1', version='1.0', prepare_version='1.0', success=True)), ('ctor_cap_override_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, success=True)), ('ctor_cap_minor_fail', dict(cap='1.0', prepare_cap=_notset, version='1.1', prepare_version=_notset, success=False)), ('ctor_cap_major_fail', dict(cap='2.0', prepare_cap=_notset, version=None, prepare_version='1.0', success=False)), ('ctor_cap_none_version_ok', dict(cap=None, prepare_cap=_notset, version='1.0', 
prepare_version=_notset, success=True)), ('ctor_cap_version_none_fail', dict(cap='1.0', prepare_cap=_notset, version=None, prepare_version=_notset, success=False)), ] @classmethod def generate_scenarios(cls): cls.scenarios = ( testscenarios.multiply_scenarios(cls._call_vs_cast, cls._cap_scenarios)) def test_version_cap(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(version=self.version) client = oslo_messaging.RPCClient(transport, target, version_cap=self.cap) if self.success: transport._send = mock.Mock() if self.prepare_version is not _notset: target = target(version=self.prepare_version) msg = dict(method='foo', args={}) if target.version is not None: msg['version'] = target.version kwargs = {'retry': None} if self.call: kwargs['wait_for_reply'] = True kwargs['timeout'] = None prep_kwargs = {} if self.prepare_cap is not _notset: prep_kwargs['version_cap'] = self.prepare_cap if self.prepare_version is not _notset: prep_kwargs['version'] = self.prepare_version if prep_kwargs: client = client.prepare(**prep_kwargs) method = client.call if self.call else client.cast try: method({}, 'foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.RPCVersionCapError, ex) self.assertFalse(self.success) else: self.assertTrue(self.success) transport._send.assert_called_once_with(target, {}, msg, **kwargs) TestVersionCap.generate_scenarios() class TestCanSendVersion(test_utils.BaseTestCase): scenarios = [ ('all_none', dict(cap=None, prepare_cap=_notset, version=None, prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_ok', dict(cap='1.1', prepare_cap=_notset, version='1.0', prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_override_ok', dict(cap='2.0', prepare_cap='1.1', version='1.0', prepare_version='1.0', can_send_version=_notset, can_send=True)), ('ctor_cap_override_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_can_send_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version='1.1', can_send=True)), ('ctor_cap_can_send_none_ok', dict(cap='1.1', prepare_cap=None, version='1.0', prepare_version=_notset, can_send_version=None, can_send=True)), ('ctor_cap_minor_fail', dict(cap='1.0', prepare_cap=_notset, version='1.1', prepare_version=_notset, can_send_version=_notset, can_send=False)), ('ctor_cap_major_fail', dict(cap='2.0', prepare_cap=_notset, version=None, prepare_version='1.0', can_send_version=_notset, can_send=False)), ('ctor_cap_none_version_ok', dict(cap=None, prepare_cap=_notset, version='1.0', prepare_version=_notset, can_send_version=_notset, can_send=True)), ('ctor_cap_version_none_fail', dict(cap='1.0', prepare_cap=_notset, version=None, prepare_version=_notset, can_send_version=_notset, can_send=False)), ('ctor_cap_version_can_send_none_fail', dict(cap='1.0', prepare_cap=_notset, version='1.0', prepare_version=_notset, can_send_version=None, can_send=False)), ] def test_version_cap(self): self.config(rpc_response_timeout=None) transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(version=self.version) client = oslo_messaging.RPCClient(transport, target, version_cap=self.cap) prep_kwargs = {} if self.prepare_cap is not _notset: prep_kwargs['version_cap'] = self.prepare_cap if self.prepare_version is not _notset: prep_kwargs['version'] = 
self.prepare_version if prep_kwargs: client = client.prepare(**prep_kwargs) if self.can_send_version is not _notset: can_send = client.can_send_version(version=self.can_send_version) call_context_can_send = client.prepare().can_send_version( version=self.can_send_version) self.assertEqual(can_send, call_context_can_send) else: can_send = client.can_send_version() self.assertEqual(self.can_send, can_send) def test_invalid_version_type(self): target = oslo_messaging.Target(topic='sometopic') transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = oslo_messaging.RPCClient(transport, target) self.assertRaises(exceptions.MessagingException, client.prepare, version='5') self.assertRaises(exceptions.MessagingException, client.prepare, version='5.a') self.assertRaises(exceptions.MessagingException, client.prepare, version='5.5.a') class TestTransportWarning(test_utils.BaseTestCase): @mock.patch('oslo_messaging.rpc.client.LOG') def test_warning_when_notifier_transport(self, log): transport = oslo_messaging.get_notification_transport(self.conf) oslo_messaging.RPCClient(transport, oslo_messaging.Target()) log.warning.assert_called_once_with( "Using notification transport for RPC. Please use " "get_rpc_transport to obtain an RPC transport " "instance.") oslo.messaging-5.35.0/oslo_messaging/tests/rpc/__init__.py0000666000175100017510000000000013224676046023632 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/tests/rpc/test_dispatcher.py0000777000175100017510000002675513224676046025314 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
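# The scenarios below exercise oslo_messaging.RPCDispatcher, which selects
# an endpoint whose target namespace/version are compatible with the
# incoming message, checks the requested method against the access policy,
# and invokes it. A minimal sketch (endpoint and method names are
# illustrative; the positional constructor arguments mirror the tests):
#
#     import oslo_messaging
#
#     class Endpoint(object):
#         target = oslo_messaging.Target(namespace='testns', version='1.5')
#
#         def foo(self, ctxt, **kwargs):
#             return 'done'
#
#     dispatcher = oslo_messaging.RPCDispatcher(
#         [Endpoint()], None, oslo_messaging.DefaultRPCAccessPolicy)
#     # dispatch() takes an incoming message exposing .ctxt and .message;
#     # {'method': 'foo', 'namespace': 'testns'} is routed to Endpoint.foo,
#     # unknown methods raise NoSuchMethod, and incompatible namespaces or
#     # versions raise UnsupportedVersion, as the scenarios below assert.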
import testscenarios import oslo_messaging from oslo_messaging import rpc from oslo_messaging import serializer as msg_serializer from oslo_messaging.tests import utils as test_utils from six.moves import mock load_tests = testscenarios.load_tests_apply_scenarios class _FakeEndpoint(object): def __init__(self, target=None): self.target = target def foo(self, ctxt, **kwargs): pass @rpc.expose def bar(self, ctxt, **kwargs): pass def _foobar(self, ctxt, **kwargs): pass class TestDispatcher(test_utils.BaseTestCase): scenarios = [ ('no_endpoints', dict(endpoints=[], access_policy=None, dispatch_to=None, ctxt={}, msg=dict(method='foo'), exposed_methods=['foo', 'bar', '_foobar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('default_target', dict(endpoints=[{}], access_policy=None, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo'), exposed_methods=['foo', 'bar', '_foobar'], success=True, ex=None)), ('default_target_ctxt_and_args', dict(endpoints=[{}], access_policy=oslo_messaging.LegacyRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='bar'), ctxt=dict(user='bob'), msg=dict(method='bar', args=dict(blaa=True)), exposed_methods=['foo', 'bar', '_foobar'], success=True, ex=None)), ('default_target_namespace', dict(endpoints=[{}], access_policy=oslo_messaging.LegacyRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace=None), exposed_methods=['foo', 'bar', '_foobar'], success=True, ex=None)), ('default_target_version', dict(endpoints=[{}], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', version='1.0'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('default_target_no_such_method', dict(endpoints=[{}], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foobar'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.NoSuchMethod)), ('namespace', dict(endpoints=[{}, dict(namespace='testns')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=1, method='foo'), ctxt={}, msg=dict(method='foo', namespace='testns'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('namespace_mismatch', dict(endpoints=[{}, dict(namespace='testns')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foo', namespace='nstest'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('version', dict(endpoints=[dict(version='1.5'), dict(version='3.4')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=1, method='foo'), ctxt={}, msg=dict(method='foo', version='3.2'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('version_mismatch', dict(endpoints=[dict(version='1.5'), dict(version='3.0')], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foo', version='3.2'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('message_in_null_namespace_with_multiple_namespaces', dict(endpoints=[dict(namespace='testns', legacy_namespaces=[None])], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace=None), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('message_in_wrong_namespace_with_multiple_namespaces', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None])], 
access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=None, ctxt={}, msg=dict(method='foo', namespace='wrong'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.UnsupportedVersion)), ('message_with_endpoint_no_private_and_public_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None])], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace='testns'), exposed_methods=['foo', 'bar'], success=True, ex=None)), ('message_with_endpoint_no_private_and_private_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None], )], access_policy=oslo_messaging.DefaultRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='_foobar'), ctxt={}, msg=dict(method='_foobar', namespace='testns'), exposed_methods=['foo', 'bar'], success=False, ex=oslo_messaging.NoSuchMethod)), ('message_with_endpoint_explicitly_exposed_without_exposed_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None], )], access_policy=oslo_messaging.ExplicitRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='foo'), ctxt={}, msg=dict(method='foo', namespace='testns'), exposed_methods=['bar'], success=False, ex=oslo_messaging.NoSuchMethod)), ('message_with_endpoint_explicitly_exposed_with_exposed_method', dict(endpoints=[dict(namespace='testns', legacy_namespaces=['second', None], )], access_policy=oslo_messaging.ExplicitRPCAccessPolicy, dispatch_to=dict(endpoint=0, method='bar'), ctxt={}, msg=dict(method='bar', namespace='testns'), exposed_methods=['bar'], success=True, ex=None)), ] def test_dispatcher(self): def _set_endpoint_mock_properties(endpoint): endpoint.foo = mock.Mock(spec=dir(_FakeEndpoint.foo)) # mock doesn't pick up the decorated method. 
endpoint.bar = mock.Mock(spec=dir(_FakeEndpoint.bar)) endpoint.bar.exposed = mock.PropertyMock(return_value=True) endpoint._foobar = mock.Mock(spec=dir(_FakeEndpoint._foobar)) return endpoint endpoints = [_set_endpoint_mock_properties(mock.Mock( spec=_FakeEndpoint, target=oslo_messaging.Target(**e))) for e in self.endpoints] serializer = None dispatcher = oslo_messaging.RPCDispatcher(endpoints, serializer, self.access_policy) incoming = mock.Mock(ctxt=self.ctxt, message=self.msg) res = None try: res = dispatcher.dispatch(incoming) except Exception as ex: self.assertFalse(self.success, ex) self.assertIsNotNone(self.ex, ex) self.assertIsInstance(ex, self.ex, ex) if isinstance(ex, oslo_messaging.NoSuchMethod): self.assertEqual(self.msg.get('method'), ex.method) elif isinstance(ex, oslo_messaging.UnsupportedVersion): self.assertEqual(self.msg.get('version', '1.0'), ex.version) if ex.method: self.assertEqual(self.msg.get('method'), ex.method) else: self.assertTrue(self.success, "Unexpected success of operation during testing") self.assertIsNotNone(res) for n, endpoint in enumerate(endpoints): for method_name in self.exposed_methods: method = getattr(endpoint, method_name) if self.dispatch_to and n == self.dispatch_to['endpoint'] and \ method_name == self.dispatch_to['method'] and \ method_name in self.exposed_methods: method.assert_called_once_with( self.ctxt, **self.msg.get('args', {})) else: self.assertEqual(0, method.call_count, 'method: {}'.format(method)) class TestSerializer(test_utils.BaseTestCase): scenarios = [ ('no_args_or_retval', dict(ctxt={}, dctxt={}, args={}, retval=None)), ('args_and_retval', dict(ctxt=dict(user='bob'), dctxt=dict(user='alice'), args=dict(a='a', b='b', c='c'), retval='d')), ] def test_serializer(self): endpoint = _FakeEndpoint() serializer = msg_serializer.NoOpSerializer() dispatcher = oslo_messaging.RPCDispatcher([endpoint], serializer) endpoint.foo = mock.Mock() args = dict([(k, 'd' + v) for k, v in self.args.items()]) endpoint.foo.return_value = self.retval serializer.serialize_entity = mock.Mock() serializer.deserialize_entity = mock.Mock() serializer.deserialize_context = mock.Mock() serializer.deserialize_context.return_value = self.dctxt expected_side_effect = ['d' + arg for arg in self.args] serializer.deserialize_entity.side_effect = expected_side_effect serializer.serialize_entity.return_value = None if self.retval: serializer.serialize_entity.return_value = 's' + self.retval incoming = mock.Mock() incoming.ctxt = self.ctxt incoming.message = dict(method='foo', args=self.args) retval = dispatcher.dispatch(incoming) if self.retval is not None: self.assertEqual('s' + self.retval, retval) endpoint.foo.assert_called_once_with(self.dctxt, **args) serializer.deserialize_context.assert_called_once_with(self.ctxt) expected_calls = [mock.call(self.dctxt, arg) for arg in self.args] self.assertEqual(expected_calls, serializer.deserialize_entity.mock_calls) serializer.serialize_entity.assert_called_once_with(self.dctxt, self.retval) oslo.messaging-5.35.0/oslo_messaging/tests/rpc/test_server.py0000666000175100017510000010530313224676046024454 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import warnings import eventlet import fixtures from oslo_config import cfg from six.moves import mock import testscenarios import oslo_messaging from oslo_messaging import rpc from oslo_messaging.rpc import dispatcher from oslo_messaging.rpc import server as rpc_server_module from oslo_messaging import server as server_module from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class ServerSetupMixin(object): class Server(object): def __init__(self, transport, topic, server, endpoint, serializer, exchange): self.controller = ServerSetupMixin.ServerController() target = oslo_messaging.Target(topic=topic, server=server, exchange=exchange) self.server = oslo_messaging.get_rpc_server(transport, target, [endpoint, self.controller], serializer=serializer) def wait(self): # Wait for the executor to process the stop message, indicating all # test messages have been processed self.controller.stopped.wait() # Check start() does nothing with a running server self.server.start() self.server.stop() self.server.wait() def start(self): self.server.start() class ServerController(object): def __init__(self): self.stopped = threading.Event() def stop(self, ctxt): self.stopped.set() class TestSerializer(object): def serialize_entity(self, ctxt, entity): return ('s' + entity) if entity else entity def deserialize_entity(self, ctxt, entity): return ('d' + entity) if entity else entity def serialize_context(self, ctxt): return dict([(k, 's' + v) for k, v in ctxt.items()]) def deserialize_context(self, ctxt): return dict([(k, 'd' + v) for k, v in ctxt.items()]) def __init__(self): self.serializer = self.TestSerializer() def _setup_server(self, transport, endpoint, topic=None, server=None, exchange=None): server = self.Server(transport, topic=topic or 'testtopic', server=server or 'testserver', endpoint=endpoint, serializer=self.serializer, exchange=exchange) server.start() return server def _stop_server(self, client, server, topic=None, exchange=None): client.cast({}, 'stop') server.wait() def _setup_client(self, transport, topic='testtopic', exchange=None): target = oslo_messaging.Target(topic=topic, exchange=exchange) return oslo_messaging.RPCClient(transport, target=target, serializer=self.serializer) class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin): def __init__(self, *args): super(TestRPCServer, self).__init__(*args) ServerSetupMixin.__init__(self) def setUp(self): super(TestRPCServer, self).setUp(conf=cfg.ConfigOpts()) # FakeExchangeManager uses a class-level exchanges mapping; "reset" it # before tests assert amount of items stored self.useFixture(fixtures.MonkeyPatch( 'oslo_messaging._drivers.impl_fake.FakeExchangeManager._exchanges', new_value={})) @mock.patch('warnings.warn') def test_constructor(self, warn): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() access_policy = dispatcher.DefaultRPCAccessPolicy warnings.simplefilter("always", FutureWarning) server = 
oslo_messaging.get_rpc_server(transport, target, endpoints, serializer=serializer, access_policy=access_policy) self.assertIs(server.conf, self.conf) self.assertIs(server.transport, transport) self.assertIsInstance(server.dispatcher, oslo_messaging.RPCDispatcher) self.assertIs(server.dispatcher.endpoints, endpoints) self.assertIs(server.dispatcher.serializer, serializer) self.assertEqual('blocking', server.executor_type) self.assertEqual([ mock.call("blocking executor is deprecated. Executor default will " "be removed. Use explicitly threading or eventlet " "instead in version 'pike' and will be removed in " "version 'rocky'", category=FutureWarning, stacklevel=3) ], warn.mock_calls) @mock.patch('warnings.warn') def test_constructor_without_explicit_RPCAccessPolicy(self, warn): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() warnings.simplefilter("always", FutureWarning) oslo_messaging.get_rpc_server(transport, target, endpoints, serializer=serializer) self.assertEqual([ mock.call("blocking executor is deprecated. Executor default will " "be removed. Use explicitly threading or eventlet " "instead in version 'pike' and will be removed in " "version 'rocky'", category=FutureWarning, stacklevel=3) ], warn.mock_calls) def test_server_wait_method(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() class MagicMockIgnoreArgs(mock.MagicMock): """MagicMock ignores arguments. A MagicMock which can never misinterpret the arguments passed to it during construction. """ def __init__(self, *args, **kwargs): super(MagicMockIgnoreArgs, self).__init__() server = oslo_messaging.get_rpc_server(transport, target, endpoints, serializer=serializer) # Mocking executor server._executor_cls = MagicMockIgnoreArgs server._create_listener = MagicMockIgnoreArgs() server.dispatcher = MagicMockIgnoreArgs() # Here assigning executor's listener object to listener variable # before calling wait method, because in wait method we are # setting executor to None. 
server.start() listener = server.listener server.stop() # call server wait method server.wait() self.assertEqual(1, listener.cleanup.call_count) def test_no_target_server(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') server = oslo_messaging.get_rpc_server( transport, oslo_messaging.Target(topic='testtopic'), []) try: server.start() except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) self.assertEqual('testtopic', ex.target.topic) else: self.assertTrue(False) def test_no_server_topic(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') target = oslo_messaging.Target(server='testserver') server = oslo_messaging.get_rpc_server(transport, target, []) try: server.start() except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) self.assertEqual('testserver', ex.target.server) else: self.assertTrue(False) def _test_no_client_topic(self, call=True): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') client = self._setup_client(transport, topic=None) method = client.call if call else client.cast try: method({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex) self.assertIsNotNone(ex.target) else: self.assertTrue(False) def test_no_client_topic_call(self): self._test_no_client_topic(call=True) def test_no_client_topic_cast(self): self._test_no_client_topic(call=False) def test_client_call_timeout(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') finished = False wait = threading.Condition() class TestEndpoint(object): def ping(self, ctxt, arg): with wait: if not finished: wait.wait() server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) try: client.prepare(timeout=0).call({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.MessagingTimeout, ex) else: self.assertTrue(False) with wait: finished = True wait.notify() self._stop_server(client, server_thread) def test_unknown_executor(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') try: oslo_messaging.get_rpc_server(transport, None, [], executor='foo') except Exception as ex: self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure) self.assertEqual('foo', ex.executor) else: self.assertTrue(False) def test_cast(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def __init__(self): self.pings = [] def ping(self, ctxt, arg): self.pings.append(arg) endpoint = TestEndpoint() server_thread = self._setup_server(transport, endpoint) client = self._setup_client(transport) client.cast({}, 'ping', arg='foo') client.cast({}, 'ping', arg='bar') self._stop_server(client, server_thread) self.assertEqual(['dsfoo', 'dsbar'], endpoint.pings) def test_call(self): # NOTE(milan): using a separate transport instance for each the client # and the server to be able to check independent transport instances # can communicate over same exchange&topic transport_srv = oslo_messaging.get_rpc_transport(self.conf, url='fake:') transport_cli = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ping(self, ctxt, arg): return arg server_thread = self._setup_server(transport_srv, TestEndpoint()) client = self._setup_client(transport_cli) self.assertIsNone(client.call({}, 'ping', arg=None)) self.assertEqual(0, client.call({}, 'ping', arg=0)) self.assertFalse(client.call({}, 'ping', 
arg=False)) self.assertEqual([], client.call({}, 'ping', arg=[])) self.assertEqual({}, client.call({}, 'ping', arg={})) self.assertEqual('dsdsfoo', client.call({}, 'ping', arg='foo')) self._stop_server(client, server_thread) def test_direct_call(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ping(self, ctxt, arg): return arg server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) direct = client.prepare(server='testserver') self.assertIsNone(direct.call({}, 'ping', arg=None)) self.assertEqual(0, direct.call({}, 'ping', arg=0)) self.assertFalse(direct.call({}, 'ping', arg=False)) self.assertEqual([], direct.call({}, 'ping', arg=[])) self.assertEqual({}, direct.call({}, 'ping', arg={})) self.assertEqual('dsdsfoo', direct.call({}, 'ping', arg='foo')) self._stop_server(client, server_thread) def test_context(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ctxt_check(self, ctxt, key): return ctxt[key] server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) self.assertEqual('dsdsb', client.call({'dsa': 'b'}, 'ctxt_check', key='a')) self._stop_server(client, server_thread) def test_failure(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') class TestEndpoint(object): def ping(self, ctxt, arg): raise ValueError(arg) debugs = [] errors = [] def stub_debug(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] debugs.append(str(msg) % a) def stub_error(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] errors.append(str(msg) % a) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'debug', stub_debug)) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'error', stub_error)) server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) try: client.call({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, ValueError) self.assertEqual('dsfoo', str(ex)) self.assertEqual(0, len(debugs)) self.assertGreater(len(errors), 0) else: self.assertTrue(False) self._stop_server(client, server_thread) def test_expected_failure(self): transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:') debugs = [] errors = [] def stub_debug(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] debugs.append(str(msg) % a) def stub_error(msg, *a, **kw): if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]): a = a[0] errors.append(str(msg) % a) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'debug', stub_debug)) self.useFixture(fixtures.MockPatchObject( rpc_server_module.LOG, 'error', stub_error)) class TestEndpoint(object): @oslo_messaging.expected_exceptions(ValueError) def ping(self, ctxt, arg): raise ValueError(arg) server_thread = self._setup_server(transport, TestEndpoint()) client = self._setup_client(transport) try: client.call({}, 'ping', arg='foo') except Exception as ex: self.assertIsInstance(ex, ValueError) self.assertEqual('dsfoo', str(ex)) self.assertGreater(len(debugs), 0) self.assertEqual(0, len(errors)) else: self.assertTrue(False) self._stop_server(client, server_thread) @mock.patch('oslo_messaging.rpc.server.LOG') def test_warning_when_notifier_transport(self, log): transport = oslo_messaging.get_notification_transport(self.conf) target = 
oslo_messaging.Target(topic='foo', server='bar') endpoints = [object()] serializer = object() oslo_messaging.get_rpc_server(transport, target, endpoints, serializer=serializer) log.warning.assert_called_once_with( "Using notification transport for RPC. Please use " "get_rpc_transport to obtain an RPC transport " "instance.") class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin): _exchanges = [ ('same_exchange', dict(exchange1=None, exchange2=None)), ('diff_exchange', dict(exchange1='x1', exchange2='x2')), ] _topics = [ ('same_topic', dict(topic1='t', topic2='t')), ('diff_topic', dict(topic1='t1', topic2='t2')), ] _server = [ ('same_server', dict(server1=None, server2=None)), ('diff_server', dict(server1='s1', server2='s2')), ] _fanout = [ ('not_fanout', dict(fanout1=None, fanout2=None)), ('fanout', dict(fanout1=True, fanout2=True)), ] _method = [ ('call', dict(call1=True, call2=True)), ('cast', dict(call1=False, call2=False)), ] _endpoints = [ ('one_endpoint', dict(multi_endpoints=False, expect1=['ds1', 'ds2'], expect2=['ds1', 'ds2'])), ('two_endpoints', dict(multi_endpoints=True, expect1=['ds1'], expect2=['ds2'])), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges, cls._topics, cls._server, cls._fanout, cls._method, cls._endpoints) # fanout call not supported def filter_fanout_call(scenario): params = scenario[1] fanout = params['fanout1'] or params['fanout2'] call = params['call1'] or params['call2'] return not (call and fanout) # listening multiple times on same topic/server pair not supported def filter_same_topic_and_server(scenario): params = scenario[1] single_topic = params['topic1'] == params['topic2'] single_server = params['server1'] == params['server2'] return not (single_topic and single_server) # fanout to multiple servers on same topic and exchange each endpoint # will receive both messages def fanout_to_servers(scenario): params = scenario[1] fanout = params['fanout1'] or params['fanout2'] single_exchange = params['exchange1'] == params['exchange2'] single_topic = params['topic1'] == params['topic2'] multi_servers = params['server1'] != params['server2'] if fanout and single_exchange and single_topic and multi_servers: params['expect1'] = params['expect1'][:] + params['expect1'] params['expect2'] = params['expect2'][:] + params['expect2'] return scenario # multiple endpoints on same topic and exchange # either endpoint can get either message def single_topic_multi_endpoints(scenario): params = scenario[1] single_exchange = params['exchange1'] == params['exchange2'] single_topic = params['topic1'] == params['topic2'] if single_topic and single_exchange and params['multi_endpoints']: params['expect_either'] = (params['expect1'] + params['expect2']) params['expect1'] = params['expect2'] = [] else: params['expect_either'] = [] return scenario for f in [filter_fanout_call, filter_same_topic_and_server]: cls.scenarios = [i for i in cls.scenarios if f(i)] for m in [fanout_to_servers, single_topic_multi_endpoints]: cls.scenarios = [m(i) for i in cls.scenarios] def __init__(self, *args): super(TestMultipleServers, self).__init__(*args) ServerSetupMixin.__init__(self) def setUp(self): super(TestMultipleServers, self).setUp(conf=cfg.ConfigOpts()) self.useFixture(fixtures.MonkeyPatch( 'oslo_messaging._drivers.impl_fake.FakeExchangeManager._exchanges', new_value={})) def test_multiple_servers(self): transport1 = oslo_messaging.get_rpc_transport(self.conf, url='fake:') if self.exchange1 != self.exchange2: transport2 
= oslo_messaging.get_rpc_transport(self.conf, url='fake:') else: transport2 = transport1 class TestEndpoint(object): def __init__(self): self.pings = [] def ping(self, ctxt, arg): self.pings.append(arg) def alive(self, ctxt): return 'alive' if self.multi_endpoints: endpoint1, endpoint2 = TestEndpoint(), TestEndpoint() else: endpoint1 = endpoint2 = TestEndpoint() server1 = self._setup_server(transport1, endpoint1, topic=self.topic1, exchange=self.exchange1, server=self.server1) server2 = self._setup_server(transport2, endpoint2, topic=self.topic2, exchange=self.exchange2, server=self.server2) client1 = self._setup_client(transport1, topic=self.topic1, exchange=self.exchange1) client2 = self._setup_client(transport2, topic=self.topic2, exchange=self.exchange2) client1 = client1.prepare(server=self.server1) client2 = client2.prepare(server=self.server2) if self.fanout1: client1.call({}, 'alive') client1 = client1.prepare(fanout=True) if self.fanout2: client2.call({}, 'alive') client2 = client2.prepare(fanout=True) (client1.call if self.call1 else client1.cast)({}, 'ping', arg='1') (client2.call if self.call2 else client2.cast)({}, 'ping', arg='2') self._stop_server(client1.prepare(fanout=None), server1, topic=self.topic1, exchange=self.exchange1) self._stop_server(client2.prepare(fanout=None), server2, topic=self.topic2, exchange=self.exchange2) def check(pings, expect): self.assertEqual(len(expect), len(pings)) for a in expect: self.assertIn(a, pings) if self.expect_either: check(endpoint1.pings + endpoint2.pings, self.expect_either) else: check(endpoint1.pings, self.expect1) check(endpoint2.pings, self.expect2) TestMultipleServers.generate_scenarios() class TestServerLocking(test_utils.BaseTestCase): def setUp(self): super(TestServerLocking, self).setUp(conf=cfg.ConfigOpts()) def _logmethod(name): def method(self, *args, **kwargs): with self._lock: self._calls.append(name) return method executors = [] class FakeExecutor(object): def __init__(self, *args, **kwargs): self._lock = threading.Lock() self._calls = [] executors.append(self) submit = _logmethod('submit') shutdown = _logmethod('shutdown') self.executors = executors class MessageHandlingServerImpl(oslo_messaging.MessageHandlingServer): def _create_listener(self): return mock.Mock() def _process_incoming(self, incoming): pass self.server = MessageHandlingServerImpl(mock.Mock(), mock.Mock()) self.server._executor_cls = FakeExecutor def test_start_stop_wait(self): # Test a simple execution of start, stop, wait in order eventlet.spawn(self.server.start) self.server.stop() self.server.wait() self.assertEqual(1, len(self.executors)) self.assertEqual(['shutdown'], self.executors[0]._calls) self.assertTrue(self.server.listener.cleanup.called) def test_reversed_order(self): # Test that if we call wait, stop, start, these will be correctly # reordered eventlet.spawn(self.server.wait) # This is non-deterministic, but there's not a great deal we can do # about that eventlet.sleep(0) eventlet.spawn(self.server.stop) eventlet.sleep(0) eventlet.spawn(self.server.start) self.server.wait() self.assertEqual(1, len(self.executors)) self.assertEqual(['shutdown'], self.executors[0]._calls) def test_wait_for_running_task(self): # Test that if 2 threads call a method simultaneously, both will wait, # but only 1 will call the underlying executor method. 
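# NOTE: the shape of the race under test, as a minimal sketch (names here are
# illustrative only, not original code):
#
#     t1 = eventlet.spawn(server.start)
#     t2 = eventlet.spawn(server.start)
#     t1.wait(); t2.wait()
#     assert len(executors) == 1   # start() built exactly one executor
#
# The events below freeze the winning thread ("the runner") inside the
# executor constructor so the test can observe that the losing thread
# ("the waiter") blocks rather than creating a second executor.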
start_event = threading.Event() finish_event = threading.Event() running_event = threading.Event() done_event = threading.Event() _runner = [None] class SteppingFakeExecutor(self.server._executor_cls): def __init__(self, *args, **kwargs): # Tell the test which thread won the race _runner[0] = eventlet.getcurrent() running_event.set() start_event.wait() super(SteppingFakeExecutor, self).__init__(*args, **kwargs) done_event.set() finish_event.wait() self.server._executor_cls = SteppingFakeExecutor start1 = eventlet.spawn(self.server.start) start2 = eventlet.spawn(self.server.start) # Wait until one of the threads starts running running_event.wait() runner = _runner[0] waiter = start2 if runner == start1 else start1 waiter_finished = threading.Event() waiter.link(lambda _: waiter_finished.set()) # At this point, runner is running start(), and waiter is waiting for # it to complete. runner has not yet logged anything. self.assertEqual(0, len(self.executors)) self.assertFalse(waiter_finished.is_set()) # Let the runner log the call start_event.set() done_event.wait() # We haven't signalled completion yet, so submit shouldn't have run self.assertEqual(1, len(self.executors)) self.assertEqual([], self.executors[0]._calls) self.assertFalse(waiter_finished.is_set()) # Let the runner complete finish_event.set() waiter.wait() runner.wait() # Check that both threads have finished, start was only called once, # and nothing was submitted to the executor self.assertTrue(waiter_finished.is_set()) self.assertEqual(1, len(self.executors)) self.assertEqual([], self.executors[0]._calls) def test_start_stop_wait_stop_wait(self): # Test that we behave correctly when calling stop/wait more than once. # Subsequent calls should be noops. self.server.start() self.server.stop() self.server.wait() self.server.stop() self.server.wait() self.assertEqual(len(self.executors), 1) self.assertEqual(['shutdown'], self.executors[0]._calls) self.assertTrue(self.server.listener.cleanup.called) def test_state_wrapping(self): # Test that we behave correctly if a thread waits, and the server state # has wrapped when it is next scheduled # Ensure that if 2 threads wait for the completion of 'start', the # first will wait until complete_event is signalled, but the second # will continue complete_event = threading.Event() complete_waiting_callback = threading.Event() start_state = self.server._states['start'] old_wait_for_completion = start_state.wait_for_completion waited = [False] def new_wait_for_completion(*args, **kwargs): if not waited[0]: waited[0] = True complete_waiting_callback.set() complete_event.wait() old_wait_for_completion(*args, **kwargs) start_state.wait_for_completion = new_wait_for_completion # thread1 will wait for start to complete until we signal it thread1 = eventlet.spawn(self.server.stop) thread1_finished = threading.Event() thread1.link(lambda _: thread1_finished.set()) self.server.start() complete_waiting_callback.wait() # The server should have started, but stop should not have been called self.assertEqual(1, len(self.executors)) self.assertEqual([], self.executors[0]._calls) self.assertFalse(thread1_finished.is_set()) self.server.stop() self.server.wait() # We should have gone through all the states, and thread1 should still # be waiting self.assertEqual(1, len(self.executors)) self.assertEqual(['shutdown'], self.executors[0]._calls) self.assertFalse(thread1_finished.is_set()) # Start again self.server.start() # We should now record 2 executors (one for each call to start) self.assertEqual(2, len(self.executors)) 
self.assertEqual(['shutdown'], self.executors[0]._calls) self.assertEqual([], self.executors[1]._calls) self.assertFalse(thread1_finished.is_set()) # Allow thread1 to complete complete_event.set() thread1_finished.wait() # thread1 should now have finished, and stop should not have been # called again on either the first or second executor self.assertEqual(2, len(self.executors)) self.assertEqual(['shutdown'], self.executors[0]._calls) self.assertEqual([], self.executors[1]._calls) self.assertTrue(thread1_finished.is_set()) @mock.patch.object(server_module, 'DEFAULT_LOG_AFTER', 1) @mock.patch.object(server_module, 'LOG') def test_logging(self, mock_log): # Test that we generate a log message if we wait longer than # DEFAULT_LOG_AFTER log_event = threading.Event() mock_log.warning.side_effect = lambda _, __: log_event.set() # Call stop without calling start. We should log a wait after 1 second thread = eventlet.spawn(self.server.stop) log_event.wait() # Redundant given that we already waited, but it's nice to assert self.assertTrue(mock_log.warning.called) thread.kill() @mock.patch.object(server_module, 'LOG') def test_logging_explicit_wait(self, mock_log): # Test that we generate a log message if we wait longer than # the number of seconds passed to log_after log_event = threading.Event() mock_log.warning.side_effect = lambda _, __: log_event.set() # Call stop without calling start. We should log a wait after 1 second thread = eventlet.spawn(self.server.stop, log_after=1) log_event.wait() # Redundant given that we already waited, but it's nice to assert self.assertTrue(mock_log.warning.called) thread.kill() @mock.patch.object(server_module, 'LOG') def test_logging_with_timeout(self, mock_log): # Test that we log a message after log_after seconds if we've also # specified an absolute timeout log_event = threading.Event() mock_log.warning.side_effect = lambda _, __: log_event.set() # Call stop without calling start. We should log a wait after 1 second thread = eventlet.spawn(self.server.stop, log_after=1, timeout=2) log_event.wait() # Redundant given that we already waited, but it's nice to assert self.assertTrue(mock_log.warning.called) thread.kill() def test_timeout_wait(self): # Test that we will eventually timeout when passing the timeout option # if a preceding condition is not satisfied. self.assertRaises(server_module.TaskTimeout, self.server.stop, timeout=1) def test_timeout_running(self): # Test that we will eventually timeout if we're waiting for another # thread to complete this task # Start the server, which will also instantiate an executor self.server.start() self.server.stop() shutdown_called = threading.Event() # Patch the executor's shutdown method to be very slow def slow_shutdown(wait): shutdown_called.set() eventlet.sleep(10) self.executors[0].shutdown = slow_shutdown # Call wait in a new thread thread = eventlet.spawn(self.server.wait) # Wait until the thread is in the slow shutdown method shutdown_called.wait() # Call wait again in the main thread with a timeout self.assertRaises(server_module.TaskTimeout, self.server.wait, timeout=1) thread.kill() @mock.patch.object(server_module, 'LOG') def test_log_after_zero(self, mock_log): # Test that we do not log a message after DEFAULT_LOG_AFTER if the # caller gave log_after=0 # Call stop without calling start. self.assertRaises(server_module.TaskTimeout, self.server.stop, log_after=0, timeout=2) # We timed out. Ensure we didn't log anything. 
self.assertFalse(mock_log.warning.called) class TestRPCExposeDecorator(test_utils.BaseTestCase): def foo(self): pass @rpc.expose def bar(self): """bar docstring""" pass def test_undecorated(self): self.assertRaises(AttributeError, lambda: self.foo.exposed) def test_decorated(self): self.assertEqual(True, self.bar.exposed) self.assertEqual("""bar docstring""", self.bar.__doc__) self.assertEqual('bar', self.bar.__name__) oslo.messaging-5.35.0/oslo_messaging/tests/test_opts.py0000666000175100017510000000465013224676046023352 0ustar zuulzuul00000000000000 # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import mock import stevedore import testtools from oslo_messaging import server try: from oslo_messaging import opts except ImportError: opts = None from oslo_messaging.tests import utils as test_utils @testtools.skipIf(opts is None, "Options not importable") class OptsTestCase(test_utils.BaseTestCase): def _test_list_opts(self, result): self.assertEqual(7, len(result)) groups = [g for (g, l) in result] self.assertIn(None, groups) self.assertIn('matchmaker_redis', groups) self.assertIn('oslo_messaging_zmq', groups) self.assertIn('oslo_messaging_amqp', groups) self.assertIn('oslo_messaging_notifications', groups) self.assertIn('oslo_messaging_rabbit', groups) self.assertIn('oslo_messaging_kafka', groups) opt_names = [o.name for (g, l) in result for o in l] self.assertIn('rpc_backend', opt_names) def test_list_opts(self): self._test_list_opts(opts.list_opts()) def test_entry_point(self): result = None for ext in stevedore.ExtensionManager('oslo.config.opts', invoke_on_load=True): if ext.name == "oslo.messaging": result = ext.obj break self.assertIsNotNone(result) self._test_list_opts(result) def test_defaults(self): transport = mock.Mock() transport.conf = self.conf class MessageHandlingServerImpl(server.MessageHandlingServer): def _create_listener(self): pass def _process_incoming(self, incoming): pass MessageHandlingServerImpl(transport, mock.Mock()) opts.set_defaults(self.conf, executor_thread_pool_size=100) self.assertEqual(100, self.conf.executor_thread_pool_size) oslo.messaging-5.35.0/oslo_messaging/tests/test_urls.py0000666000175100017510000002335213224676046023352 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
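# NOTE: a minimal sketch of the round-trip these tests exercise, assuming
# 'conf' is a configured ConfigOpts instance (values mirror the multi_host
# scenario below; this is an editorial illustration, not original code):
#
#     url = oslo_messaging.TransportURL.parse(
#         conf, 'foo://u:p@host1:1234,host2:4321/bar')
#     # url.transport == 'foo', url.virtual_host == 'bar', and url.hosts
#     # holds one TransportHost per comma-separated host:port pair;
#     # str(url) serializes it back, re-quoting special characters.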
import testscenarios import oslo_messaging from oslo_messaging.tests import utils as test_utils load_tests = testscenarios.load_tests_apply_scenarios class TestParseURL(test_utils.BaseTestCase): scenarios = [ ('transport', dict(url='foo:', aliases=None, expect=dict(transport='foo'))), ('transport_aliased', dict(url='bar:', aliases=dict(bar='foo'), expect=dict(transport='foo'))), ('virtual_host_slash', dict(url='foo:////', aliases=None, expect=dict(transport='foo', virtual_host='/'))), ('virtual_host', dict(url='foo:///bar', aliases=None, expect=dict(transport='foo', virtual_host='bar'))), ('host', dict(url='foo://host/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host'), ]))), ('ipv6_host', dict(url='foo://[ffff::1]/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='ffff::1'), ]))), ('port', dict(url='foo://host:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host', port=1234), ]))), ('ipv6_port', dict(url='foo://[ffff::1]:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='ffff::1', port=1234), ]))), ('username', dict(url='foo://u@host:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host', port=1234, username='u'), ]))), ('password', dict(url='foo://u:p@host:1234/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host', port=1234, username='u', password='p'), ]))), ('creds_no_host', dict(url='foo://u:p@/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(username='u', password='p'), ]))), ('multi_host', dict(url='foo://u:p@host1:1234,host2:4321/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host1', port=1234, username='u', password='p'), dict(host='host2', port=4321), ]))), ('multi_host_partial_creds', dict(url='foo://u:p@host1,host2/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host1', username='u', password='p'), dict(host='host2'), ]))), ('multi_creds', dict(url='foo://u1:p1@host1:1234,u2:p2@host2:4321/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='host1', port=1234, username='u1', password='p1'), dict(host='host2', port=4321, username='u2', password='p2'), ]))), ('multi_creds_ipv6', dict(url='foo://u1:p1@[ffff::1]:1234,u2:p2@[ffff::2]:4321/bar', aliases=None, expect=dict(transport='foo', virtual_host='bar', hosts=[ dict(host='ffff::1', port=1234, username='u1', password='p1'), dict(host='ffff::2', port=4321, username='u2', password='p2'), ]))), ('quoting', dict(url='foo://u%24:p%26@host:1234/%24', aliases=None, expect=dict(transport='foo', virtual_host='$', hosts=[ dict(host='host', port=1234, username='u$', password='p&'), ]))), ] def test_parse_url(self): self.config(rpc_backend=None) url = oslo_messaging.TransportURL.parse(self.conf, self.url, self.aliases) hosts = [] for host in self.expect.get('hosts', []): hosts.append(oslo_messaging.TransportHost(host.get('host'), host.get('port'), host.get('username'), host.get('password'))) expected = oslo_messaging.TransportURL(self.conf, self.expect.get('transport'), self.expect.get('virtual_host'), hosts) self.assertEqual(expected, url) class TestFormatURL(test_utils.BaseTestCase): scenarios = [ ('rpc_backend', dict(rpc_backend='testbackend', transport=None, virtual_host=None, hosts=[], aliases=None, expected='testbackend:///')), 
('rpc_backend_aliased', dict(rpc_backend='testfoo', transport=None, virtual_host=None, hosts=[], aliases=dict(testfoo='testbackend'), expected='testbackend:///')), ('transport', dict(rpc_backend=None, transport='testtransport', virtual_host=None, hosts=[], aliases=None, expected='testtransport:///')), ('transport_aliased', dict(rpc_backend=None, transport='testfoo', virtual_host=None, hosts=[], aliases=dict(testfoo='testtransport'), expected='testtransport:///')), ('virtual_host', dict(rpc_backend=None, transport='testtransport', virtual_host='/vhost', hosts=[], aliases=None, expected='testtransport:////vhost')), ('host', dict(rpc_backend=None, transport='testtransport', virtual_host='/', hosts=[ dict(hostname='host', port=10, username='bob', password='secret'), ], aliases=None, expected='testtransport://bob:secret@host:10//')), ('multi_host', dict(rpc_backend=None, transport='testtransport', virtual_host='', hosts=[ dict(hostname='h1', port=1000, username='b1', password='s1'), dict(hostname='h2', port=2000, username='b2', password='s2'), ], aliases=None, expected='testtransport://b1:s1@h1:1000,b2:s2@h2:2000/')), ('quoting', dict(rpc_backend=None, transport='testtransport', virtual_host='/$', hosts=[ dict(hostname='host', port=10, username='b$', password='s&'), ], aliases=None, expected='testtransport://b%24:s%26@host:10//%24')), ] def test_parse_url(self): self.config(rpc_backend=self.rpc_backend) hosts = [] for host in self.hosts: hosts.append(oslo_messaging.TransportHost(host.get('hostname'), host.get('port'), host.get('username'), host.get('password'))) url = oslo_messaging.TransportURL(self.conf, self.transport, self.virtual_host, hosts, self.aliases) self.assertEqual(self.expected, str(url)) oslo.messaging-5.35.0/oslo_messaging/_utils.py0000666000175100017510000000330213224676046021454 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def version_is_compatible(imp_version, version): """Determine whether versions are compatible. :param imp_version: The version implemented :param version: The version requested by an incoming message. 
""" if imp_version is None: return True if version is None: return False version_parts = version.split('.') imp_version_parts = imp_version.split('.') try: rev = version_parts[2] except IndexError: rev = 0 try: imp_rev = imp_version_parts[2] except IndexError: imp_rev = 0 if int(version_parts[0]) != int(imp_version_parts[0]): # Major return False if int(version_parts[1]) > int(imp_version_parts[1]): # Minor return False if (int(version_parts[1]) == int(imp_version_parts[1]) and int(rev) > int(imp_rev)): # Revision return False return True class DummyLock(object): def acquire(self): pass def release(self): pass def __enter__(self): self.acquire() def __exit__(self, type, value, traceback): self.release() oslo.messaging-5.35.0/oslo_messaging/_drivers/0000775000175100017510000000000013224676256021423 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/impl_fake.py0000666000175100017510000002206513224676046023730 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import json import threading import time from six import moves import oslo_messaging from oslo_messaging._drivers import base class FakeIncomingMessage(base.RpcIncomingMessage): def __init__(self, ctxt, message, reply_q, requeue): super(FakeIncomingMessage, self).__init__(ctxt, message) self.requeue_callback = requeue self._reply_q = reply_q def reply(self, reply=None, failure=None): if self._reply_q: failure = failure[1] if failure else None self._reply_q.put((reply, failure)) def requeue(self): self.requeue_callback() class FakeListener(base.PollStyleListener): def __init__(self, exchange_manager, targets, pool=None): super(FakeListener, self).__init__() self._exchange_manager = exchange_manager self._targets = targets self._pool = pool self._stopped = threading.Event() # NOTE(sileht): Ensure that all needed queues exists even the listener # have not been polled yet for target in self._targets: exchange = self._exchange_manager.get_exchange(target.exchange) exchange.ensure_queue(target, pool) @base.batch_poll_helper def poll(self, timeout=None): if timeout is not None: deadline = time.time() + timeout else: deadline = None while not self._stopped.is_set(): for target in self._targets: exchange = self._exchange_manager.get_exchange(target.exchange) (ctxt, message, reply_q, requeue) = exchange.poll(target, self._pool) if message is not None: message = FakeIncomingMessage(ctxt, message, reply_q, requeue) return message if deadline is not None: pause = deadline - time.time() if pause < 0: break pause = min(pause, 0.050) else: pause = 0.050 time.sleep(pause) return None def stop(self): self._stopped.set() class FakeExchange(object): def __init__(self, name): self.name = name self._queues_lock = threading.RLock() self._topic_queues = {} self._server_queues = {} def ensure_queue(self, target, pool): with self._queues_lock: if target.server: self._get_server_queue(target.topic, target.server) else: 
self._get_topic_queue(target.topic, pool) def _get_topic_queue(self, topic, pool=None): if pool and (topic, pool) not in self._topic_queues: # NOTE(sileht): if the pool name is set, we need to # copy all the already delivered messages from the # default queue to this queue self._topic_queues[(topic, pool)] = copy.deepcopy( self._get_topic_queue(topic)) return self._topic_queues.setdefault((topic, pool), []) def _get_server_queue(self, topic, server): return self._server_queues.setdefault((topic, server), []) def deliver_message(self, topic, ctxt, message, server=None, fanout=False, reply_q=None): with self._queues_lock: if fanout: queues = [q for t, q in self._server_queues.items() if t[0] == topic] elif server is not None: queues = [self._get_server_queue(topic, server)] else: # NOTE(sileht): ensure at least the queue without # pool name exists self._get_topic_queue(topic) queues = [q for t, q in self._topic_queues.items() if t[0] == topic] def requeue(): self.deliver_message(topic, ctxt, message, server=server, fanout=fanout, reply_q=reply_q) for queue in queues: queue.append((ctxt, message, reply_q, requeue)) def poll(self, target, pool): with self._queues_lock: if target.server: queue = self._get_server_queue(target.topic, target.server) else: queue = self._get_topic_queue(target.topic, pool) return queue.pop(0) if queue else (None, None, None, None) class FakeExchangeManager(object): _exchanges_lock = threading.Lock() _exchanges = {} def __init__(self, default_exchange): self._default_exchange = default_exchange def get_exchange(self, name): if name is None: name = self._default_exchange with self._exchanges_lock: return self._exchanges.setdefault(name, FakeExchange(name)) class FakeDriver(base.BaseDriver): """Fake driver used for testing. This driver passes messages in memory, and should only be used for unit tests. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): super(FakeDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) self._exchange_manager = FakeExchangeManager(default_exchange) def require_features(self, requeue=True): pass @staticmethod def _check_serialize(message): """Make sure a message intended for rpc can be serialized. We specifically want to use json, not our own jsonutils because jsonutils has some extra logic to automatically convert objects to primitive types so that they can be serialized. We want to catch all cases where non-primitive types make it into this code and treat it as an error. 
""" json.dumps(message) def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None): self._check_serialize(message) exchange = self._exchange_manager.get_exchange(target.exchange) reply_q = None if wait_for_reply: reply_q = moves.queue.Queue() exchange.deliver_message(target.topic, ctxt, message, server=target.server, fanout=target.fanout, reply_q=reply_q) if wait_for_reply: try: reply, failure = reply_q.get(timeout=timeout) if failure: raise failure else: return reply except moves.queue.Empty: raise oslo_messaging.MessagingTimeout( 'No reply on topic %s' % target.topic) return None def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, retry=None): # NOTE(sileht): retry doesn't need to be implemented, the fake # transport always works return self._send(target, ctxt, message, wait_for_reply, timeout) def send_notification(self, target, ctxt, message, version, retry=None): # NOTE(sileht): retry doesn't need to be implemented, the fake # transport always works self._send(target, ctxt, message) def listen(self, target, batch_size, batch_timeout): exchange = target.exchange or self._default_exchange listener = FakeListener(self._exchange_manager, [oslo_messaging.Target( topic=target.topic, server=target.server, exchange=exchange), oslo_messaging.Target( topic=target.topic, exchange=exchange)]) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): targets = [ oslo_messaging.Target( topic='%s.%s' % (target.topic, priority), exchange=target.exchange) for target, priority in targets_and_priorities] listener = FakeListener(self._exchange_manager, targets, pool) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def cleanup(self): pass oslo.messaging-5.35.0/oslo_messaging/_drivers/impl_zmq.py0000666000175100017510000001764713224676046023643 0ustar zuulzuul00000000000000# Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import threading from stevedore import driver from oslo_messaging._drivers import base from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers.zmq_driver.client import zmq_client from oslo_messaging._drivers.zmq_driver.server import zmq_server from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_options from oslo_messaging._i18n import _LE RPCException = rpc_common.RPCException class LazyDriverItem(object): def __init__(self, item_cls, *args, **kwargs): self._lock = threading.Lock() self.item = None self.item_class = item_cls self.args = args self.kwargs = kwargs self.process_id = os.getpid() def get(self): # NOTE(ozamiatin): Lazy initialization. # All init stuff moved closer to usage point - lazy init. # Better design approach is to initialize in the driver's # __init__, but 'fork' extensively used by services # breaks all things. 
if self.item is not None and os.getpid() == self.process_id: return self.item with self._lock: if self.item is None or os.getpid() != self.process_id: self.process_id = os.getpid() self.item = self.item_class(*self.args, **self.kwargs) return self.item def cleanup(self): if self.item: self.item.cleanup() class ZmqDriver(base.BaseDriver): """ZeroMQ Driver implementation. Provides implementation of RPC and Notifier APIs by means of ZeroMQ library. See :doc:`zmq_driver` for details. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): """Construct ZeroMQ driver. Initialize driver options. Construct matchmaker - pluggable interface to targets management Name Service Construct client and server controllers :param conf: oslo messaging configuration object :type conf: oslo_config.CONF :param url: transport URL :type url: TransportUrl :param default_exchange: Not used in zmq implementation :type default_exchange: None :param allowed_remote_exmods: remote exception passing options :type allowed_remote_exmods: list """ zmq = zmq_async.import_zmq() if zmq is None: raise ImportError(_LE("ZeroMQ is not available!")) conf = zmq_options.register_opts(conf, url) self.conf = conf self.allowed_remote_exmods = allowed_remote_exmods self.matchmaker = driver.DriverManager( 'oslo.messaging.zmq.matchmaker', self.get_matchmaker_backend(self.conf, url), ).driver(self.conf, url=url) client_cls = zmq_client.ZmqClientProxy if conf.oslo_messaging_zmq.use_pub_sub and not \ conf.oslo_messaging_zmq.use_router_proxy: client_cls = zmq_client.ZmqClientMixDirectPubSub elif not conf.oslo_messaging_zmq.use_pub_sub and not \ conf.oslo_messaging_zmq.use_router_proxy: client_cls = zmq_client.ZmqClientDirect self.client = LazyDriverItem( client_cls, self.conf, self.matchmaker, self.allowed_remote_exmods) self.notifier = LazyDriverItem( client_cls, self.conf, self.matchmaker, self.allowed_remote_exmods) super(ZmqDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) @staticmethod def get_matchmaker_backend(conf, url): zmq_transport, _, matchmaker_backend = url.transport.partition('+') assert zmq_transport == 'zmq', "Needs to be zmq for this transport!" if not matchmaker_backend: return conf.oslo_messaging_zmq.rpc_zmq_matchmaker if matchmaker_backend not in zmq_options.MATCHMAKER_BACKENDS: raise rpc_common.RPCException( _LE("Incorrect matchmaker backend name %(backend_name)s! 
" "Available names are: %(available_names)s") % {"backend_name": matchmaker_backend, "available_names": zmq_options.MATCHMAKER_BACKENDS}) return matchmaker_backend def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, retry=None): """Send RPC message to server :param target: Message destination target :type target: oslo_messaging.Target :param ctxt: Message context :type ctxt: dict :param message: Message payload to pass :type message: dict :param wait_for_reply: Waiting for reply flag :type wait_for_reply: bool :param timeout: Reply waiting timeout in seconds :type timeout: int :param retry: an optional default connection retries configuration None or -1 means to retry forever 0 means no retry N means N retries :type retry: int """ client = self.client.get() if wait_for_reply: return client.send_call(target, ctxt, message, timeout, retry) elif target.fanout: client.send_fanout(target, ctxt, message, retry) else: client.send_cast(target, ctxt, message, retry) def send_notification(self, target, ctxt, message, version, retry=None): """Send notification to server :param target: Message destination target :type target: oslo_messaging.Target :param ctxt: Message context :type ctxt: dict :param message: Message payload to pass :type message: dict :param version: Messaging API version :type version: str :param retry: an optional default connection retries configuration None or -1 means to retry forever 0 means no retry N means N retries :type retry: int """ client = self.notifier.get() client.send_notify(target, ctxt, message, version, retry) def listen(self, target, batch_size, batch_timeout): """Listen to a specified target on a server side :param target: Message destination target :type target: oslo_messaging.Target """ listener = zmq_server.ZmqServer(self, self.conf, self.matchmaker, target) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): """Listen to a specified list of targets on a server side :param targets_and_priorities: List of pairs (target, priority) :type targets_and_priorities: list :param pool: Not used for zmq implementation :type pool: object """ listener = zmq_server.ZmqNotificationServer( self, self.conf, self.matchmaker, targets_and_priorities) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def cleanup(self): """Cleanup all driver's connections finally """ self.client.cleanup() self.notifier.cleanup() oslo.messaging-5.35.0/oslo_messaging/_drivers/common.py0000666000175100017510000004371013224676046023271 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import copy import logging import sys import traceback from oslo_serialization import jsonutils from oslo_utils import timeutils import six import oslo_messaging from oslo_messaging._i18n import _ from oslo_messaging._i18n import _LE from oslo_messaging import _utils as utils LOG = logging.getLogger(__name__) _EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' _EXCEPTIONS_MODULES = ['exceptions', 'builtins'] '''RPC Envelope Version. This version number applies to the top level structure of messages sent out. It does *not* apply to the message payload, which must be versioned independently. For example, when using rpc APIs, a version number is applied for changes to the API being exposed over rpc. This version number is handled in the rpc proxy and dispatcher modules. This version number applies to the message envelope that is used in the serialization done inside the rpc layer. See serialize_msg() and deserialize_msg(). The current message format (version 2.0) is very simple. It is: { 'oslo.version': , 'oslo.message': } Message format version '1.0' is just considered to be the messages we sent without a message envelope. So, the current message envelope just includes the envelope version. It may eventually contain additional information, such as a signature for the message payload. We will JSON encode the application message payload. The message envelope, which includes the JSON encoded application message body, will be passed down to the messaging libraries as a dict. ''' _RPC_ENVELOPE_VERSION = '2.0' _VERSION_KEY = 'oslo.version' _MESSAGE_KEY = 'oslo.message' _REMOTE_POSTFIX = '_Remote' class RPCException(Exception): msg_fmt = _("An unknown RPC related exception occurred.") def __init__(self, message=None, **kwargs): self.kwargs = kwargs if not message: try: message = self.msg_fmt % kwargs except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_LE('Exception in string format operation, ' 'kwargs are:')) for name, value in kwargs.items(): LOG.error("%s: %s", name, value) # at least get the core message out if something happened message = self.msg_fmt super(RPCException, self).__init__(message) class Timeout(RPCException): """Signifies that a timeout has occurred. This exception is raised if the rpc_response_timeout is reached while waiting for a response from the remote side. """ msg_fmt = _('Timeout while waiting on RPC response - ' 'topic: "%(topic)s", RPC method: "%(method)s" ' 'info: "%(info)s"') def __init__(self, info=None, topic=None, method=None): """Initiates Timeout object. :param info: Extra info to convey to the user :param topic: The topic that the rpc call was sent to :param method: The name of the rpc method being called """ self.info = info self.topic = topic self.method = method super(Timeout, self).__init__( None, info=info or _(''), topic=topic or _(''), method=method or _('')) class DuplicateMessageError(RPCException): msg_fmt = _("Found duplicate message(%(msg_id)s). 
Skipping it.") class InvalidRPCConnectionReuse(RPCException): msg_fmt = _("Invalid reuse of an RPC connection.") class UnsupportedRpcVersion(RPCException): msg_fmt = _("Specified RPC version, %(version)s, not supported by " "this endpoint.") class UnsupportedRpcEnvelopeVersion(RPCException): msg_fmt = _("Specified RPC envelope version, %(version)s, " "not supported by this endpoint.") class RpcVersionCapError(RPCException): msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low") class Connection(object): """A connection, returned by rpc.create_connection(). This class represents a connection to the message bus used for rpc. An instance of this class should never be created by users of the rpc API. Use rpc.create_connection() instead. """ def close(self): """Close the connection. This method must be called when the connection will no longer be used. It will ensure that any resources associated with the connection, such as a network connection, and cleaned up. """ raise NotImplementedError() def serialize_remote_exception(failure_info): """Prepares exception data to be sent over rpc. Failure_info should be a sys.exc_info() tuple. """ tb = traceback.format_exception(*failure_info) failure = failure_info[1] kwargs = {} if hasattr(failure, 'kwargs'): kwargs = failure.kwargs # NOTE(matiu): With cells, it's possible to re-raise remote, remote # exceptions. Lets turn it back into the original exception type. cls_name = six.text_type(failure.__class__.__name__) mod_name = six.text_type(failure.__class__.__module__) if (cls_name.endswith(_REMOTE_POSTFIX) and mod_name.endswith(_REMOTE_POSTFIX)): cls_name = cls_name[:-len(_REMOTE_POSTFIX)] mod_name = mod_name[:-len(_REMOTE_POSTFIX)] data = { 'class': cls_name, 'module': mod_name, 'message': six.text_type(failure), 'tb': tb, 'args': failure.args, 'kwargs': kwargs } json_data = jsonutils.dumps(data) return json_data def deserialize_remote_exception(data, allowed_remote_exmods): failure = jsonutils.loads(six.text_type(data)) trace = failure.get('tb', []) message = failure.get('message', "") + "\n" + "\n".join(trace) name = failure.get('class') module = failure.get('module') # the remote service which raised the given exception might have a # different python version than the caller. For example, the caller might # run python 2.7, while the remote service might run python 3.4. Thus, # the exception module will be "builtins" instead of "exceptions". if module in _EXCEPTIONS_MODULES: module = _EXCEPTIONS_MODULE # NOTE(ameade): We DO NOT want to allow just any module to be imported, in # order to prevent arbitrary code execution. if module != _EXCEPTIONS_MODULE and module not in allowed_remote_exmods: return oslo_messaging.RemoteError(name, failure.get('message'), trace) try: __import__(module) mod = sys.modules[module] klass = getattr(mod, name) if not issubclass(klass, Exception): raise TypeError("Can only deserialize Exceptions") failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) except (AttributeError, TypeError, ImportError): return oslo_messaging.RemoteError(name, failure.get('message'), trace) ex_type = type(failure) str_override = lambda self: message new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), {'__str__': str_override, '__unicode__': str_override}) new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) try: # NOTE(ameade): Dynamically create a new exception type and swap it in # as the new type for the exception. This only works on user defined # Exceptions and not core Python exceptions. 
This is important because # we cannot necessarily change an exception message so we must override # the __str__ method. failure.__class__ = new_ex_type except TypeError: # NOTE(ameade): If a core exception then just add the traceback to the # first exception argument. failure.args = (message,) + failure.args[1:] return failure class CommonRpcContext(object): def __init__(self, **kwargs): self.values = kwargs def __getattr__(self, key): try: return self.values[key] except KeyError: raise AttributeError(key) def to_dict(self): return copy.deepcopy(self.values) @classmethod def from_dict(cls, values): return cls(**values) def deepcopy(self): return self.from_dict(self.to_dict()) def update_store(self): # local.store.context = self pass class ClientException(Exception): """Encapsulates actual exception expected to be hit by a RPC proxy object. Merely instantiating it records the current exception information, which will be passed back to the RPC client without exceptional logging. """ def __init__(self): self._exc_info = sys.exc_info() def serialize_msg(raw_msg): # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more # information about this format. msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, _MESSAGE_KEY: jsonutils.dumps(raw_msg)} return msg def deserialize_msg(msg): # NOTE(russellb): Hang on to your hats, this road is about to # get a little bumpy. # # Robustness Principle: # "Be strict in what you send, liberal in what you accept." # # At this point we have to do a bit of guessing about what it # is we just received. Here is the set of possibilities: # # 1) We received a dict. This could be 2 things: # # a) Inspect it to see if it looks like a standard message envelope. # If so, great! # # b) If it doesn't look like a standard message envelope, it could either # be a notification, or a message from before we added a message # envelope (referred to as version 1.0). # Just return the message as-is. # # 2) It's any other non-dict type. Just return it and hope for the best. # This case covers return values from rpc.call() from before message # envelopes were used. (messages to call a method were always a dict) if not isinstance(msg, dict): # See #2 above. return msg base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) if not all(map(lambda key: key in msg, base_envelope_keys)): # See #1.b above. return msg # At this point we think we have the message envelope # format we were expecting. (#1.a above) if not utils.version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]): raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) return raw_msg class DecayingTimer(object): def __init__(self, duration=None): self._watch = timeutils.StopWatch(duration=duration) def start(self): self._watch.start() def check_return(self, timeout_callback=None, *args, **kwargs): maximum = kwargs.pop('maximum', None) left = self._watch.leftover(return_none=True) if left is None: return maximum if left <= 0 and timeout_callback is not None: timeout_callback(*args, **kwargs) return left if maximum is None else min(left, maximum) # NOTE(sileht): Even if rabbit has only one Connection class, # this connection can be used for two purposes: # * wait and receive amqp messages (only do read stuffs on the socket) # * send messages to the broker (only do write stuffs on the socket) # The code inside a connection class is not concurrency safe. 
# Using one Connection class instance for both purposes will result # in eventlet complaining about multiple greenthreads that read/write the # same fd concurrently... because 'send' and 'listen' run in different # greenthreads. # So a connection cannot be shared between threads/greenthreads, and # these two variables define the purpose of the connection so that # drivers can add special handling if needed (like heartbeat). # amqp drivers create 3 kinds of connections: # * driver.listen*(): each call creates a new 'PURPOSE_LISTEN' connection # * driver.send*(): a pool of 'PURPOSE_SEND' connections is used # * the driver internally has another 'PURPOSE_LISTEN' connection dedicated # to waiting for replies to rpc calls PURPOSE_LISTEN = 'listen' PURPOSE_SEND = 'send' class ConnectionContext(Connection): """The class that is actually returned to the create_connection() caller. This is essentially a wrapper around Connection that supports 'with'. It can also return a new Connection, or one from a pool. It also detects when an instance of this class is about to be deleted, so Connections can be returned to the pool on exceptions and so forth without making the caller responsible for catching them. If possible, the connection is returned to the pool. """ def __init__(self, connection_pool, purpose): """Create a new connection, or get one from the pool.""" self.connection = None self.connection_pool = connection_pool pooled = purpose == PURPOSE_SEND if pooled: self.connection = connection_pool.get() else: # a non-pooled connection is requested, so create a new connection self.connection = connection_pool.create(purpose) self.pooled = pooled self.connection.pooled = pooled def __enter__(self): """When with ConnectionContext() is used, return self.""" return self def _done(self): """If the connection came from a pool, clean it up and put it back. If it did not come from a pool, close it. """ if self.connection: if self.pooled: # Reset the connection so it's ready for the next caller # to grab from the pool try: self.connection.reset() except Exception: LOG.exception(_LE("Failed to reset the connection, dropping it")) try: self.connection.close() except Exception: pass self.connection = self.connection_pool.create() finally: self.connection_pool.put(self.connection) else: try: self.connection.close() except Exception: pass self.connection = None def __exit__(self, exc_type, exc_value, tb): """End of 'with' statement. We're done here.""" self._done() def __del__(self): """Caller is done with this connection. Make sure we cleaned up.""" self._done() def close(self): """Caller is done with this connection.""" self._done() def __getattr__(self, key): """Proxy all other calls to the Connection instance.""" if self.connection: return getattr(self.connection, key) else: raise InvalidRPCConnectionReuse() class ConfigOptsProxy(collections.Mapping): """Proxy for oslo_config.cfg.ConfigOpts. Values from the query part of the transport url (if they are both present and valid) override corresponding values from the configuration. 
""" def __init__(self, conf, url, group): self._conf = conf self._url = url self._group = group self._validate_query() def _validate_query(self): for name in self._url.query: self.GroupAttrProxy(self._conf, self._group, self._conf[self._group], self._url)[name] def __getattr__(self, name): value = getattr(self._conf, name) if isinstance(value, self._conf.GroupAttr) and name == self._group: return self.GroupAttrProxy(self._conf, name, value, self._url) return value def __getitem__(self, name): return self.__getattr__(name) def __contains__(self, name): return name in self._conf def __iter__(self): return iter(self._conf) def __len__(self): return len(self._conf) class GroupAttrProxy(collections.Mapping): """Internal helper proxy for oslo_config.cfg.ConfigOpts.GroupAttr.""" _VOID_MARKER = object() def __init__(self, conf, group_name, group, url): self._conf = conf self._group_name = group_name self._group = group self._url = url def __getattr__(self, opt_name): # Make sure that the group has this specific option opt_value_conf = getattr(self._group, opt_name) # If the option is also present in the url and has a valid # (i.e. convertible) value type, then try to override it opt_value_url = self._url.query.get(opt_name, self._VOID_MARKER) if opt_value_url is self._VOID_MARKER: return opt_value_conf opt_info = self._conf._get_opt_info(opt_name, self._group_name) return opt_info['opt'].type(opt_value_url) def __getitem__(self, opt_name): return self.__getattr__(opt_name) def __contains__(self, opt_name): return opt_name in self._group def __iter__(self): return iter(self._group) def __len__(self): return len(self._group) oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp1_driver/0000775000175100017510000000000013224676256024015 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp1_driver/opts.py0000666000175100017510000002722613224676046025364 0ustar zuulzuul00000000000000# Copyright 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg amqp1_opts = [ cfg.StrOpt('container_name', deprecated_group='amqp1', help='Name for the AMQP container. must be globally unique.' ' Defaults to a generated UUID'), cfg.IntOpt('idle_timeout', default=0, # disabled deprecated_group='amqp1', help='Timeout for inactive connections (in seconds)'), cfg.BoolOpt('trace', default=False, deprecated_group='amqp1', help='Debug: dump AMQP frames to stdout'), cfg.BoolOpt('ssl', default=False, help=("Attempt to connect via SSL. 
If no other ssl-related " "parameters are given, it will use the system's " "CA-bundle to verify the server's certificate.")), cfg.StrOpt('ssl_ca_file', default='', deprecated_group='amqp1', help="CA certificate PEM file used to verify the server's" ' certificate'), cfg.StrOpt('ssl_cert_file', default='', deprecated_group='amqp1', help='Self-identifying certificate PEM file' ' for client authentication'), cfg.StrOpt('ssl_key_file', default='', deprecated_group='amqp1', help='Private key PEM file used to sign ssl_cert_file' ' certificate (optional)'), cfg.StrOpt('ssl_key_password', deprecated_group='amqp1', secret=True, help='Password for decrypting ssl_key_file (if encrypted)'), cfg.BoolOpt('ssl_verify_vhost', default=False, help="By default SSL checks that the name in the server's" " certificate matches the hostname in the transport_url. In" " some configurations it may be preferable to use the virtual" " hostname instead, for example if the server uses the Server" " Name Indication TLS extension (rfc6066) to provide a" " certificate per virtual host. Set ssl_verify_vhost to True" " if the server's SSL certificate uses the virtual host name" " instead of the DNS name."), cfg.BoolOpt('allow_insecure_clients', default=False, deprecated_group='amqp1', # marked as deprecated in Ocata deprecated_for_removal=True, deprecated_reason="Not applicable - not a SSL server", help='Accept clients using either SSL or plain TCP'), cfg.StrOpt('sasl_mechanisms', default='', deprecated_group='amqp1', help='Space separated list of acceptable SASL mechanisms'), cfg.StrOpt('sasl_config_dir', default='', deprecated_group='amqp1', help='Path to directory that contains the SASL configuration'), cfg.StrOpt('sasl_config_name', default='', deprecated_group='amqp1', help='Name of configuration file (without .conf suffix)'), cfg.StrOpt('sasl_default_realm', default='', help='SASL realm to use if no realm present in username'), cfg.StrOpt('username', default='', deprecated_group='amqp1', deprecated_for_removal=True, deprecated_reason='Should use configuration option ' 'transport_url to provide the username.', help='User name for message broker authentication'), cfg.StrOpt('password', default='', deprecated_group='amqp1', secret=True, deprecated_for_removal=True, deprecated_reason='Should use configuration option ' 'transport_url to provide the password.', help='Password for message broker authentication'), # Network connection failure retry options cfg.IntOpt('connection_retry_interval', default=1, min=1, help='Seconds to pause before attempting to re-connect.'), cfg.IntOpt('connection_retry_backoff', default=2, min=0, help='Increase the connection_retry_interval by this many' ' seconds after each unsuccessful failover attempt.'), cfg.IntOpt('connection_retry_interval_max', default=30, min=1, help='Maximum limit for connection_retry_interval' ' + connection_retry_backoff'), # Message send retry and timeout options cfg.IntOpt('link_retry_delay', default=10, min=1, help='Time to pause between re-connecting an AMQP 1.0 link that' ' failed due to a recoverable error.'), cfg.IntOpt('default_reply_retry', default=0, min=-1, help='The maximum number of attempts to re-send a reply message' ' which failed due to a recoverable error.'), cfg.IntOpt('default_reply_timeout', default=30, min=5, help='The deadline for an rpc reply message delivery.'), cfg.IntOpt('default_send_timeout', default=30, min=5, help='The deadline for an rpc cast or call message delivery.' 
' Only used when caller does not provide a timeout expiry.'), cfg.IntOpt('default_notify_timeout', default=30, min=5, help='The deadline for a sent notification message delivery.' ' Only used when caller does not provide a timeout expiry.'), # Sender link cache maintenance: cfg.IntOpt('default_sender_link_timeout', default=600, min=1, help='The duration to schedule a purge of idle sender links.' ' Detach link after expiry.'), # Addressing: cfg.StrOpt('addressing_mode', default='dynamic', help="Indicates the addressing mode used by the driver.\n" "Permitted values:\n" "'legacy' - use legacy non-routable addressing\n" "'routable' - use routable addresses\n" "'dynamic' - use legacy addresses if the message bus does not" " support routing, otherwise use routable addressing"), cfg.BoolOpt('pseudo_vhost', default=True, help="Enable virtual host support for those message buses" " that do not natively support virtual hosting (such as" " qpidd). When set to true the virtual host name will be" " added to all message bus addresses, effectively creating" " a private 'subnet' per virtual host. Set to False if the" " message bus supports virtual hosting using the 'hostname'" " field in the AMQP 1.0 Open performative as the name of the" " virtual host."), # Legacy addressing customization: cfg.StrOpt('server_request_prefix', default='exclusive', deprecated_group='amqp1', help="address prefix used when sending to a specific server"), cfg.StrOpt('broadcast_prefix', default='broadcast', deprecated_group='amqp1', help="address prefix used when broadcasting to all servers"), cfg.StrOpt('group_request_prefix', default='unicast', deprecated_group='amqp1', help="address prefix when sending to any server in group"), # Routable addressing customization: # # Addresses are composed of the following string values using a template in # the form of: # $(address_prefix)/$(*cast)/$(exchange)/$(topic)[/$(server-name)] # where *cast is one of the multicast/unicast/anycast values used to # identify the delivery pattern used for the addressed message cfg.StrOpt('rpc_address_prefix', default='openstack.org/om/rpc', help="Address prefix for all generated RPC addresses"), cfg.StrOpt('notify_address_prefix', default='openstack.org/om/notify', help="Address prefix for all generated Notification addresses"), cfg.StrOpt('multicast_address', default='multicast', help="Appended to the address prefix when sending a fanout" " message. Used by the message bus to identify fanout" " messages."), cfg.StrOpt('unicast_address', default='unicast', help="Appended to the address prefix when sending to a" " particular RPC/Notification server. Used by the message bus" " to identify messages sent to a single destination."), cfg.StrOpt('anycast_address', default='anycast', help="Appended to the address prefix when sending to a group of 
Used by the message bus to identify messages that" " should be delivered in a round-robin fashion across" " consumers."), cfg.StrOpt('default_notification_exchange', help="Exchange name used in notification addresses.\n" "Exchange name resolution precedence:\n" "Target.exchange if set\n" "else default_notification_exchange if set\n" "else control_exchange if set\n" "else 'notify'"), cfg.StrOpt('default_rpc_exchange', help="Exchange name used in RPC addresses.\n" "Exchange name resolution precedence:\n" "Target.exchange if set\n" "else default_rpc_exchange if set\n" "else control_exchange if set\n" "else 'rpc'"), # Message Credit Levels cfg.IntOpt('reply_link_credit', default=200, min=1, help='Window size for incoming RPC Reply messages.'), cfg.IntOpt('rpc_server_credit', default=100, min=1, help='Window size for incoming RPC Request messages'), cfg.IntOpt('notify_server_credit', default=100, min=1, help='Window size for incoming Notification messages'), # Settlement control cfg.MultiStrOpt('pre_settled', default=['rpc-cast', 'rpc-reply'], help="Send messages of this type pre-settled.\n" "Pre-settled messages will not receive acknowledgement\n" "from the peer. Note well: pre-settled messages may be\n" "silently discarded if the delivery fails.\n" "Permitted values:\n" "'rpc-call' - send RPC Calls pre-settled\n" "'rpc-reply'- send RPC Replies pre-settled\n" "'rpc-cast' - Send RPC Casts pre-settled\n" "'notify' - Send Notifications pre-settled\n") ] oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp1_driver/__init__.py0000666000175100017510000000000013224676046026113 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst0000666000175100017510000017267113224676046034114 0ustar zuulzuul00000000000000############################## Oslo.messaging AMQP 1.0 Driver ############################## :Date: $Date: 2016-08-02 $ :Revision: $Revision: 0.04 $ Introduction ============ This document describes the architecture and implementation of the oslo.messaging AMQP 1.0 driver. The AMQP 1.0 driver provides an implementation of the oslo.messaging base driver service interfaces that map client application RPC and Notify methods "onto" the operation of an AMQP 1.0 protocol messaging bus. The blueprint for the original driver can be found here [1]_ and the original implementation is described in [2]_. The feature specification for the updates to the AMQP 1.0 driver for the OpenStack Newton release can be found here [3]_ The driver effectively hides the details of the AMQP 1.0 protocol transport and message processing from the client applications. The Pyngus messaging framework [4]_ built on the QPID Proton engine [5]_ provides a callback-based API for message passing. The driver implementation is comprised of the callback "handlers" that drive the messaging APIs to connect to the message bus, subscribe servers, send and receive messages. 
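For orientation, the minimal sketch below shows how an application reaches this driver through the public oslo.messaging API: the **amqp** scheme in the transport URL is what causes the AMQP 1.0 driver to be loaded. The host name and topic are hypothetical placeholders, and the snippet is illustrative rather than a complete application::

    from oslo_config import cfg

    import oslo_messaging

    # the 'amqp' URL scheme selects the AMQP 1.0 driver
    transport = oslo_messaging.get_transport(
        cfg.CONF, "amqp://broker.example.com:5672/")
    target = oslo_messaging.Target(topic="my-topic")
    client = oslo_messaging.RPCClient(transport, target)
    client.cast({}, "my_method", arg="value")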
:: +------------+ +------------+ +-------------+ +-------------+ | | | | | | | | OpenStack | RPC Client | | RPC Server | | Notify | | Notify | Application | | | | | Client | | Server | +------------+ +------------+ +-------------+ +-------------+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +-----------------------------------------------------------+ | Oslo.Messaging "Base Driver Interface" | Oslo Messaging +-----------------------------------------------------------+ Driver | Oslo.Messaging AMQP 1.0 Driver | +-----------------------------------------------------------+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +-----------------------------------------------------------+ | Pyngus Messaging Framework | +-----------------------------------------------------------+ | QPID Proton Library | AMQP 1.0 +-----------------------------------------------------------+ Protocol | AMQP 1.0 Protocol | Exchange +-----------------------------------------------------------+ | TCP/IP Network Layer | +-----------------------------------------------------------+ Development View ================ Code Base --------- The AMQP 1.0 driver source code is maintained in the OpenStack oslo.messaging repository [7]_. The driver implementation, tests and user guide are located in the sub-directories of the repository. :: ├── doc │ └── source │ ├── AMQP1.0.rst ├── oslo_messaging ├── _drivers │ ├── amqp1_driver │ │ ├── addressing.py │ │ ├── controller.py │ │ ├── eventloop.py │ │ ├── opts.py │ ├── impl_amqp1.py ├── tests ├── drivers ├── test_amqp_driver.py +-----------------+----------------------------------------------------+ |File | Content | +=================+====================================================+ |doc/ |The AMQP 1.0 driver user guide details | |source/ |prerequisite, configuration and platform deployment | |AMQP1.0.rst |considerations. | | | | +-----------------+----------------------------------------------------+ |_drivers/ |This file provides the oslo.messaging driver entry | |impl_amqp1.py |points for the AMQP 1.0 driver. The file provides | | |implementations for the base.RpcIncomingMessage, | | |base.PollStyleListener and base.BaseDriver oslo | | |messaging entities. | +-----------------+----------------------------------------------------+ |_drivers/ |This file provides a set of utilities that translate| |amqp1_driver/ |a target address to a well-formed AMQP 1.0 address. | |addressing.py | | | | | +-----------------+----------------------------------------------------+ |_drivers/ |The controller manages the interface between the | |amqp1_driver/ |driver and the messaging service protocol exchange. | |controller.py | | | | | +-----------------+----------------------------------------------------+ |_drivers/ |This module provides a background thread that | |amqp1_driver/ |handles scheduled messaging operations. All | |eventloop.py |protocol specific exchanges are executed on this | | |background thread. | +-----------------+----------------------------------------------------+ |_drivers/ |This file manages the AMQP 1.0 driver configuration | |amqp1_driver/ |options (oslo_messaging_amqp). | |opts.py | | | | | +-----------------+----------------------------------------------------+ |tests/ |This file contains a set of functional tests that | |drivers/ |target the capabilities of the driver. A message | |test_amqp_driver |intermediary is included to emulate the full | | |messaging protocol exchanges. 
| +-----------------+----------------------------------------------------+ Deployment ========== The Oslo Messaging AMQP 1.0 driver is deployed on each node of the OpenStack infrastructure where one or more OpenStack services will be deployed. :: Node Node +--------------------------------+ +-----------------------------------+ | +-------------+ | | +--------------+ +--------------+ | | | | | | | | | | | | | OpenStack | | | | OpenStack | | OpenStack | | | | Service | | | | Service | | Service | | | | | | | | | | | | | +-------------+ | | +--------------+ +--------------+ | | | Oslo | | | | Oslo | | Oslo | | | | Messaging | | | | Messaging | | Messaging | | | +------------+ +-------------+ | | +--------------+ +--------------+ | | | AMQP 1.0 | | AMQP 1.0 | | | | AMQP 1.0 | | AMQP 1.0 | | | |Intermediary| | Driver | | | | Driver | | Driver | | | +------------+ +-------------+ | | +--------------+ +--------------+ | | +----------------------------+ | | +-------------------------------+ | | | TCP/IP | | | | TCP/IP | | | | Stack | | | | Stack | | | +----------------------------+ | | +-------------------------------+ | +--------------------------------+ +-----------------------------------+ ^ ^ ^ ^ | | | | | | Public Network | | +----------------------v-----------------------------------------v------------+ v Internal Network v +-----------------------------------------------------------------------------+ The configuration of each OpenStack service must provide the transport information that indicates to the oslo messaging layer that the AMQP 1.0 driver is to be instantiated for the back-end. During instantiation of the driver, a connection is established from the driver to an AMQP 1.0 intermediary that provides the messaging bus capabilities. The intermediary can be co-located on nodes that are running OpenStack services or can be located on separate stand-alone nodes in the control plane. The driver architecture is intended to support any messaging intermediary (e.g. broker or router) that implements version 1.0 of the AMQP protocol. Support for additional classes of intermediaries might require changes to driver configuration parameters and addressing syntax but should not otherwise require changes to the driver architecture. Driver Structure ================ The functionality of the AMQP 1.0 driver is implemented across a number of components that encapsulate the mapping of the driver activities onto the AMQP protocol exchange. The *Controller* implements the primary functional logic for the driver and serves as the interface between the driver entry points ( *Proton Driver* ) and the I/O operations associated with sending and receiving messages on links attached to the message bus. Each sending or receiving link is associated with a specific driver activity such as sending an RPC Call/Cast or Notify message, receiving an RPC reply message, or receiving an RPC or Notify server request. 
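The server-side counterpart of the client sketch shown earlier is a subscription, which is what ultimately causes the *Controller* to attach receiving links for the server's target. The endpoint class, topic, and server name below are hypothetical::

    from oslo_config import cfg

    import oslo_messaging

    class TestEndpoint(object):
        def my_method(self, ctxt, arg):
            return arg

    transport = oslo_messaging.get_transport(
        cfg.CONF, "amqp://broker.example.com:5672/")
    target = oslo_messaging.Target(topic="my-topic", server="server-1")
    server = oslo_messaging.get_rpc_server(
        transport, target, [TestEndpoint()], executor="blocking")
    server.start()  # subscribes the server; receiver links are attached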
:: _______________________ / / / Application / / (OpenStack) / /______________________/ | XXXXXXXXXXXXXXXXXXXXXXX|XXXXXXXXXXXXXXXXXXXXXXXXXXXXX | +----------+ +-----------| Proton | V | Driver | +-------+ +----------+ | Tasks | | +-------+ +------------+ +--------->| Controller | ----| |---- / +------------+ \ / | \ / | \ +---------+ +---------+ +---------+ | Sender |<--| Replies | | Server | | | | | | | +---------+ +---------+ +---------+ | | | | +---------+ +---------+ | | Proton | | Proton | | |Listener | |Listener | | +---------+ +---------+ | | | XXXXXXXXX|XXXXXXXXXXXXX|XXXXXXXXXXXXXXX|XXXXXXXXXXXXX | | | +--------+ +--------+ +--------+ | Send | | Receive| | Receive| | Link | | Link | | Link | +--------+ +--------+ +--------+ Task Orchestration ------------------ The AMQP 1.0 driver maintains a thread for processing protocol events and timers. Therefore, the driver must orchestrate and synchronize requests from the client applications with this internal thread. The *Proton Driver* will act as a proxy for each client request and constructs a task request object on the caller's thread via the *Controller*. The task request object contains the necessary information to execute the desired method on the driver invocation thread of control. This method is executed synchronously - the client thread pends until the driver thread completes processing the task. The unique task objects provided for driver thread invocation include: * Subscribe Task * Send Task (for RPC Cast or Notify) * RPC Call Task * RPC Reply Task * Message Disposition Task :: +------------------------+ +-------------------------------+ | Client Thread | | Driver Thread | | +--------+ +---------+ | | +------+ +--------+ +-------+ | | |Proton | |Control | | | |Event | |Control | |Pyngus | | | |Driver | |(-ler) | | | |Loop | |(-ler) | |Frmwrk | | | +---+----+ +----+----+ | | +---+--+ +---+----+ +---+---+ | | |create | | | | | | | | |task() | | | | | | | | |---------->| | | | | | | | |add | | | | | | | | |task() | | Request | | | | | | |---------->| | Queue | | | | | | | | enq | +------+ | deq | | | | | | |------|---> |||||+--|---->| exec() | | | | | | | +------+ | |------->| | | | | | | | | |----------|-+ | | | wait() | | | | | Protocol | | | | #-----------|------|------+ | | | Exchange | | | | # | | V | | | | | | | # | | +-----+ | | set() |<---------|-+ | | # | | |Event|<--------|--------| | | | # | | | | | | | | | | # | | +-----+ | | | | | | # | | | | | | | | | #<----------|------|------+ | | | | | | | | | | | | | | | + + | | + + + | | | | | | | | | +------------------------+ +-------------------------------+ Scheduling - Execution ^^^^^^^^^^^^^^^^^^^^^^ Following the method task construction, the task is added to the *Controller* queue of requests for execution. Following the placement of the task on this queue, the caller will wait for the execution to complete (or possibly timeout or raise an exception). The eventloop running in its own thread will dequeue the task request and invoke the corresponding method on the *Controller* servant using the information stored in the task request object retrieved. The calls executed on this eventloop thread via the *Controller* perform all the protocol specific intelligence required for the pyngus framework. In addition to the target method invocation, the eventloop may call on the request object for message communication state changes or other indications from the peer. 
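The caller/eventloop synchronization described above follows a common queue-and-event pattern. The sketch below illustrates that pattern only; the class and method names are hypothetical and do not correspond to the driver's actual task classes::

    import threading

    class RequestTask(object):
        """Hypothetical sketch of the task synchronization pattern."""

        def __init__(self, operation):
            self._operation = operation        # callable run by the driver
            self._complete = threading.Event()
            self._result = None

        def execute(self):
            # invoked on the driver's eventloop thread after dequeue
            self._result = self._operation()
            self._complete.set()               # wake the waiting caller

        def wait(self, timeout=None):
            # invoked on the client thread once the task is enqueued
            if not self._complete.wait(timeout):
                raise RuntimeError("task timed out")
            return self._result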
:: Request +--------------------------------------------+ +----------+ Tasks |Client Thread /\ | | | | * * * * * * / v | | + V + listen() | * * * * * * | | |---| -------->| * Init *-->* Schedule *-->* Wait * | | |---| | * * * * * * | | |---| | * * * * * * | | +_|_+ | * * * *\ * * | | V | +------------------|-->| +--------------+ +--------------------------------------------+ | | Eventloop | | | * * | +--------------------------------------------+ | | * * | |Client Thread /\ | | | * Execute * | | * * * * * * / v | | | * * | call() | * * * * * * | | | * * | -------->| * Init *-->* Schedule *-->* Wait * | | | ^ * * \ | | * * * * * * | | | / \ | | * * * * * * | | | / / | | * * * *\ * * | | | \ / | | +------------------|-->| | \ * *v | +--------------------------------------------+ | | * * | o | | * Protocol * | o | | * Exchange * | o | | * * | +--------------------------------------------+ | | * * | |Client Thread /\ | | +--------------+ | * * * * * * / v | | cast() | * * * * * * | | -------->| * Init *-->* Schedule *-->* Wait * | | | * * * * * * | | | * * * * * * | | | * * * *\ * * | | | +------------------|--> +--------------------------------------------+ Completion ^^^^^^^^^^ After carrying out the messaging protocol exchange for the requested task or upon a timeout/exception condition, the eventloop thread will wake-up the callers thread to indicate the task completion. Use Scenarios ============= The primary use scenarios for the AMQP 1.0 Driver correspond to the activities supported by the oslo messaging base driver interface. These activities include the ability to subscribe RPC and Notify servers (referred to as "Servers" in the graphics) as well the ability to send RPC (cast and call) messages and Notification messages into the control plane infrastructure. Following RPC and Notify server processing (e.g. dispatch to the application) the ability to indicate the final disposition of the message is supported and mapped onto the message delivery and settlement capabilities of the AMQP messaging bus. The composition of the AMQP driver and its dynamic behaviors is defined by the support of these primary activities. Load Driver ----------- The operational life-cycle of the AMQP 1.0 driver begins when the oslo messaging loads and instantiates the driver instance for use by an application. To complete this activity, the driver will retrieve the oslo_messaging_amqp configuration options in order to define the driver's run time behaviors. The transport URL specifier provided will be used by the driver to create a connection to the AMQP 1.0 messaging bus. The transport URL is of the form amqp://user:pass@host1:port[,hostN:portN] Where the transport scheme specifies **amqp** as the back-end. It should be noted that oslo.messaging is deprecating the discrete host, port and auth configuration options [6]_. The driver provides the capability to transform the "Target" provided by an application to an addressing format that can be associated to the sender and receive links that take part in the AMQP protocol exchange. 
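As an illustration of this address transformation, the routable addressing template documented in opts.py ($(address_prefix)/$(*cast)/$(exchange)/$(topic)[/$(server-name)]) can be pictured as a simple formatting function. This is a sketch of the documented template only, not the driver's actual addressing.py logic::

    def routable_address(prefix, cast, exchange, topic, server=None):
        # cast is one of 'multicast', 'unicast' or 'anycast'
        address = "%s/%s/%s/%s" % (prefix, cast, exchange, topic)
        if server is not None:
            address += "/%s" % server
        return address

    # e.g. an RPC message sent to one specific server:
    routable_address("openstack.org/om/rpc", "unicast", "rpc",
                     "my-topic", "server-1")
    # -> 'openstack.org/om/rpc/unicast/rpc/my-topic/server-1'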
:: load()---+ \ ----------- \ +--- Transport > * * | ----------- * *<---+ * Prepare * * Driver * * * * * ---------- | Cfg Opts | ----------\ | \ v v * * * * * Retrieve * * Config * * * * * | | v * * * Start * * Protocol * * Thread * * * * * | | v * * +--------------+ * Connect* | AMQP | * to *<----------->| Protocol | * Message * | Exchange | * Bus * +--------------+ * * \ | \ | \ ------------ v +-----> Connection --+ * * ------------ | * * | * Address *<--------------------+ * Factory * * * * * When the AMQP 1.0 driver connects to the messaging bus, it will identify the intermediary that it is connected to (e.g. broker or router). Based on the intermediary type, the driver will dynamically select an addressing syntax that is optimal for operation in a router mesh or a syntax that is appropriate for broker backed queues or topics. Subscribe Server ---------------- The AMQP 1.0 driver maintains a set of (RPC or Notification) servers that are created via the subscribe server activity. For each server, the driver will create and attach a set of addresses for the target that corresponds to the server endpoint for an AMQP protocol exchange. A unique *ProtonListener* (e.g. AMQP 1.0 Receiver Link) is instantiated for each server subscription and the driver will attach event handlers to perform message transport performatives for the link. The driver maintains a single incoming queue that messages from all attached links will be placed upon. :: listen() + \ \ * * \ * * +> * Create * * Listener* * * * * \ ---------- -------- | +-------> Incoming Target -+ | / ---------- -------- \ | +----+ \ v / v * * v * * * Create * * Server * * *\ * * \ ---------- | \ ----------- Connection | +------> Addresses ----------\ | /----------- \ v / v * * / * *<------+ * Attach * * Links * * * * * | | v +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ Send Message ------------ The AMQP 1.0 driver provides the ability to send messages (e.g. RPC Call/Cast or Notify) to a target specified by a client application. The driver maintains a cache of senders corresponding to each unique target that is referenced across the driver life-cycle. The driver maintains a single receiver link that will be the incoming link for all RPC reply messages received by the driver. Prior to sending an RPC call message that expects a reply, the driver will allocate a unique correlation identifier for inclusion in the call message. The driver will also set the message's reply-to field to the address of the RPC reply link. This correlation identifier will appear in the RPC reply message and is used to deliver the reply to the proper client. Prior to sending the message, the AMQP 1.0 driver will determine if the sender link is active and has enough credits for the transfer to proceed. If there are not enough credits to send the message, the driver will retain the pending message until it can be sent or times out. If there are credits to send a message, the driver will first check if there are any messages from a previous request pending to be sent. The driver will service these pending requests in FIFO order and may defer sending the current message request if credits to send run out. The AMQP 1.0 driver tracks the settlement status of all request messages sent to the messaging bus. For each message sent, the driver will maintain a count of the number of retry attempts made on the message. The driver will re-send a message that is not acknowledged up until the retry limit is reached or a send timeout deadline is reached. 
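The credit-gated send logic can be pictured as a FIFO drain over the link's available credit. The sketch below is a simplification with hypothetical names (the link is assumed to expose pyngus-style ``active`` and ``credit`` attributes); it is not the driver's actual *Sender* implementation::

    import collections

    class SenderSketch(object):
        def __init__(self, link):
            self._link = link                     # pyngus-style sender link
            self._pending = collections.deque()   # unsent messages, FIFO

        def send(self, message):
            # queue first, then drain in FIFO order so that requests
            # pending from earlier sends are serviced before this one;
            # stop as soon as the link credit is exhausted
            self._pending.append(message)
            while (self._pending and self._link.active
                    and self._link.credit > 0):
                self._link.send(self._pending.popleft())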
:: send() + -------- \ +--- Target \ * * | -------- \ * *<---+ +> * Prepare * * Request *---+ ------------- /* * +----> Request Msg <-----+ / * * ------------- | ------- <-+ | | Sender | | ------- | | v | * * ------------ | * *---------> Correlation | * Prepare * ------------ | * Response * | * * | * * | | | | | v --------- | * * +---------> Pending | * */ --------- | * Send * | * Message *\ --------- | * * +-----> Unacked <---+ | * * --------- | | | | | | | + v | / +--------------+ * * v | AMQP | * * | Protocol |-----------> * Settle * | Exchange | * Message * +--------------+ * * * * Server Receive -------------- The AMQP 1.0 driver (via subscribe) maintains a groups of links that receive messages from a set of addresses derived from the Targets associated with a Server instantiation. Messages arriving from these links are placed on the Listener's incoming queue via the Server's incoming message handler. The Listener's poll method will return the message to the application for subsequent application service dispatching. :: +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ | ^ -------- V | --------- Receiver-+ * * +------- Address -------- \ * * --------- v* Message * * Received* * * * * \ \ ----------------- +------> Incoming Message --+ * * ----------------- | * * | * Poll *<--+ | * * | | * * | | * * +-------------------------+ RPC Reply Receive ----------------- The AMQP 1.0 driver instantiates a single receiving link for the reception of all RPC reply messages. Messages received on this receiving link are routed to the originating caller using the correlation-id embedded in the header of the message itself. To ensure the responsiveness and throughput on the shared RPC receiving link, the AMQP 1.0 driver will immediately update the link transfer credits and will acknowledge the successful receipt of the RPC reply. :: +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ | ----------------- V + ------ Incoming Message * * / ----------------- * *v * Message * * Received*<---+ * * | * * \ | ------------- | \ +---- Correlation V \ ------------- * * \ * * \ --------------- * Update * +------> Reply Message * Credit * --------------- * * * * | V * * * * * Accept * * Message * * * * * | V +--------------+ | AMQP | | Protocol | | Exchange | +--------------+ Disposition ----------- For each incoming message provided by the AMQP 1.0 driver to a server application (e.g. RPC or Notify), the delivery disposition of the incoming message can be indicated to the driver. The disposition can either be to acknowledge the message indicating the message was accepted by the application or to requeue the message indicating that application processing could not successfully take place. The driver will initiate the appropriate settlement of the message through an AMQP protocol exchange over the message bus. :: acknowledge()--------+ requeue() --------+ | | v v * * * * * * * * * Ack * * Requeue * * Message *\ ----* Message * * * \ / * * * * \ / * * | v ------------- v | | Incoming Msg | | / ------------- | | / | v v | +--------------+ | | AMQP |<----------------------------+ | Protocol | | Exchange | +--------------+ Driver Components ================= This section describes the components of the AMQP 1.0 driver implementation. For each component, its primary responsibilities and the relationships to other components are included. These relationships are derived from service requests placed upon the other components. 
Architectural or system-level constraints on the component (e.g. multiplicity, concurrency, parameterization) that change the depiction of the architecture are included. Additionally, any list of issues waiting resolution are described. Controller ---------- +-----------------+----------------------------------------------------+ |Component | *Controller* | +=================+====================================================+ |Responsibilities | Responsible for performing messaging-related | | | operations requested by the driver (tasks) | | | and for managing the connection to the messaging | | | service provided by the AMQP 1.0 intermediaries. | | | | | | This component provides the logic for addressing, | | | sending and receiving messages as well as managing | | | the messaging bus connection life-cycle. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Sender (pyngus.SenderEventHandler) | | | Server (pyngus.ReceiverEventHandler) | | | Replies (pyngus.ReceiverEventHandler) | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created whenever the driver is instantiated | | | in a client application process. The component | | | will terminate the driver operation when the client| | | initiates a shutdown of the driver. | | | | | | All AMQP 1.0 protocol exchanges (e.g. messaging | | | and I/O work) are done on the Eventloop driver | | | thread. This allows the driver to run | | | asynchronously from the messaging clients. | | | | | | The component supports addressing modes defined | | | by the driver configuration and through dynamic | | | inspection of the connection to the messaging | | | intermediary. | +-----------------+----------------------------------------------------+ |Issues | A cache of sender links indexed by address is | | | maintained. Currently, removal from the cache is | | | is not implemented. | +-----------------+----------------------------------------------------+ Sender ------ +-----------------+----------------------------------------------------+ |Component | *Sender* (pyngus.SenderEventHander) | +=================+====================================================+ |Responsibilities | Responsible for managing a sender link life-cycle | | | and queueing/tracking the message delivery. | | | (implementation of Pyngus.SenderEventHandle) | | | | | | Provides the capabilities for sending to a | | | particular address on the message bus. | | | | | | Provides the capability to queue (pending) | | | *SendTask* when link not active or insufficient | | | link credit capacity. | | | | | | Provides the capability to retry send following a | | | recoverable connection or link failure. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Addresser | | | Connection | | | Pyngus.SenderLink | | | SendTask | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created by the *Controller* on a client | | | caller thread and retained in a *Sender* cache. 
| +-----------------+----------------------------------------------------+ |Issues | Sender cache aging (see above) | +-----------------+----------------------------------------------------+ Server ------ +-----------------+----------------------------------------------------+ |Component | *Server* (pyngus.ReceiverEventHander) | +=================+====================================================+ |Responsibilities | Responsible for operations for the lifecycle of an | | | incoming queue that is used for messages received | | | from a set of target addresses. | | | | +-----------------+----------------------------------------------------+ |Collaborators | Connection | | | Pyngus.ReceiverLink | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created whenever a client application | | | subscribes a RPC or Notification server to the | | | messaging bus. When the client application closes | | | the transport, this component and its associated | | | links will be detached/closed. | | | | | | Individual receiver links are created over the | | | message bus connection for all the addresses | | | generated for the server target. | | | | | | All the receiver links share a single event | | | callback handler. | +-----------------+----------------------------------------------------+ |Issues | The credit per link is presently hard-coded. A | | | mechanism to monitor for a back-up of inbound | | | messages to back-pressure the sender is proposed. | +-----------------+----------------------------------------------------+ Replies ------- +-----------------+----------------------------------------------------+ |Component | *Replies* (pyngus.ReceiverEventHander) | +=================+====================================================+ |Responsibilities | Responsible for the operations and managing | | | the life-cycle of the receiver link for all RPC | | | reply messages. A single instance of an RPC reply | | | link is maintained for the driver. | +-----------------+----------------------------------------------------+ |Collaborators | Connection | | | Pyngus.ReceiverLink | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | The reply link is created when the connection to | | | the messaging bus is activated. | | | | | | The origination of RPC calls is inhibited until | | | the replies link is active. | | | | | | Message are routed to the originator's incoming | | | queue using the correlation-id header that is | | | contained in the response message. | +-----------------+----------------------------------------------------+ |Issues | | +-----------------+----------------------------------------------------+ ProtonDriver ------------ +-----------------+----------------------------------------------------+ |Component | *ProtonDriver* | +=================+====================================================+ |Responsibilities | Responsible for providing the oslo.Messaging | | | BaseDriver implementation. | | | | | | Provides the capabilities to send RPC and | | | Notification messages and create subscriptions for | | | the application. | | | | | | Each operation generates a task that is scheduled | | | for execution on the *Controller* eventloop | | | thread. | | | | | | The calling thread blocks until execution completes| | | or timeout. 
| +-----------------+----------------------------------------------------+ |Collaborators | | | | Controller | | | RPCCallTask | | | SendTask | | | SubscribeTask | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created whenever the oslo.messaging AMQP 1.0 | | | driver is loaded by an application (process). | | | | | | The component manages the life-cycle of the | | | *Controller* component. Tasks may be created but | | | will not be processed until the Controller | | | connection to the messaging service completes. | | | | | | There are separate timeout values for RPC Send, | | | Notify Send, and RPC Call Reply. | +-----------------+----------------------------------------------------+ |Issues | | | | The unmarshalling of an RPC response could cause | | | an exception/failure and should be optimally | | | communicated back up to the caller. | +-----------------+----------------------------------------------------+ ProtonIncomingMessage --------------------- +-----------------+----------------------------------------------------+ |Component | *ProtonIncomingMessage* | +=================+====================================================+ |Responsibilities | Responsible for managing the life-cycle of an | | | incoming message received on a RPC or notification | | | Server link. | | | | | | Provides the capability to set the disposition of | | | the incoming message as acknowledge (e.g. settled) | | | or requeue. | | | | | | Provides the capability to marshal and send the | | | reply to an RPC Call message. | | | | +-----------------+----------------------------------------------------+ |Collaborators | Controller | | | ProtonListener | | | MessageDispositionTask | | | SendTask | | | | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | A ProtonListener returns this component from the | | | poll of the incoming queue. | | | | | | The message reply_to and id fields of the incoming | | | message are used to generate the target for the | | | RPC reply message. | | | | | | The RPC reply and message disposition operations | | | are scheduled for execution on the Controller | | | eventoloop thread. The caller on the component is | | | blocked until task completion (or timeout). | +-----------------+----------------------------------------------------+ |Issues | The ProtonIncomingMessage is used for both RPC | | | and Notification Server instances. Conceptually, | | | a Notification Server should not schedule a reply | | | and a RPC Server should not schedule a message | | | requeue. Subclassing base.IncomingMessage for | | | Notifications and base.RpcIncomingMessage for RPC | | | could be a consideration. | +-----------------+----------------------------------------------------+ ProtonListener -------------- +-----------------+----------------------------------------------------+ |Component | *ProtonListener* | +=================+====================================================+ |Responsibilities | Responsible for providing the oslo.Messaging | | | base.PollStyleListener implementation. 
| | | | | | Provides the capabilities to manage the queue of | | | incoming messages received from the messaging links| | | | | | Returns instance of ProtonIncomingMessage to | | | to Servers | +-----------------+----------------------------------------------------+ |Collaborators | | | | Queue | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | An instance is created for each subscription | | | request (e.g. RPC or Notification Server). | | | | | | The Controller maintains a map of Servers indexed | | | by each specific ProtonListener identifier (target)| +-----------------+----------------------------------------------------+ |Issues | | +-----------------+----------------------------------------------------+ SubscribeTask ------------- +-----------------+----------------------------------------------------+ |Component | *SubscribeTask* | +=================+====================================================+ |Responsibilities | Responsible for orchestrating a subscription to a | | | given target. | | | | | | Provides the capability to prepare and schedule | | | the subscription call on the Controller eventloop | | | thread. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Controller | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created for each ProtonDriver subscription | | | request (e.g. listen or listen_for_notifications). | | | | | | The task is prepared and scheduled on the caller's | | | thread. The subscribe operation is executed on the | | | Controller's eventloop thread. The task completes | | | once the subscription has been established on the | | | message bus. | +-----------------+----------------------------------------------------+ |Issues | | +-----------------+----------------------------------------------------+ SendTask -------- +-----------------+----------------------------------------------------+ |Component | *SendTask* | +=================+====================================================+ |Responsibilities | Responsible for sending a message to a given | | | target. | | | | | | Provides the capability to prepare and schedule | | | the send call on the Controller eventloop thread. | | | | | | Provides the ability to be called by Controller | | | eventloop thread to indicate the settlement of the | | | message (e.g. acknowledge or nack). | | | | | | Provides the ability to be called by Controller | | | eventloop thread upon expiry of send timeout | | | duration or general message delivery failure. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Controller | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created for each ProtonDriver "RPC Cast" or | | | "Notify" send request. The component is destroyed | | | when the message transfer has reached a terminal | | | state (e.g. settled). | | | | | | The task is prepared and scheduled on the caller's | | | thread. The send operation is executed on the | | | Controller's eventloop thread. | | | | | | All retry, timeout and acknowledge operations are | | | performed on Controller eventloop thread and | | | indicated back to the caller thread. 
| +-----------------+----------------------------------------------------+ |Issues | | +-----------------+----------------------------------------------------+ RPCCallTask ----------- +-----------------+----------------------------------------------------+ |Component | *RPCCallTask* | +=================+====================================================+ |Responsibilities | Responsible for sending an RPC Call message to a | | | given target. | | | | | | Provides all the capabilities derived from the | | | parent SendTask component. | | | | | | Provides the additional capability to prepare for | | | the RPC Call response message that will be returned| | | on the senders reply link. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Controller | | | Sender | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created for each ProtonDriver "RPC Call" | | | send request. It is destroyed once the RPC | | | exchanged has reached its terminal state. | | | | | | The task is prepared and scheduled on the caller's | | | thread. The send operation is executed on the | | | Controller's eventloop thread. | | | | | | The Controller manages a single receiving link for | | | all RPC reply messages. Message are routed | | | using the correlation-id header in the response | | | message. | +-----------------+----------------------------------------------------+ |Issues | | +-----------------+----------------------------------------------------+ MessageDispositionTasks ----------------------- +-----------------+----------------------------------------------------+ |Component | *MessageDispositionTask* | +=================+====================================================+ |Responsibilities | Responsible for updating the message disposition | | | for ProtonIncomingMessage. | | | | | | Provides the ability to acknowledge or requeue the | | | message according to application determination. | +-----------------+----------------------------------------------------+ |Collaborators | | | | Controller | | | ProtonIncomingMessage | | | Server | +-----------------+----------------------------------------------------+ |Notes | The component is dynamically created and destroyed.| | | It is created by ProtonIncomingMessage settlement | | | calls (acknowledge or requeue). It is destroyed | | | once the disposition is updated in the Proton | | | protocol engine. | | | | | | the task is prepared and scheduled on the caller's | | | thread. The disposition operation is a function | | | closure on the target server, receiver link and | | | delivery handle for the message received on the | | | Server receiver call back. The closure is executed | | | on the Controller's eventloop thread. | | | | | | The settlement of RPC responses is automatic and | | | not under application control. | +-----------------+----------------------------------------------------+ |Issues | | +-----------------+----------------------------------------------------+ Service and Operational Qualities ================================= This section describes the primary service and operational qualities that are relevant to the driver architecture and implementation. These non-functional factors define the behavior of the driver implementation (e.g. limits and capacities). These behaviors can be generally categorized as being due to a design time (e.g. limit enforced by implementation) or a run time (e.g. 
limit due to environment, resources, etc.) constraint. The full detail and measures for these qualities is outside the scope of this document but should be included in any performance and scalability analysis of the driver implementation. +-------------+--------------------------------------------+------------+ | Quality | Description | Limit | +-------------+--------------------------------------------+------------+ | Servers | The number of RPC or Notify servers that | Environment| | | the driver will concurrently subscribe to | | | | the messaging bus (e.g. Listeners) | | +-------------+--------------------------------------------+------------+ | Subscription| The maximum rate at which servers can be | Environment| | Rate | subscribed and attached to the message bus | | +-------------+--------------------------------------------+------------+ | Senders | The number of unique Targets that can | Environment| | | be concurrently defined for the destination| | | | of RPC or Notify message transfer | | +-------------+--------------------------------------------+------------+ | Pending | The number of messages that the driver | Environment| | Sends | will queue while waiting for link | | | | availability or flow credit | | +-------------+--------------------------------------------+------------+ | Sends | The number of concurrent unacked messages | Environment| | Outstanding | the driver will send | | | | | | +-------------+--------------------------------------------+------------+ | Server Link | The number of message credits an RPC or | Design | | Credits | Notification server will issue | | | | | | +-------------+--------------------------------------------+------------+ | RPC Reply | The number of RPC reply message credits | Design | | Link Credits| the driver will issue | | | | | | +-------------+--------------------------------------------+------------+ | Message | The rate that the driver will transfer | Environment| | Transfer | requests to the message bus | | | Rate | | | +-------------+--------------------------------------------+------------+ | Message | The rate of transfer for the message | Environment| | Data | body "payload" | | | Throughput | | | +-------------+--------------------------------------------+------------+ | Tasks | The number of concurrent client requests | Design | | Outstanding | that can be queued for driver thread | | | | processing. | | +-------------+--------------------------------------------+------------+ | Message | The number of attempts the driver will | Design | | Retries | make to send a message | | | | | | +-------------+--------------------------------------------+------------+ | Transport | The number of Transport Hosts that can | Environment| | Hosts | be specified for connection management | | | | (e.g. selection and failover) | | +-------------+--------------------------------------------+------------+ References ========== .. [1] https://blueprints.launchpad.net/oslo.messaging/+spec/amqp10-driver-implementation .. [2] https://git.openstack.org/cgit/openstack/oslo-specs/tree/specs/juno/amqp10-driver-implementation.rst .. [3] https://review.openstack.org/#/c/314603/ .. [4] https://github.com/kgiusti/pyngus .. [5] https://github.com/apache/qpid-proton .. [6] https://review.openstack.org/#/c/317285/ .. [7] https://git.openstack.org/openstack/oslo.messaging oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp1_driver/eventloop.py0000666000175100017510000003426113224676046026407 0ustar zuulzuul00000000000000# Copyright 2014, Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A thread that performs all messaging I/O and protocol event handling. This module provides a background thread that handles messaging operations scheduled via the Controller, and performs blocking socket I/O and timer processing. This thread is designed to be as simple as possible - all the protocol specific intelligence is provided by the Controller and executed on the background thread via callables. """ import collections import errno import heapq import logging import math from monotonic import monotonic as now # noqa import os import select import socket import threading import uuid import pyngus from oslo_messaging._i18n import _LE, _LI, _LW LOG = logging.getLogger(__name__) def compute_timeout(offset): # minimize the timer granularity to one second so we don't have to track # too many timers return math.ceil(now() + offset) class _SocketConnection(object): """Associates a pyngus Connection with a python network socket, and handles all connection-related I/O and timer events. """ def __init__(self, name, container, properties, handler): self.name = name self.socket = None self.pyngus_conn = None self._properties = properties # The handler is a pyngus ConnectionEventHandler, which is invoked by # pyngus on connection-related events (active, closed, error, etc). # Currently it is the Controller object. self._handler = handler self._container = container def fileno(self): """Allows use of a _SocketConnection in a select() call. 
""" return self.socket.fileno() def read_socket(self): """Called to read from the socket.""" if self.socket: try: pyngus.read_socket_input(self.pyngus_conn, self.socket) self.pyngus_conn.process(now()) except (socket.timeout, socket.error) as e: # pyngus handles EAGAIN/EWOULDBLOCK and EINTER self.pyngus_conn.close_input() self.pyngus_conn.close_output() self._handler.socket_error(str(e)) def write_socket(self): """Called to write to the socket.""" if self.socket: try: pyngus.write_socket_output(self.pyngus_conn, self.socket) self.pyngus_conn.process(now()) except (socket.timeout, socket.error) as e: # pyngus handles EAGAIN/EWOULDBLOCK and EINTER self.pyngus_conn.close_output() self.pyngus_conn.close_input() self._handler.socket_error(str(e)) def connect(self, host): """Connect to host and start the AMQP protocol.""" addr = socket.getaddrinfo(host.hostname, host.port, socket.AF_UNSPEC, socket.SOCK_STREAM) if not addr: key = "%s:%i" % (host.hostname, host.port) error = "Invalid peer address '%s'" % key LOG.error(_LE("Invalid peer address '%s'"), key) self._handler.socket_error(error) return my_socket = socket.socket(addr[0][0], addr[0][1], addr[0][2]) my_socket.setblocking(0) # 0=non-blocking my_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) try: my_socket.connect(addr[0][4]) except socket.error as e: if e.errno != errno.EINPROGRESS: error = "Socket connect failure '%s'" % str(e) LOG.error(_LE("Socket connect failure '%s'"), str(e)) self._handler.socket_error(error) return self.socket = my_socket props = self._properties.copy() if pyngus.VERSION >= (2, 0, 0): # configure client authentication # props['x-server'] = False if host.username: props['x-username'] = host.username props['x-password'] = host.password or "" self.pyngus_conn = self._container.create_connection(self.name, self._handler, props) self.pyngus_conn.user_context = self if pyngus.VERSION < (2, 0, 0): # older versions of pyngus requires manual SASL configuration: # determine the proper SASL mechanism: PLAIN if a username/password # is present, else ANONYMOUS pn_sasl = self.pyngus_conn.pn_sasl if host.username: password = host.password if host.password else "" pn_sasl.plain(host.username, password) else: pn_sasl.mechanisms("ANONYMOUS") pn_sasl.client() self.pyngus_conn.open() def reset(self, name=None): """Clean up the current state, expect 'connect()' to be recalled later. """ # note well: since destroy() is called on the connection, do not invoke # this method from a pyngus callback! if self.pyngus_conn: self.pyngus_conn.destroy() self.pyngus_conn = None self.close() if name: self.name = name def close(self): if self.socket: self.socket.close() self.socket = None class Scheduler(object): """Schedule callables to be run in the future. 
""" class Event(object): # simply hold a reference to a callback that can be set to None if the # alarm is canceled def __init__(self, callback): self.callback = callback def cancel(self): # quicker than rebalancing the tree self.callback = None def __init__(self): self._callbacks = {} self._deadlines = [] def alarm(self, request, deadline): """Request a callable be executed at a specific time """ try: callbacks = self._callbacks[deadline] except KeyError: callbacks = list() self._callbacks[deadline] = callbacks heapq.heappush(self._deadlines, deadline) entry = Scheduler.Event(request) callbacks.append(entry) return entry def defer(self, request, delay): """Request a callable be executed after delay seconds """ return self.alarm(request, compute_timeout(delay)) @property def _next_deadline(self): """The timestamp of the next expiring event or None """ return self._deadlines[0] if self._deadlines else None def _get_delay(self, max_delay=None): """Get the delay in milliseconds until the next callable needs to be run, or 'max_delay' if no outstanding callables or the delay to the next callable is > 'max_delay'. """ due = self._deadlines[0] if self._deadlines else None if due is None: return max_delay _now = now() if due <= _now: return 0 else: return min(due - _now, max_delay) if max_delay else due - _now def _process(self): """Invoke all expired callables.""" if self._deadlines: _now = now() try: while self._deadlines[0] <= _now: deadline = heapq.heappop(self._deadlines) callbacks = self._callbacks[deadline] del self._callbacks[deadline] for cb in callbacks: cb.callback and cb.callback() except IndexError: pass class Requests(object): """A queue of callables to execute from the eventloop thread's main loop. """ def __init__(self): self._requests = collections.deque() self._wakeup_pipe = os.pipe() self._pipe_ready = False # prevents blocking on an empty pipe self._pipe_lock = threading.Lock() def wakeup(self, request=None): """Enqueue a callable to be executed by the eventloop, and force the eventloop thread to wake up from select(). """ with self._pipe_lock: if request: self._requests.append(request) if not self._pipe_ready: self._pipe_ready = True os.write(self._wakeup_pipe[1], b'!') def fileno(self): """Allows this request queue to be used by select().""" return self._wakeup_pipe[0] def process_requests(self): """Invoked by the eventloop thread, execute each queued callable.""" with self._pipe_lock: if not self._pipe_ready: return self._pipe_ready = False os.read(self._wakeup_pipe[0], 512) requests = self._requests self._requests = collections.deque() for r in requests: r() class Thread(threading.Thread): """Manages socket I/O and executes callables queued up by external threads. """ def __init__(self, container_name, node, command, pid): super(Thread, self).__init__() # callables from other threads: self._requests = Requests() # delayed callables (only used on this thread for now): self._scheduler = Scheduler() self._connection = None # Configure a container if container_name is None: container_name = ("openstack.org/om/container/%s/%s/%s/%s" % (node, command, pid, uuid.uuid4().hex)) self._container = pyngus.Container(container_name) self.name = "Thread for Proton container: %s" % self._container.name self._shutdown = False self.daemon = True self.start() def wakeup(self, request=None): """Wake up the eventloop thread, Optionally providing a callable to run when the eventloop wakes up. Thread safe. """ self._requests.wakeup(request) def shutdown(self): """Shutdown the eventloop thread. 
Thread safe. """ LOG.debug("eventloop shutdown requested") self._shutdown = True self.wakeup() def destroy(self): # release the container. This can only be called after the eventloop # thread exited self._container.destroy() self._container = None # the following methods are not thread safe - they must be run from the # eventloop thread def defer(self, request, delay): """Invoke request after delay seconds.""" return self._scheduler.defer(request, delay) def alarm(self, request, deadline): """Invoke request at a particular time""" return self._scheduler.alarm(request, deadline) def connect(self, host, handler, properties): """Get a _SocketConnection to a peer represented by url.""" key = "openstack.org/om/connection/%s:%s/" % (host.hostname, host.port) # return pre-existing conn = self._container.get_connection(key) if conn: return conn.user_context # create a new connection - this will be stored in the # container, using the specified name as the lookup key, or if # no name was provided, the host:port combination sc = _SocketConnection(key, self._container, properties, handler=handler) sc.connect(host) self._connection = sc return sc def run(self): """Run the proton event/timer loop.""" LOG.debug("Starting Proton thread, container=%s", self._container.name) try: self._main_loop() except Exception: # unknown error - fatal LOG.exception("Fatal unhandled event loop error!") raise def _main_loop(self): # Main event loop while not self._shutdown: readfds = [self._requests] writefds = [] deadline = self._scheduler._next_deadline pyngus_conn = self._connection and self._connection.pyngus_conn if pyngus_conn and self._connection.socket: if pyngus_conn.needs_input: readfds.append(self._connection) if pyngus_conn.has_output: writefds.append(self._connection) if pyngus_conn.deadline: deadline = (pyngus_conn.deadline if not deadline else min(deadline, pyngus_conn.deadline)) # force select to return in time to service the next expiring timer if deadline: _now = now() timeout = 0 if deadline <= _now else (deadline - _now) else: timeout = None # and now we wait... try: select.select(readfds, writefds, [], timeout) except select.error as serror: if serror[0] == errno.EINTR: LOG.warning(_LW("ignoring interrupt from select(): %s"), str(serror)) continue raise # assuming fatal... # Ignore the select return value - simply poll the socket for I/O. # Testing shows that polling improves latency over checking the # lists returned by select() self._requests.process_requests() self._connection.read_socket() if pyngus_conn and pyngus_conn.deadline: _now = now() if pyngus_conn.deadline <= _now: pyngus_conn.process(_now) self._connection.write_socket() self._scheduler._process() # run any deferred requests LOG.info(_LI("eventloop thread exiting, container=%s"), self._container.name) oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp1_driver/addressing.py0000666000175100017510000002663713224676046026527 0ustar zuulzuul00000000000000# Copyright 2016, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Utilities that map from a Target address to a proper AMQP 1.0 address. This module defines a utility class that translates a high level oslo.messaging address (Target) into the message-level address used on the message bus. This translation may be statically configured or determined when the connection to the message bus is made. The Target members that are used to generate the address are: * exchange * topic * server flag * fanout flag In addition a 'service' tag is associated with the address. This tag determines the service associated with an address (e.g. rpc or notification) so that traffic can be partitioned based on its use. """ import abc import logging from oslo_messaging._i18n import _LW from oslo_messaging.target import Target __all__ = [ "keyify", "AddresserFactory", "SERVICE_RPC", "SERVICE_NOTIFY" ] SERVICE_RPC = 0 SERVICE_NOTIFY = 1 LOG = logging.getLogger(__name__) def keyify(address, service=SERVICE_RPC): """Create a hashable key from a Target and service that will uniquely identify the generated address. This key is used to map the abstract oslo.messaging address to its corresponding AMQP link(s). This mapping may be done before the connection is established. """ if isinstance(address, Target): # service is important because the resolved address may be # different based on whether or not this Target is used for # notifications or RPC return ("Target:{t={%s} e={%s} s={%s} f={%s} service={%s}}" % (address.topic, address.exchange, address.server, address.fanout, service)) else: # absolute address can be used without modification return "String:{%s}" % address class Addresser(object): """Base class message bus address generator. Used to convert an oslo.messaging address into an AMQP 1.0 address string used over the connection to the message bus. 
""" def __init__(self, default_exchange): self._default_exchange = default_exchange def resolve(self, target, service): if not isinstance(target, Target): # an already resolved address return target # Return a link address for a given target if target.fanout: return self.multicast_address(target, service) elif target.server: return self.unicast_address(target, service) else: return self.anycast_address(target, service) @abc.abstractmethod def multicast_address(self, target, service): """Address used to broadcast to all subscribers """ @abc.abstractmethod def unicast_address(self, target, service): """Address used to target a specific subscriber (direct) """ @abc.abstractmethod def anycast_address(self, target, service): """Address used for shared subscribers (competing consumers) """ def _concat(self, sep, items): return sep.join(filter(bool, items)) class LegacyAddresser(Addresser): """Legacy addresses are in the following format: multicast: '$broadcast_prefix[.$vhost].$exchange.$topic.all' unicast: '$server_prefix[.$vhost].$exchange.$topic.$server' anycast: '$group_prefix[.$vhost].$exchange.$topic' Legacy addresses do not distinguish RPC traffic from Notification traffic """ def __init__(self, default_exchange, server_prefix, broadcast_prefix, group_prefix, vhost): super(LegacyAddresser, self).__init__(default_exchange) self._server_prefix = server_prefix self._broadcast_prefix = broadcast_prefix self._group_prefix = group_prefix self._vhost = vhost def multicast_address(self, target, service): return self._concat(".", [self._broadcast_prefix, self._vhost, target.exchange or self._default_exchange, target.topic, "all"]) def unicast_address(self, target, service=SERVICE_RPC): return self._concat(".", [self._server_prefix, self._vhost, target.exchange or self._default_exchange, target.topic, target.server]) def anycast_address(self, target, service=SERVICE_RPC): return self._concat(".", [self._group_prefix, self._vhost, target.exchange or self._default_exchange, target.topic]) # for debug: def _is_multicast(self, address): return address.startswith(self._broadcast_prefix) def _is_unicast(self, address): return address.startswith(self._server_prefix) def _is_anycast(self, address): return address.startswith(self._group_prefix) def _is_service(self, address, service): # legacy addresses are the same for RPC or Notifications return True class RoutableAddresser(Addresser): """Routable addresses have different formats based their use. It starts with a prefix that is determined by the type of traffic (RPC or Notifications). The prefix is followed by a description of messaging delivery semantics. The delivery may be one of: 'multicast', 'unicast', or 'anycast'. The delivery semantics are followed by information pulled from the Target. 
The template is: $prefix/$semantics[/$vhost]/$exchange/$topic[/$server] Examples based on the default prefix and semantic values: rpc-unicast: "openstack.org/om/rpc/unicast/my-exchange/my-topic/my-server" notify-anycast: "openstack.org/om/notify/anycast/my-vhost/exchange/topic" """ def __init__(self, default_exchange, rpc_exchange, rpc_prefix, notify_exchange, notify_prefix, unicast_tag, multicast_tag, anycast_tag, vhost): super(RoutableAddresser, self).__init__(default_exchange) if not self._default_exchange: self._default_exchange = "openstack" # templates for address generation: self._vhost = vhost _rpc = rpc_prefix + "/" self._rpc_prefix = _rpc self._rpc_unicast = _rpc + unicast_tag self._rpc_multicast = _rpc + multicast_tag self._rpc_anycast = _rpc + anycast_tag _notify = notify_prefix + "/" self._notify_prefix = _notify self._notify_unicast = _notify + unicast_tag self._notify_multicast = _notify + multicast_tag self._notify_anycast = _notify + anycast_tag self._exchange = [ # SERVICE_RPC: rpc_exchange or self._default_exchange or 'rpc', # SERVICE_NOTIFY: notify_exchange or self._default_exchange or 'notify' ] def multicast_address(self, target, service=SERVICE_RPC): if service == SERVICE_RPC: prefix = self._rpc_multicast else: prefix = self._notify_multicast return self._concat("/", [prefix, self._vhost, target.exchange or self._exchange[service], target.topic]) def unicast_address(self, target, service=SERVICE_RPC): if service == SERVICE_RPC: prefix = self._rpc_unicast else: prefix = self._notify_unicast return self._concat("/", [prefix, self._vhost, target.exchange or self._exchange[service], target.topic, target.server]) def anycast_address(self, target, service=SERVICE_RPC): if service == SERVICE_RPC: prefix = self._rpc_anycast else: prefix = self._notify_anycast return self._concat("/", [prefix, self._vhost, target.exchange or self._exchange[service], target.topic]) # for debug: def _is_multicast(self, address): return (address.startswith(self._rpc_multicast) or address.startswith(self._notify_multicast)) def _is_unicast(self, address): return (address.startswith(self._rpc_unicast) or address.startswith(self._notify_unicast)) def _is_anycast(self, address): return (address.startswith(self._rpc_anycast) or address.startswith(self._notify_anycast)) def _is_service(self, address, service): return address.startswith(self._rpc_prefix if service == SERVICE_RPC else self._notify_prefix) class AddresserFactory(object): """Generates the proper Addresser based on configuration and the type of message bus the driver is connected to. """ def __init__(self, default_exchange, mode, **kwargs): self._default_exchange = default_exchange self._mode = mode self._kwargs = kwargs def __call__(self, remote_properties, vhost=None): # for backwards compatibility use legacy if dynamic and we're connected # to qpidd or we cannot identify the message bus. This can be # overridden via the configuration. product = remote_properties.get('product', 'qpid-cpp') # TODO(kgiusti): Router support was added in Newton. Remove this # warning post Newton, once the driver has stabilized. if product == "qpid-dispatch-router": w = _LW("This is the initial release of support for message" " routing technology. 
Be aware that messages are not" " queued and may be discarded if there are no consumers" " present.") LOG.warning(w) if self._mode == 'legacy' or (self._mode == 'dynamic' and product == 'qpid-cpp'): return LegacyAddresser(self._default_exchange, self._kwargs['legacy_server_prefix'], self._kwargs['legacy_broadcast_prefix'], self._kwargs['legacy_group_prefix'], vhost) else: return RoutableAddresser(self._default_exchange, self._kwargs.get("rpc_exchange"), self._kwargs["rpc_prefix"], self._kwargs.get("notify_exchange"), self._kwargs["notify_prefix"], self._kwargs["unicast"], self._kwargs["multicast"], self._kwargs["anycast"], vhost) oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp1_driver/controller.py0000666000175100017510000015026413224676046026561 0ustar zuulzuul00000000000000# Copyright 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Controller that manages the interface between the driver and the messaging service. This module defines a Controller class that is responsible for performing messaging-related operations (Tasks) requested by the driver, and for managing the connection to the messaging service. The Controller creates a background thread which performs all messaging operations and socket I/O. The Controller's messaging logic is executed in the background thread via lambda functions scheduled by the Controller. """ import abc import collections import logging from monotonic import monotonic as now # noqa import os import platform import random import sys import threading import uuid import proton import pyngus from six import iteritems from six import itervalues from six import moves from oslo_messaging._drivers.amqp1_driver.addressing import AddresserFactory from oslo_messaging._drivers.amqp1_driver.addressing import keyify from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_NOTIFY from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_RPC from oslo_messaging._drivers.amqp1_driver import eventloop from oslo_messaging._i18n import _LE, _LI, _LW from oslo_messaging import exceptions from oslo_messaging.target import Target from oslo_messaging import transport LOG = logging.getLogger(__name__) class Task(object): """Run a command on the eventloop thread, wait until it completes """ @abc.abstractmethod def wait(self): """Called by the client thread to wait for the operation to complete. The implementation may optionally return a value. """ @abc.abstractmethod def _execute(self, controller): """This method will be run on the eventloop thread to perform the messaging operation. """ class SubscribeTask(Task): """A task that creates a subscription to the given target. Messages arriving from the target are given to the listener. 
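    Illustrative use (a sketch only; 'ctrl' is a connected Controller and
    'listener' a ProtonListener, both assumed from the surrounding driver
    code)::

        task = SubscribeTask(target, listener)
        ctrl.add_task(task)  # subscribe() then runs on the eventloop thread
        task.wait()          # returns once the server links have been created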
""" def __init__(self, target, listener, notifications=False): super(SubscribeTask, self).__init__() self._target = target() # mutable - need a copy self._subscriber_id = listener.id self._in_queue = listener.incoming self._service = SERVICE_NOTIFY if notifications else SERVICE_RPC self._wakeup = threading.Event() def wait(self): self._wakeup.wait() def _execute(self, controller): controller.subscribe(self) self._wakeup.set() class SendTask(Task): """This is the class used by the Controller to send messages to a given destination. """ def __init__(self, name, message, target, deadline, retry, wait_for_ack, notification=False): super(SendTask, self).__init__() self.name = name # note: target can be either a Target class or a string # target is mutable - make copy self.target = target() if isinstance(target, Target) else target self.message = message self.deadline = deadline self.wait_for_ack = wait_for_ack self.service = SERVICE_NOTIFY if notification else SERVICE_RPC self.timer = None self._retry = None if retry is None or retry < 0 else retry self._wakeup = threading.Event() self._error = None def wait(self): self._wakeup.wait() return self._error def _execute(self, controller): controller.send(self) def _prepare(self, sender): """Called immediately before the message is handed off to the i/o system. This implies that the sender link is up. """ pass def _on_ack(self, state, info): """If wait_for_ack is True, this is called by the eventloop thread when the ack/nack is received from the peer. If wait_for_ack is False this is called by the eventloop right after the message is written to the link. In the last case state will always be set to ACCEPTED. """ if state != pyngus.SenderLink.ACCEPTED: msg = ("{name} message send to {target} failed: remote" " disposition: {disp}, info:" "{info}".format(name=self.name, target=self.target, disp=state, info=info)) self._error = exceptions.MessageDeliveryFailure(msg) LOG.warning("%s", msg) self._cleanup() self._wakeup.set() def _on_timeout(self): """Invoked by the eventloop when the send fails to complete before the timeout is reached. """ self.timer = None msg = ("{name} message sent to {target} failed: timed" " out".format(name=self.name, target=self.target)) LOG.warning("%s", msg) # Only raise a MessagingTimeout if the caller has explicitly specified # a timeout. self._error = exceptions.MessagingTimeout(msg) \ if self.message.ttl else \ exceptions.MessageDeliveryFailure(msg) self._cleanup() self._wakeup.set() def _on_error(self, description): """Invoked by the eventloop if the send operation fails for reasons other than timeout and nack. """ msg = ("{name} message sent to {target} failed:" " {reason}".format(name=self.name, target=self.target, reason=description)) LOG.warning("%s", msg) self._error = exceptions.MessageDeliveryFailure(msg) self._cleanup() self._wakeup.set() def _cleanup(self): if self.timer: self.timer.cancel() self.timer = None @property def _can_retry(self): # has the retry count expired? if self._retry is not None: self._retry -= 1 if self._retry < 0: return False return True class RPCCallTask(SendTask): """Performs an RPC Call. Sends the request and waits for a response from the destination. 
""" def __init__(self, target, message, deadline, retry, wait_for_ack): super(RPCCallTask, self).__init__("RPC Call", message, target, deadline, retry, wait_for_ack) self._reply_link = None self._reply_msg = None self._msg_id = None def wait(self): error = super(RPCCallTask, self).wait() return error or self._reply_msg def _prepare(self, sender): # reserve a message id for mapping the received response if self._msg_id: # already set so this is a re-transmit. To be safe cancel the old # msg_id and allocate a fresh one. self._reply_link.cancel_response(self._msg_id) self._reply_link = sender._reply_link rl = self._reply_link self._msg_id = rl.prepare_for_response(self.message, self._on_reply) def _on_reply(self, message): # called if/when the reply message arrives self._reply_msg = message self._msg_id = None # to prevent _cleanup() from cancelling it self._cleanup() self._wakeup.set() def _on_ack(self, state, info): if state != pyngus.SenderLink.ACCEPTED: super(RPCCallTask, self)._on_ack(state, info) # must wait for reply if ACCEPTED def _cleanup(self): if self._msg_id: self._reply_link.cancel_response(self._msg_id) self._reply_link = None super(RPCCallTask, self)._cleanup() class MessageDispositionTask(Task): """A task that updates the message disposition as accepted or released for a Server """ def __init__(self, disposition, released=False): super(MessageDispositionTask, self).__init__() self._disposition = disposition self._released = released def wait(self): # disposition update does not have to block the sender since there is # no result to pend for. This avoids a thread context switch with # every RPC call pass def _execute(self, controller): try: self._disposition(self._released) except Exception as e: # there's really nothing we can do about a failed disposition. LOG.exception(_LE("Message acknowledgment failed: %s"), e) class Sender(pyngus.SenderEventHandler): """A link for sending to a particular destination on the message bus. """ def __init__(self, destination, scheduler, delay, service): super(Sender, self).__init__() self._destination = destination self._service = service self._address = None self._link = None self._scheduler = scheduler self._delay = delay # for re-connecting/re-transmitting # holds all pending SendTasks self._pending_sends = collections.deque() # holds all messages sent but not yet acked self._unacked = set() self._reply_link = None self._connection = None self._resend_timer = None @property def pending_messages(self): return len(self._pending_sends) @property def unacked_messages(self): return len(self._unacked) def attach(self, connection, reply_link, addresser): """Open the link. Called by the Controller when the AMQP connection becomes active. """ self._connection = connection self._reply_link = reply_link self._address = addresser.resolve(self._destination, self._service) LOG.debug("Sender %s attached", self._address) self._link = self._open_link() def detach(self): """Close the link. Called by the controller when shutting down or in response to a close requested by the remote. May be re-attached later (after a reset is done) """ LOG.debug("Sender %s detached", self._address) self._connection = None self._reply_link = None if self._resend_timer: self._resend_timer.cancel() self._resend_timer = None if self._link: self._link.close() def reset(self, reason="Link reset"): """Called by the controller on connection failover. Release all link resources, abort any in-flight messages, and check the retry limit on all pending send requests. 
""" self._address = None self._connection = None self._reply_link = None if self._link: self._link.destroy() self._link = None self._abort_unacked(reason) self._check_retry_limit(reason) def destroy(self, reason="Link destroyed"): """Destroy the sender and all pending messages. Called on driver shutdown. """ LOG.debug("Sender %s destroyed", self._address) self.reset(reason) self._abort_pending(reason) def send_message(self, send_task): """Send a message out the link. """ if send_task.deadline: def timer_callback(): # may be in either list, or none self._unacked.discard(send_task) try: self._pending_sends.remove(send_task) except ValueError: pass send_task._on_timeout() send_task.timer = self._scheduler.alarm(timer_callback, send_task.deadline) if not self._can_send or self._pending_sends: self._pending_sends.append(send_task) else: self._send(send_task) # Pyngus callbacks: def sender_active(self, sender_link): LOG.debug("Sender %s active", self._address) self._send_pending() def credit_granted(self, sender_link): pass def sender_remote_closed(self, sender_link, pn_condition): # The remote has initiated a close. This could happen when the message # bus is shutting down, or it detected an error LOG.warning(_LW("sender %(addr)s failed due to remote initiated close:" " condition=%(cond)s"), {'addr': self._address, 'cond': pn_condition}) self._link.close() # sender_closed() will be called once the link completes closing def sender_closed(self, sender_link): self._handle_sender_closed() def sender_failed(self, sender_link, error): """Protocol error occurred.""" LOG.warning(_LW("sender %(addr)s failed error=%(error)s"), {'addr': self._address, 'error': error}) self._handle_sender_closed(str(error)) # end Pyngus callbacks def _handle_sender_closed(self, reason="Sender closed"): self._abort_unacked(reason) if self._connection: # still attached, so attempt to restart the link self._check_retry_limit(reason) self._scheduler.defer(self._reopen_link, self._delay) def _check_retry_limit(self, reason): # Called on recoverable connection or link failure. 
Remove any pending # sends that have exhausted their retry count: expired = set() for send_task in self._pending_sends: if not send_task._can_retry: expired.add(send_task) send_task._on_error("Message send failed: %s" % reason) while expired: self._pending_sends.remove(expired.pop()) def _abort_unacked(self, error): # fail all messages that have been sent to the message bus and have not # been acked yet while self._unacked: send_task = self._unacked.pop() send_task._on_error("Message send failed: %s" % error) def _abort_pending(self, error): # fail all messages that have yet to be sent to the message bus while self._pending_sends: send_task = self._pending_sends.popleft() send_task._on_error("Message send failed: %s" % error) @property def _can_send(self): return self._link and self._link.active # acknowledge status _TIMED_OUT = pyngus.SenderLink.TIMED_OUT _ACCEPTED = pyngus.SenderLink.ACCEPTED _RELEASED = pyngus.SenderLink.RELEASED _MODIFIED = pyngus.SenderLink.MODIFIED def _send(self, send_task): send_task._prepare(self) send_task.message.address = self._address if send_task.wait_for_ack: self._unacked.add(send_task) def pyngus_callback(link, handle, state, info): # invoked when the message bus (n)acks this message if state == Sender._TIMED_OUT: # ignore pyngus timeout - we maintain our own timer # which will properly deal with this case return self._unacked.discard(send_task) if state == Sender._ACCEPTED: send_task._on_ack(Sender._ACCEPTED, info) elif (state == Sender._RELEASED or (state == Sender._MODIFIED and # assuming delivery-failed means in-doubt: not info.get("delivery-failed") and not info.get("undeliverable-here"))): # These states indicate that the message was never # forwarded beyond the next hop so they can be # re-transmitted without risk of duplication self._resend(send_task) else: # some error - let task figure it out... send_task._on_ack(state, info) self._link.send(send_task.message, delivery_callback=pyngus_callback, handle=self, deadline=send_task.deadline) else: # do not wait for ack self._link.send(send_task.message, delivery_callback=None, handle=self, deadline=send_task.deadline) send_task._on_ack(pyngus.SenderLink.ACCEPTED, {}) def _resend(self, send_task): # the message bus returned the message without forwarding it. Wait a # bit for other outstanding sends to finish - most likely ending up # here since they are all going to the same destination - then resend # this message if send_task._can_retry: # note well: once there is something on the pending list no further # messages will be sent (they will all queue up behind this one). 
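            # (illustrative note: a task created with retry=N survives at most
            # N releases or link failures; _can_retry decrements _retry on
            # each recovery attempt and fails the send once the count goes
            # negative, so retry=0 disables retransmission entirely)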
self._pending_sends.append(send_task) if self._resend_timer is None: sched = self._scheduler # this will get the pending sends going again self._resend_timer = sched.defer(self._resend_pending, self._delay) else: send_task._on_error("Send retries exhausted") def _resend_pending(self): # run from the _resend_timer, attempt to resend pending messages self._resend_timer = None self._send_pending() def _send_pending(self): # flush all pending messages out if self._can_send: while self._pending_sends: self._send(self._pending_sends.popleft()) def _open_link(self): name = "openstack.org/om/sender/[%s]/%s" % (self._address, uuid.uuid4().hex) link = self._connection.create_sender(name=name, source_address=self._address, target_address=self._address, event_handler=self) link.open() return link def _reopen_link(self): if self._connection: if self._link: self._link.destroy() self._link = self._open_link() class Replies(pyngus.ReceiverEventHandler): """This is the receiving link for all RPC reply messages. Messages are routed to the proper incoming queue using the correlation-id header in the message. """ def __init__(self, connection, on_ready, on_down, capacity): self._correlation = {} # map of correlation-id to response queue self._on_ready = on_ready self._on_down = on_down rname = ("openstack.org/om/receiver/[rpc-response]/%s" % uuid.uuid4().hex) self._receiver = connection.create_receiver("rpc-response", event_handler=self, name=rname) # capacity determines the maximum number of reply messages this link is # willing to receive. As messages are received and capacity is # consumed, this driver will 'top up' the capacity back to max # capacity. This number should be large enough to avoid needlessly # flow-controlling the replies. self._capacity = capacity self._capacity_low = (capacity + 1) / 2 self._receiver.open() def detach(self): # close the link if self._receiver: self._receiver.close() def destroy(self): self._correlation.clear() if self._receiver: self._receiver.destroy() self._receiver = None def prepare_for_response(self, request, callback): """Apply a unique message identifier to this request message. This will be used to identify messages sent in reply. The identifier is placed in the 'id' field of the request message. It is expected that the identifier will appear in the 'correlation-id' field of the corresponding response message. """ request.id = uuid.uuid4().hex # reply is placed on reply_queue self._correlation[request.id] = callback request.reply_to = self._receiver.source_address return request.id def cancel_response(self, msg_id): """Abort waiting for the response message corresponding to msg_id. This can be used if the request fails and no reply is expected. """ try: del self._correlation[msg_id] except KeyError: pass @property def active(self): return self._receiver and self._receiver.active # Pyngus ReceiverLink event callbacks: def receiver_active(self, receiver_link): """This is a Pyngus callback, invoked by Pyngus when the receiver_link has transitioned to the open state and is able to receive incoming messages. """ LOG.debug("Replies link active src=%s", self._receiver.source_address) receiver_link.add_capacity(self._capacity) self._on_ready() def receiver_remote_closed(self, receiver, pn_condition): """This is a Pyngus callback, invoked by Pyngus when the peer of this receiver link has initiated closing the connection. 
""" if pn_condition: LOG.error(_LE("Reply subscription closed by peer: %s"), pn_condition) receiver.close() def receiver_failed(self, receiver_link, error): """Protocol error occurred.""" LOG.error(_LE("Link to reply queue failed. error=%(error)s"), {"error": error}) self._on_down() def receiver_closed(self, receiver_link): self._on_down() def message_received(self, receiver, message, handle): """This is a Pyngus callback, invoked by Pyngus when a new message arrives on this receiver link from the peer. """ key = message.correlation_id try: self._correlation[key](message) # cleanup (only need one response per request) del self._correlation[key] receiver.message_accepted(handle) except KeyError: LOG.warning(_LW("Can't find receiver for response msg id=%s, " "dropping!"), key) receiver.message_modified(handle, True, True, None) # ensure we have enough credit if receiver.capacity <= self._capacity_low: receiver.add_capacity(self._capacity - receiver.capacity) class Server(pyngus.ReceiverEventHandler): """A group of links that receive messages from a set of addresses derived from a given target. Messages arriving on the links are placed on the 'incoming' queue. """ def __init__(self, target, incoming, scheduler, delay, capacity): self._target = target self._incoming = incoming self._addresses = [] self._capacity = capacity # credit per each link self._capacity_low = (capacity + 1) / 2 self._receivers = [] self._scheduler = scheduler self._delay = delay # for link re-attach self._connection = None self._reopen_scheduled = False def attach(self, connection): """Create receiver links over the given connection for all the configured addresses. """ self._connection = connection for a in self._addresses: name = "openstack.org/om/receiver/[%s]/%s" % (a, uuid.uuid4().hex) r = self._open_link(a, name) self._receivers.append(r) def detach(self): """Attempt a clean shutdown of the links""" self._connection = None self._addresses = [] for receiver in self._receivers: receiver.close() def reset(self): # destroy the links, but keep the addresses around since we may be # failing over. Since links are destroyed, this cannot be called from # any of the following ReceiverLink callbacks. self._connection = None self._addresses = [] self._reopen_scheduled = False for r in self._receivers: r.destroy() self._receivers = [] # Pyngus ReceiverLink event callbacks. Note that all of the Server's links # share this handler def receiver_remote_closed(self, receiver, pn_condition): """This is a Pyngus callback, invoked by Pyngus when the peer of this receiver link has initiated closing the connection. """ LOG.debug("Server subscription to %s remote detach", receiver.source_address) if pn_condition: vals = { "addr": receiver.source_address or receiver.target_address, "err_msg": pn_condition } LOG.error(_LE("Server subscription %(addr)s closed " "by peer: %(err_msg)s"), vals) receiver.close() def receiver_failed(self, receiver_link, error): """Protocol error occurred.""" LOG.error(_LE("Listener link queue failed. 
error=%(error)s"), {"error": error}) self.receiver_closed(receiver_link) def receiver_closed(self, receiver_link): LOG.debug("Server subscription to %s closed", receiver_link.source_address) # If still attached, attempt to re-start link if self._connection and not self._reopen_scheduled: LOG.debug("Server subscription reopen scheduled") self._reopen_scheduled = True self._scheduler.defer(self._reopen_links, self._delay) def message_received(self, receiver, message, handle): """This is a Pyngus callback, invoked by Pyngus when a new message arrives on this receiver link from the peer. """ def message_disposition(released=False): if receiver in self._receivers and not receiver.closed: if released: receiver.message_released(handle) else: receiver.message_accepted(handle) if receiver.capacity <= self._capacity_low: receiver.add_capacity(self._capacity - receiver.capacity) else: LOG.debug("Can't find receiver for settlement") qentry = {"message": message, "disposition": message_disposition} self._incoming.put(qentry) def _open_link(self, address, name): props = {"snd-settle-mode": "mixed"} r = self._connection.create_receiver(source_address=address, target_address=address, event_handler=self, name=name, properties=props) r.add_capacity(self._capacity) r.open() return r def _reopen_links(self): # attempt to re-establish any closed links LOG.debug("Server subscription reopening") self._reopen_scheduled = False if self._connection: for i in range(len(self._receivers)): link = self._receivers[i] if link.closed: addr = link.target_address name = link.name link.destroy() self._receivers[i] = self._open_link(addr, name) class RPCServer(Server): """Subscribes to RPC addresses""" def __init__(self, target, incoming, scheduler, delay, capacity): super(RPCServer, self).__init__(target, incoming, scheduler, delay, capacity) def attach(self, connection, addresser): # Generate the AMQP 1.0 addresses for the base class self._addresses = [ addresser.unicast_address(self._target, SERVICE_RPC), addresser.multicast_address(self._target, SERVICE_RPC), addresser.anycast_address(self._target, SERVICE_RPC) ] # now invoke the base class with the generated addresses super(RPCServer, self).attach(connection) class NotificationServer(Server): """Subscribes to Notification addresses""" def __init__(self, target, incoming, scheduler, delay, capacity): super(NotificationServer, self).__init__(target, incoming, scheduler, delay, capacity) def attach(self, connection, addresser): # Generate the AMQP 1.0 addresses for the base class self._addresses = [ addresser.anycast_address(self._target, SERVICE_NOTIFY) ] # now invoke the base class with the generated addresses super(NotificationServer, self).attach(connection) class Hosts(object): """An ordered list of TransportHost addresses. Connection failover progresses from one host to the next. username, password, and realm come from the configuration and are used only if no username/password/realm is present in the URL.
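    For example (an illustrative sketch): given a URL listing hosts
    'host-a:5672' and 'host-b' with no port, host-b's port defaults to 5672,
    any missing credentials fall back to the configured defaults, the starting
    entry is picked at random, and next() then rotates host-a -> host-b ->
    host-a and so on.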
""" def __init__(self, url, default_username=None, default_password=None, default_realm=None): self.virtual_host = url.virtual_host if url.hosts: self._entries = url.hosts[:] else: self._entries = [transport.TransportHost(hostname="localhost", port=5672)] for entry in self._entries: entry.port = entry.port or 5672 entry.username = entry.username or default_username entry.password = entry.password or default_password if default_realm and entry.username and '@' not in entry.username: entry.username = entry.username + '@' + default_realm self._current = random.randint(0, len(self._entries) - 1) @property def current(self): return self._entries[self._current] def next(self): if len(self._entries) > 1: self._current = (self._current + 1) % len(self._entries) return self.current def __repr__(self): return '' def __str__(self): r = ', vhost=%s' % self.virtual_host if self.virtual_host else '' return ", ".join(["%r" % th for th in self._entries]) + r class Controller(pyngus.ConnectionEventHandler): """Controls the connection to the AMQP messaging service. This object is the 'brains' of the driver. It maintains the logic for addressing, sending and receiving messages, and managing the connection. All messaging and I/O work is done on the Eventloop thread, allowing the driver to run asynchronously from the messaging clients. """ def __init__(self, url, default_exchange, config): self.processor = None self._socket_connection = None self._node = platform.node() or "" self._command = os.path.basename(sys.argv[0]) self._pid = os.getpid() # queue of drivertask objects to execute on the eventloop thread self._tasks = moves.queue.Queue(maxsize=500) # limit the number of Task()'s to execute per call to _process_tasks(). # This allows the eventloop main thread to return to servicing socket # I/O in a timely manner self._max_task_batch = 50 # cache of all Sender links indexed by address: self._all_senders = {} # active Sender links indexed by address: self._active_senders = set() # closing Sender links indexed by address: self._purged_senders = [] # Servers indexed by target. 
Each entry is a map indexed by the # specific ProtonListener's identifier: self._servers = {} self._container_name = config.oslo_messaging_amqp.container_name self.idle_timeout = config.oslo_messaging_amqp.idle_timeout self.trace_protocol = config.oslo_messaging_amqp.trace self.ssl = config.oslo_messaging_amqp.ssl self.ssl_ca_file = config.oslo_messaging_amqp.ssl_ca_file self.ssl_cert_file = config.oslo_messaging_amqp.ssl_cert_file self.ssl_key_file = config.oslo_messaging_amqp.ssl_key_file self.ssl_key_password = config.oslo_messaging_amqp.ssl_key_password self.ssl_allow_insecure = \ config.oslo_messaging_amqp.allow_insecure_clients self.ssl_verify_vhost = config.oslo_messaging_amqp.ssl_verify_vhost self.pseudo_vhost = config.oslo_messaging_amqp.pseudo_vhost self.sasl_mechanisms = config.oslo_messaging_amqp.sasl_mechanisms self.sasl_config_dir = config.oslo_messaging_amqp.sasl_config_dir self.sasl_config_name = config.oslo_messaging_amqp.sasl_config_name self.hosts = Hosts(url, config.oslo_messaging_amqp.username, config.oslo_messaging_amqp.password, config.oslo_messaging_amqp.sasl_default_realm) self.conn_retry_interval = \ config.oslo_messaging_amqp.connection_retry_interval self.conn_retry_backoff = \ config.oslo_messaging_amqp.connection_retry_backoff self.conn_retry_interval_max = \ config.oslo_messaging_amqp.connection_retry_interval_max self.link_retry_delay = config.oslo_messaging_amqp.link_retry_delay _opts = config.oslo_messaging_amqp factory_args = {"legacy_server_prefix": _opts.server_request_prefix, "legacy_broadcast_prefix": _opts.broadcast_prefix, "legacy_group_prefix": _opts.group_request_prefix, "rpc_prefix": _opts.rpc_address_prefix, "notify_prefix": _opts.notify_address_prefix, "multicast": _opts.multicast_address, "unicast": _opts.unicast_address, "anycast": _opts.anycast_address, "notify_exchange": _opts.default_notification_exchange, "rpc_exchange": _opts.default_rpc_exchange} self.addresser_factory = AddresserFactory(default_exchange, _opts.addressing_mode, **factory_args) self.addresser = None # cannot send an RPC request until the replies link is active, as we # need the peer assigned address, so need to delay sending any RPC # requests until this link is active: self.reply_link = None # Set True when the driver is shutting down self._closing = False # only schedule one outstanding reconnect attempt at a time self._reconnecting = False self._delay = self.conn_retry_interval # seconds between retries # prevent queuing up multiple requests to run _process_tasks() self._process_tasks_scheduled = False self._process_tasks_lock = threading.Lock() # credit levels for incoming links self._reply_credit = _opts.reply_link_credit self._rpc_credit = _opts.rpc_server_credit self._notify_credit = _opts.notify_server_credit # sender link maintenance timer and interval self._link_maint_timer = None self._link_maint_timeout = _opts.default_sender_link_timeout def connect(self): """Connect to the messaging service.""" self.processor = eventloop.Thread(self._container_name, self._node, self._command, self._pid) self.processor.wakeup(lambda: self._do_connect()) def add_task(self, task): """Add a Task for execution on processor thread.""" self._tasks.put(task) self._schedule_task_processing() def shutdown(self, timeout=30): """Shutdown the messaging service.""" LOG.info(_LI("Shutting down the AMQP 1.0 connection")) if self.processor: self.processor.wakeup(self._start_shutdown) LOG.debug("Waiting for eventloop to exit") self.processor.join(timeout) self._hard_reset("Shutting down") 
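        # After the reset, destroy all cached sender links and the server
        # maps, then release the eventloop thread itself.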
for sender in itervalues(self._all_senders): sender.destroy() self._all_senders.clear() self._servers.clear() self.processor.destroy() self.processor = None LOG.debug("Eventloop exited, driver shut down") # The remaining methods are reserved to run from the eventloop thread only! # They must not be invoked directly! # methods executed by Tasks created by the driver: def send(self, send_task): if send_task.deadline and send_task.deadline <= now(): send_task._on_timeout() return key = keyify(send_task.target, send_task.service) sender = self._all_senders.get(key) if not sender: sender = Sender(send_task.target, self.processor, self.link_retry_delay, send_task.service) self._all_senders[key] = sender if self.reply_link and self.reply_link.active: sender.attach(self._socket_connection.pyngus_conn, self.reply_link, self.addresser) self._active_senders.add(key) sender.send_message(send_task) def subscribe(self, subscribe_task): """Subscribe to a given target""" if subscribe_task._service == SERVICE_NOTIFY: t = "notification" server = NotificationServer(subscribe_task._target, subscribe_task._in_queue, self.processor, self.link_retry_delay, self._notify_credit) else: t = "RPC" server = RPCServer(subscribe_task._target, subscribe_task._in_queue, self.processor, self.link_retry_delay, self._rpc_credit) LOG.debug("Subscribing to %(type)s target %(target)s", {'type': t, 'target': subscribe_task._target}) key = keyify(subscribe_task._target, subscribe_task._service) servers = self._servers.get(key) if servers is None: servers = {} self._servers[key] = servers servers[subscribe_task._subscriber_id] = server if self._active: server.attach(self._socket_connection.pyngus_conn, self.addresser) # commands executed on the processor (eventloop) via 'wakeup()': def _do_connect(self): """Establish connection and reply subscription on processor thread.""" host = self.hosts.current conn_props = {'properties': {u'process': self._command, u'pid': self._pid, u'node': self._node}} # only set hostname in the AMQP 1.0 Open performative if the message # bus can interpret it as the virtual host. We leave it unspecified # since apparently no one can agree on how it should be used otherwise! if self.hosts.virtual_host and not self.pseudo_vhost: conn_props['hostname'] = self.hosts.virtual_host if self.idle_timeout: conn_props["idle-time-out"] = float(self.idle_timeout) if self.trace_protocol: conn_props["x-trace-protocol"] = self.trace_protocol # SSL configuration ssl_enabled = False if self.ssl: ssl_enabled = True conn_props["x-ssl"] = self.ssl if self.ssl_ca_file: conn_props["x-ssl-ca-file"] = self.ssl_ca_file ssl_enabled = True if self.ssl_cert_file: ssl_enabled = True conn_props["x-ssl-identity"] = (self.ssl_cert_file, self.ssl_key_file, self.ssl_key_password) if ssl_enabled: # Set the identity of the remote server for SSL to use when # verifying the received certificate. Typically this is the DNS # name used to set up the TCP connections. However some servers # may provide a certificate for the virtual host instead. If that # is the case we need to use the virtual hostname instead.
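            # (illustrative example: with ssl_verify_vhost enabled and a
            # transport URL naming virtual host 'my-vhost', the peer
            # certificate is matched against 'my-vhost' rather than the
            # connected hostname)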
# Refer to SSL Server Name Indication (SNI) for the entire story: # https://tools.ietf.org/html/rfc6066 if self.ssl_verify_vhost: if self.hosts.virtual_host: conn_props['x-ssl-peer-name'] = self.hosts.virtual_host else: conn_props['x-ssl-peer-name'] = host.hostname # SASL configuration: if self.sasl_mechanisms: conn_props["x-sasl-mechs"] = self.sasl_mechanisms if self.sasl_config_dir: conn_props["x-sasl-config-dir"] = self.sasl_config_dir if self.sasl_config_name: conn_props["x-sasl-config-name"] = self.sasl_config_name self._socket_connection = self.processor.connect(host, handler=self, properties=conn_props) LOG.debug("Connection initiated") def _process_tasks(self): """Execute Task objects in the context of the processor thread.""" with self._process_tasks_lock: self._process_tasks_scheduled = False count = 0 while (not self._tasks.empty() and count < self._max_task_batch): try: self._tasks.get(False)._execute(self) except Exception as e: LOG.exception(_LE("Error processing task: %s"), e) count += 1 # if we hit _max_task_batch, resume task processing later: if not self._tasks.empty(): self._schedule_task_processing() def _schedule_task_processing(self): """_process_tasks() helper: prevent queuing up multiple requests for task processing. This method is called both by the application thread and the processing thread. """ if self.processor: with self._process_tasks_lock: already_scheduled = self._process_tasks_scheduled self._process_tasks_scheduled = True if not already_scheduled: self.processor.wakeup(lambda: self._process_tasks()) def _start_shutdown(self): """Called when the application is closing the transport. Attempt to cleanly flush/close all links. """ self._closing = True if self._active: # try a clean shutdown self._detach_senders() self._detach_servers() self.reply_link.detach() self._socket_connection.pyngus_conn.close() else: # don't wait for a close from the remote, may never happen self.processor.shutdown() # reply link callbacks: def _reply_link_ready(self): """Invoked when the Replies reply link has become active. At this point, we are ready to receive messages, so start all pending RPC requests. """ LOG.info(_LI("Messaging is active (%(hostname)s:%(port)s%(vhost)s)"), {'hostname': self.hosts.current.hostname, 'port': self.hosts.current.port, 'vhost': ("/" + self.hosts.virtual_host if self.hosts.virtual_host else "")}) for sender in itervalues(self._all_senders): sender.attach(self._socket_connection.pyngus_conn, self.reply_link, self.addresser) def _reply_link_down(self): # Treat it as a recoverable failure because the RPC reply address is # now invalid for all in-flight RPC requests. if not self._closing: self._detach_senders() self._detach_servers() self._socket_connection.pyngus_conn.close() # once closed, _handle_connection_loss() will initiate reconnect # callback from eventloop on socket error def socket_error(self, error): """Called by eventloop when a socket error occurs.""" LOG.error(_LE("Socket failure: %s"), error) self._handle_connection_loss(str(error)) # Pyngus connection event callbacks (and their helpers), all invoked from # the eventloop thread: def connection_failed(self, connection, error): """This is a Pyngus callback, invoked by Pyngus when a non-recoverable error occurs on the connection. 
""" if connection is not self._socket_connection.pyngus_conn: # pyngus bug: ignore failure callback on destroyed connections return LOG.debug("AMQP Connection failure: %s", error) self._handle_connection_loss(str(error)) def connection_active(self, connection): """This is a Pyngus callback, invoked by Pyngus when the connection to the peer is up. At this point, the driver will activate all subscriber links (server) and the reply link. """ LOG.debug("Connection active (%(hostname)s:%(port)s), subscribing...", {'hostname': self.hosts.current.hostname, 'port': self.hosts.current.port}) # allocate an addresser based on the advertised properties of the # message bus props = connection.remote_properties or {} self.addresser = self.addresser_factory(props, self.hosts.virtual_host if self.pseudo_vhost else None) for servers in itervalues(self._servers): for server in itervalues(servers): server.attach(self._socket_connection.pyngus_conn, self.addresser) self.reply_link = Replies(self._socket_connection.pyngus_conn, self._reply_link_ready, self._reply_link_down, self._reply_credit) self._delay = self.conn_retry_interval # reset # schedule periodic maintenance of sender links self._link_maint_timer = self.processor.defer(self._purge_sender_links, self._link_maint_timeout) def connection_closed(self, connection): """This is a Pyngus callback, invoked by Pyngus when the connection has cleanly closed. This occurs after the driver closes the connection locally, and the peer has acknowledged the close. At this point, the shutdown of the driver's connection is complete. """ LOG.debug("AMQP connection closed.") # if the driver isn't being shutdown, failover and reconnect self._handle_connection_loss("AMQP connection closed.") def connection_remote_closed(self, connection, reason): """This is a Pyngus callback, invoked by Pyngus when the peer has requested that the connection be closed. """ # The messaging service/broker is trying to shut down the # connection. Acknowledge the close, and try to reconnect/failover # later once the connection has closed (connection_closed is called). if reason: LOG.info(_LI("Connection closed by peer: %s"), reason) self._detach_senders() self._detach_servers() self.reply_link.detach() self._socket_connection.pyngus_conn.close() def sasl_done(self, connection, pn_sasl, outcome): """This is a Pyngus callback invoked when the SASL handshake has completed. The outcome of the handshake is passed in the outcome argument. """ if outcome == proton.SASL.OK: return LOG.error(_LE("AUTHENTICATION FAILURE: Cannot connect to " "%(hostname)s:%(port)s as user %(username)s"), {'hostname': self.hosts.current.hostname, 'port': self.hosts.current.port, 'username': self.hosts.current.username}) # pyngus will invoke connection_failed() eventually def _handle_connection_loss(self, reason): """The connection to the messaging service has been lost. Try to reestablish the connection/failover if not shutting down the driver. """ self.addresser = None self._socket_connection.close() if self._closing: # we're in the middle of shutting down the driver anyways, # just consider it done: self.processor.shutdown() else: # for some reason, we've lost the connection to the messaging # service. 
Try to re-establish the connection: if not self._reconnecting: self._reconnecting = True LOG.info(_LI("delaying reconnect attempt for %d seconds"), self._delay) self.processor.defer(lambda: self._do_reconnect(reason), self._delay) self._delay = min(self._delay * self.conn_retry_backoff, self.conn_retry_interval_max) if self._link_maint_timer: self._link_maint_timer.cancel() self._link_maint_timer = None def _do_reconnect(self, reason): """Invoked on connection/socket failure, failover and re-connect to the messaging service. """ self._reconnecting = False if not self._closing: self._hard_reset(reason) host = self.hosts.next() LOG.info(_LI("Reconnecting to: %(hostname)s:%(port)s"), {'hostname': host.hostname, 'port': host.port}) self._socket_connection.connect(host) def _hard_reset(self, reason): """Reset the controller to its pre-connection state""" # note well: since this method destroys the connection, it cannot be # invoked directly from a pyngus callback. Use processor.defer() to # run this method on the main loop instead. for sender in self._purged_senders: sender.destroy(reason) del self._purged_senders[:] self._active_senders.clear() unused = [] for key, sender in iteritems(self._all_senders): # clean up any sender links that no longer have messages to send if sender.pending_messages == 0: unused.append(key) else: sender.reset(reason) self._active_senders.add(key) for key in unused: self._all_senders[key].destroy(reason) del self._all_senders[key] for servers in itervalues(self._servers): for server in itervalues(servers): server.reset() if self.reply_link: self.reply_link.destroy() self.reply_link = None if self._socket_connection: self._socket_connection.reset() def _detach_senders(self): """Close all sender links""" for sender in itervalues(self._all_senders): sender.detach() def _detach_servers(self): """Close all listener links""" for servers in itervalues(self._servers): for server in itervalues(servers): server.detach() def _purge_sender_links(self): """Purge inactive sender links""" if not self._closing: # destroy links that have already been closed for sender in self._purged_senders: sender.destroy("Idle link purged") del self._purged_senders[:] # determine next set to purge purge = set(self._all_senders.keys()) - self._active_senders for key in purge: sender = self._all_senders[key] if not sender.pending_messages and not sender.unacked_messages: sender.detach() self._purged_senders.append(self._all_senders.pop(key)) self._active_senders.clear() self._link_maint_timer = \ self.processor.defer(self._purge_sender_links, self._link_maint_timeout) @property def _active(self): # Is the connection up return (self._socket_connection and self._socket_connection.pyngus_conn.active) oslo.messaging-5.35.0/oslo_messaging/_drivers/amqp.py0000666000175100017510000001041713224676046022735 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Utilities for drivers based on the AMQPDriverBase. This module contains utility code used by drivers based on the AMQPDriverBase class. Specifically this includes the impl_rabbit driver. """ import collections import uuid from oslo_config import cfg import six from oslo_messaging._drivers import common as rpc_common deprecated_durable_opts = [ cfg.DeprecatedOpt('amqp_durable_queues', group='DEFAULT'), cfg.DeprecatedOpt('rabbit_durable_queues', group='DEFAULT') ] amqp_opts = [ cfg.BoolOpt('amqp_durable_queues', default=False, deprecated_opts=deprecated_durable_opts, help='Use durable queues in AMQP.'), cfg.BoolOpt('amqp_auto_delete', default=False, deprecated_group='DEFAULT', help='Auto-delete queues in AMQP.'), ] UNIQUE_ID = '_unique_id' class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.reply_q = kwargs.pop('reply_q', None) super(RpcContext, self).__init__(**kwargs) def deepcopy(self): values = self.to_dict() values['conf'] = self.conf values['msg_id'] = self.msg_id values['reply_q'] = self.reply_q return self.__class__(**values) def unpack_context(msg): """Unpack context from msg.""" context_dict = {} for key in list(msg.keys()): key = six.text_type(key) if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value context_dict['msg_id'] = msg.pop('_msg_id', None) context_dict['reply_q'] = msg.pop('_reply_q', None) return RpcContext.from_dict(context_dict) def pack_context(msg, context): """Pack context into msg. Values for message keys need to be less than 255 chars, so we pull context out into a bunch of separate keys. If we want to support more arguments in rabbit messages, we may want to do the same for args at some point. """ if isinstance(context, dict): context_d = context.items() else: context_d = context.to_dict().items() msg.update(('_context_%s' % key, value) for (key, value) in context_d) class _MsgIdCache(object): """This class checks for duplicate messages.""" # NOTE: This value could be made a configuration item, but # it is not necessary to change it in most cases, # so leave this value static for now. DUP_MSG_CHECK_SIZE = 16 def __init__(self, **kwargs): self.prev_msgids = collections.deque([], maxlen=self.DUP_MSG_CHECK_SIZE) def check_duplicate_message(self, message_data): """AMQP consumers may read the same message twice when exceptions occur before the ack is returned. This method prevents that. """ try: msg_id = message_data.pop(UNIQUE_ID) except KeyError: return if msg_id in self.prev_msgids: raise rpc_common.DuplicateMessageError(msg_id=msg_id) return msg_id def add(self, msg_id): if msg_id and msg_id not in self.prev_msgids: self.prev_msgids.append(msg_id) def _add_unique_id(msg): """Add unique_id for checking duplicate messages.""" unique_id = uuid.uuid4().hex msg.update({UNIQUE_ID: unique_id}) class AMQPDestinationNotFound(Exception): pass oslo.messaging-5.35.0/oslo_messaging/_drivers/__init__.py0000666000175100017510000000000013224676046023521 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/base.py0000666000175100017510000006021713224676046022714 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import threading from oslo_config import cfg from oslo_utils import excutils from oslo_utils import timeutils import six from oslo_messaging import exceptions base_opts = [ cfg.IntOpt('rpc_conn_pool_size', default=30, deprecated_group='DEFAULT', help='Size of RPC connection pool.'), cfg.IntOpt('conn_pool_min_size', default=2, help='The pool size limit for connections expiration policy'), cfg.IntOpt('conn_pool_ttl', default=1200, help='The time-to-live in sec of idle connections in the pool') ] def batch_poll_helper(func): """Decorator to poll messages in batch This decorator is used to add message batching support to a :py:meth:`PollStyleListener.poll` implementation that only polls for a single message per call. """ def wrapper(in_self, timeout=None, batch_size=1, batch_timeout=None): incomings = [] driver_prefetch = in_self.prefetch_size if driver_prefetch > 0: batch_size = min(batch_size, driver_prefetch) timeout = batch_timeout or timeout with timeutils.StopWatch(timeout) as watch: while True: message = func(in_self, timeout=watch.leftover(True)) if message is not None: incomings.append(message) if len(incomings) == batch_size or message is None: break return incomings return wrapper class TransportDriverError(exceptions.MessagingException): """Base class for transport driver specific exceptions.""" @six.add_metaclass(abc.ABCMeta) class IncomingMessage(object): """The IncomingMessage class represents a single message received from the messaging backend. Instances of this class are passed to up a server's messaging processing logic. The backend driver must provide a concrete derivation of this class which provides the backend specific logic for its public methods. :param ctxt: Context metadata provided by sending application. :type ctxt: dict :param message: The message as provided by the sending application. :type message: dict """ def __init__(self, ctxt, message): self.ctxt = ctxt self.message = message def acknowledge(self): """Called by the server to acknowledge receipt of the message. When this is called the driver must notify the backend of the acknowledgment. This call should block at least until the driver has processed the acknowledgment request locally. It may unblock before the acknowledgment state has been acted upon by the backend. If the acknowledge operation fails this method must issue a log message describing the reason for the failure. :raises: Does not raise an exception """ @abc.abstractmethod def requeue(self): """Called by the server to return the message to the backend so it may be made available for consumption by another server. This call should block at least until the driver has processed the requeue request locally. It may unblock before the backend makes the requeued message available for consumption. If the requeue operation fails this method must issue a log message describing the reason for the failure. Support for this method is _optional_. The :py:meth:`BaseDriver.require_features` method should indicate whether or not support for requeue is available. 
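A caller can probe for requeue support up front instead of discovering it at consume time. A minimal sketch (``driver`` is any concrete :py:class:`BaseDriver` instance; the probing pattern itself is illustrative, not part of this method's contract)::

    try:
        driver.require_features(requeue=True)
        can_requeue = True
    except NotImplementedError:
        can_requeue = False
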
:raises: Does not raise an exception """ @six.add_metaclass(abc.ABCMeta) class RpcIncomingMessage(IncomingMessage): """The RpcIncomingMessage represents an RPC request message received from the backend. This class must be used for RPC calls that return a value to the caller. """ @abc.abstractmethod def reply(self, reply=None, failure=None): """Called by the server to send an RPC reply message or an exception back to the calling client. If an exception is passed via *failure* the driver must convert it to a form that can be sent as a message and properly converted back to the exception at the remote. The driver must provide a way to determine the destination address for the reply. For example the driver may use the *reply-to* field from the corresponding incoming message. Often a driver will also need to set a correlation identifier in the reply to help the remote route the reply to the correct RPCClient. The driver should provide an *at-most-once* delivery guarantee for reply messages. This call should block at least until the reply message has been handed off to the backend - there is no need to confirm that the reply has been delivered. If the reply operation fails this method must issue a log message describing the reason for the failure. See :py:meth:`BaseDriver.send` for details regarding how the received reply is processed. :param reply: reply message body :type reply: dict :param failure: an exception thrown by the RPC call :type failure: Exception :raises: Does not raise an exception """ @six.add_metaclass(abc.ABCMeta) class PollStyleListener(object): """A PollStyleListener is used to transfer received messages to a server for processing. A polling pattern is used to retrieve messages. A PollStyleListener uses a separate thread to run the polling loop. A :py:class:`PollStyleListenerAdapter` can be used to create a :py:class:`Listener` from a PollStyleListener. :param prefetch_size: The number of messages that should be pulled from the backend per receive transaction. May not be honored by all backend implementations. :type prefetch_size: int """ def __init__(self, prefetch_size=-1): self.prefetch_size = prefetch_size @abc.abstractmethod def poll(self, timeout=None, batch_size=1, batch_timeout=None): """poll is called by the server to retrieve incoming messages. It blocks until 'batch_size' incoming messages are available, a timeout occurs, or the poll is interrupted by a call to the :py:meth:`stop` method. If 'batch_size' is > 1 poll must block until 'batch_size' messages are available or at least one message is available and batch_timeout expires :param timeout: Block up to 'timeout' seconds waiting for a message :type timeout: float :param batch_size: Block until this number of messages are received. :type batch_size: int :param batch_timeout: Time to wait in seconds for a full batch to arrive. A timer is started when the first message in a batch is received. If a full batch's worth of messages is not received when the timer expires then :py:meth:`poll` returns all messages received thus far. :type batch_timeout: float :raises: Does not raise an exception. :return: A list of up to batch_size IncomingMessage objects. """ def stop(self): """Stop the listener from polling for messages. This method must cause the :py:meth:`poll` call to unblock and return whatever messages are currently available. This method is called from a different thread than the poller so it must be thread-safe. """ pass def cleanup(self): """Cleanup all resources held by the listener. 
This method should block until the cleanup is completed. """ pass @six.add_metaclass(abc.ABCMeta) class Listener(object): """A Listener is used to transfer incoming messages from the driver to a server for processing. A callback is used by the driver to transfer the messages. :param batch_size: desired number of messages passed to single on_incoming_callback notification :type batch_size: int :param batch_timeout: defines how long should we wait in seconds for batch_size messages if we already have some messages waiting for processing :type batch_timeout: float :param prefetch_size: defines how many messages we want to prefetch from the messaging backend in a single request. May not be honored by all backend implementations. :type prefetch_size: int """ def __init__(self, batch_size, batch_timeout, prefetch_size=-1): self.on_incoming_callback = None self.batch_timeout = batch_timeout self.prefetch_size = prefetch_size if prefetch_size > 0: batch_size = min(batch_size, prefetch_size) self.batch_size = batch_size def start(self, on_incoming_callback): """Start receiving messages. This should cause the driver to start receiving messages from the backend. When message(s) arrive the driver must invoke 'on_incoming_callback' passing it the received messages as a list of IncomingMessages. :param on_incoming_callback: callback function to be executed when listener receives messages. :type on_incoming_callback: func """ self.on_incoming_callback = on_incoming_callback def stop(self): """Stop receiving messages. The driver must no longer invoke the callback. """ self.on_incoming_callback = None @abc.abstractmethod def cleanup(self): """Cleanup all resources held by the listener. This method should block until the cleanup is completed. """ class PollStyleListenerAdapter(Listener): """A Listener that uses a PollStyleListener for message transfer. A dedicated thread is created to do message polling. """ def __init__(self, poll_style_listener, batch_size, batch_timeout): super(PollStyleListenerAdapter, self).__init__( batch_size, batch_timeout, poll_style_listener.prefetch_size ) self._poll_style_listener = poll_style_listener self._listen_thread = threading.Thread(target=self._runner) self._listen_thread.daemon = True self._started = False def start(self, on_incoming_callback): super(PollStyleListenerAdapter, self).start(on_incoming_callback) self._started = True self._listen_thread.start() @excutils.forever_retry_uncaught_exceptions def _runner(self): while self._started: incoming = self._poll_style_listener.poll( batch_size=self.batch_size, batch_timeout=self.batch_timeout) if incoming: self.on_incoming_callback(incoming) # listener is stopped but we need to process all already consumed # messages while True: incoming = self._poll_style_listener.poll( batch_size=self.batch_size, batch_timeout=self.batch_timeout) if not incoming: return self.on_incoming_callback(incoming) def stop(self): self._started = False self._poll_style_listener.stop() self._listen_thread.join() super(PollStyleListenerAdapter, self).stop() def cleanup(self): self._poll_style_listener.cleanup() @six.add_metaclass(abc.ABCMeta) class BaseDriver(object): """Defines the backend driver interface. Each backend driver implementation must provide a concrete derivation of this class implementing the backend specific logic for its public methods. :param conf: The configuration settings provided by the user. :type conf: ConfigOpts :param url: The network address of the messaging backend(s). 
:type url: TransportURL :param default_exchange: The exchange to use if no exchange is specified in a Target. :type default_exchange: str :param allowed_remote_exmods: whitelist of those exception modules which are permitted to be re-raised if an exception is returned in response to an RPC call. :type allowed_remote_exmods: list """ prefetch_size = 0 def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): self.conf = conf self._url = url self._default_exchange = default_exchange self._allowed_remote_exmods = allowed_remote_exmods or [] def require_features(self, requeue=False): """The driver must raise a 'NotImplementedError' if any of the feature flags passed as True are not supported. """ if requeue: raise NotImplementedError('Message requeueing not supported by ' 'this transport driver') @abc.abstractmethod def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, retry=None): """Send a message to the given target and optionally wait for a reply. This method is used by the RPC client when sending RPC requests to a server. The driver must use the *topic*, *exchange*, and *server* (if present) attributes of the *target* to construct the backend-native message address. The message address must match the format used by subscription(s) created by the :py:meth:`BaseDriver.listen` method. If the *target's* *fanout* attribute is set, a copy of the message must be sent to all subscriptions using the *exchange* and *topic* values. If *fanout* is not set, then only one subscriber should receive the message. In the case of multiple subscribers to the same address, only one copy of the message is delivered. In this case the driver should implement a delivery pattern that distributes messages in a balanced fashion across the multiple subscribers. This method must block the caller until one of the following events occur: * the send operation completes successfully * *timeout* seconds elapse (if specified) * *retry* count is reached (if specified) The *wait_for_reply* parameter determines whether or not the caller expects a response to the RPC request. If True, this method must block until a response message is received. This method then returns the response message to the caller. The driver must implement a mechanism for routing incoming responses back to their corresponding send request. How this is done may vary based on the type of messaging backend, but typically it involves having the driver create an internal subscription for reply messages and setting the request message's *reply-to* header to the subscription address. The driver may also need to supply a correlation identifier for mapping the response back to the sender. See :py:meth:`RpcIncomingMessage.reply` If *wait_for_reply* is False this method will block until the message has been handed off to the backend - there is no need to confirm that the message has been delivered. Once the handoff completes this method returns. The driver may attempt to retry sending the message should a recoverable error occur that prevents the message from being passed to the backend. The *retry* parameter specifies how many attempts to re-send the message the driver may make before raising a :py:exc:`MessageDeliveryFailure` exception. A value of None or -1 means unlimited retries. 0 means no retry is attempted. N means attempt at most N retries before failing. **Note well:** the driver MUST guarantee that the message is not duplicated by the retry process. 
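The retry accounting described above amounts to the following sketch (``hand_off_to_backend`` and ``RecoverableConnectionError`` are hypothetical stand-ins; a real driver folds this into its connection handling)::

    attempt = 0
    while True:
        try:
            hand_off_to_backend(message)  # hypothetical handoff helper
            return
        except RecoverableConnectionError:  # hypothetical exception
            if retry is not None and retry != -1 and attempt >= retry:
                raise MessageDeliveryFailure('retry count exhausted')
            attempt += 1  # retry=0 fails on the first error
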
:param target: The message's destination address :type target: Target :param ctxt: Context metadata provided by sending application which is transferred along with the message. :type ctxt: dict :param message: message provided by the caller :type message: dict :param wait_for_reply: If True block until a reply message is received. :type wait_for_reply: bool :param timeout: Maximum time in seconds to block waiting for the send operation to complete. Should this expire the :py:meth:`send` must raise a :py:exc:`MessagingTimeout` exception :type timeout: float :param retry: maximum message send attempts permitted :type retry: int :returns: A reply message or None if no reply expected :raises: :py:exc:`MessagingException`, any exception thrown by the remote server when executing the RPC call. """ @abc.abstractmethod def send_notification(self, target, ctxt, message, version, retry): """Send a notification message to the given target. This method is used by the Notifier to send notification messages to a Listener. Notifications use a *store and forward* delivery pattern. The driver must allow for delivery in the case where the intended recipient is not present at the time the notification is published. Typically this requires a messaging backend that has the ability to store messages until a consumer is present. Therefore this method must block at least until the backend accepts ownership of the message. This method does not guarantee that the message has or will be processed by the intended recipient. The driver must use the *topic* and *exchange* attributes of the *target* to construct the backend-native message address. The message address must match the format used by subscription(s) created by the :py:meth:`BaseDriver.listen_for_notifications` method. Only one copy of the message is delivered in the case of multiple subscribers to the same address. In this case the driver should implement a delivery pattern that distributes messages in a balanced fashion across the multiple subscribers. There is an exception to the single delivery semantics described above: the *pool* parameter to the :py:meth:`BaseDriver.listen_for_notifications` method may be used to set up shared subscriptions. See :py:meth:`BaseDriver.listen_for_notifications` for details. This method must also honor the *retry* parameter. See :py:meth:`BaseDriver.send` for details regarding implementing the *retry* process. *version* indicates whether or not the message should be encapsulated in an envelope. A value < 2.0 should not envelope the message. See :py:func:`common.serialize_msg` for more detail. :param target: The message's destination address :type target: Target :param ctxt: Context metadata provided by sending application which is transferred along with the message. :type ctxt: dict :param message: message provided by the caller :type message: dict :param version: determines the envelope for the message :type version: float :param retry: maximum message send attempts permitted :type retry: int :returns: None :raises: :py:exc:`MessagingException` """ @abc.abstractmethod def listen(self, target, batch_size, batch_timeout): """Construct a listener for the given target. The listener may be either a :py:class:`Listener` or :py:class:`PollStyleListener` depending on the driver's preference. This method is used by the RPC server. The driver must create subscriptions to the address provided in *target*. 
These subscriptions must then be associated with a :py:class:`Listener` or :py:class:`PollStyleListener` which is returned by this method. See :py:meth:`BaseDriver.send` for more detail regarding message addressing. The driver must support receiving messages sent to the following addresses derived from the values in *target*: * all messages sent to the exchange and topic given in the target. This includes messages sent using a fanout pattern. * if the server attribute of the target is set then the driver must also subscribe to messages sent to the exchange, topic, and server For example, given a target with exchange 'my-exchange', topic 'my-topic', and server 'my-server', the driver would create subscriptions for: * all messages sent to my-exchange and my-topic (including fanout) * all messages sent to my-exchange, my-topic, and my-server The driver must pass messages arriving from these subscriptions to the listener. For :py:class:`PollStyleListener` the driver should trigger the :py:meth:`PollStyleListener.poll` method to unblock and return the incoming messages. For :py:class:`Listener` the driver should invoke the callback with the incoming messages. This method only blocks long enough to establish the subscription(s) and construct the listener. In the case of failover, the driver must restore the subscription(s). Subscriptions should remain active until the listener is stopped. :param target: The address(es) to subscribe to. :type target: Target :param batch_size: passed to the listener :type batch_size: int :param batch_timeout: passed to the listener :type batch_timeout: float :returns: None :raises: :py:exc:`MessagingException` """ @abc.abstractmethod def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): """Construct a notification listener for the given list of tuples of (target, priority) addresses. The driver must create a subscription for each (*target*, *priority*) pair. The topic for the subscription is created for each pair using the format `"%s.%s" % (target.topic, priority)`. This format is used by the caller of the :py:meth:`BaseDriver.send_notification` when setting the topic member of the target parameter. Only the *exchange* and *topic* must be considered when creating subscriptions. *server* and *fanout* must be ignored. The *pool* parameter, if specified, should cause the driver to create a subscription that is shared with other subscribers using the same pool identifier. Each pool gets a single copy of the message. For example if there is a subscriber pool with identifier **foo** and another pool **bar**, then one **foo** subscriber and one **bar** subscriber will each receive a copy of the message. The driver should implement a delivery pattern that distributes message in a balanced fashion across the subscribers in a pool. The driver must raise a :py:exc:`NotImplementedError` if pooling is not supported and a pool identifier is passed in. Refer to the description of :py:meth:`BaseDriver.send_notification` for further details regarding implementation. :param targets_and_priorities: List of (target, priority) pairs :type targets_and_priorities: list :param pool: pool identifier :type pool: str :param batch_size: passed to the listener :type batch_size: int :param batch_timeout: passed to the listener :type batch_timeout: float :returns: None :raises: :py:exc:`MessagingException`, :py:exc:`NotImplementedError` """ @abc.abstractmethod def cleanup(self): """Release all resources used by the driver. 
This method must block until the cleanup is complete. """ oslo.messaging-5.35.0/oslo_messaging/_drivers/amqpdriver.py0000666000175100017510000005606413224676046024161 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['AMQPDriverBase'] import logging import threading import time import uuid import cachetools from oslo_utils import timeutils from six import moves import oslo_messaging from oslo_messaging._drivers import amqp as rpc_amqp from oslo_messaging._drivers import base from oslo_messaging._drivers import common as rpc_common from oslo_messaging._i18n import _ from oslo_messaging._i18n import _LE from oslo_messaging._i18n import _LI from oslo_messaging._i18n import _LW LOG = logging.getLogger(__name__) # Minimum/Maximum sleep between a poll and ack/requeue # Maximum should be small enough to not get rejected ack, # minimum should be big enough to not burn the CPU. ACK_REQUEUE_EVERY_SECONDS_MIN = 0.001 ACK_REQUEUE_EVERY_SECONDS_MAX = 1.0 class MessageOperationsHandler(object): """Queue used by message operations to ensure that all tasks are serialized and run in the same thread, since underlying drivers like kombu are not thread safe. """ def __init__(self, name): self.name = "%s (%s)" % (name, hex(id(self))) self._tasks = moves.queue.Queue() self._shutdown = threading.Event() self._shutdown_thread = threading.Thread( target=self._process_in_background) self._shutdown_thread.daemon = True def stop(self): self._shutdown.set() def process_in_background(self): """Run all pending tasks queued by do() in an thread during the shutdown process. """ self._shutdown_thread.start() def _process_in_background(self): while not self._shutdown.is_set(): self.process() time.sleep(ACK_REQUEUE_EVERY_SECONDS_MIN) def process(self): "Run all pending tasks queued by do()." while True: try: task = self._tasks.get(block=False) except moves.queue.Empty: break task() def do(self, task): "Put the task in the queue." self._tasks.put(task) class AMQPIncomingMessage(base.RpcIncomingMessage): def __init__(self, listener, ctxt, message, unique_id, msg_id, reply_q, obsolete_reply_queues, message_operations_handler): super(AMQPIncomingMessage, self).__init__(ctxt, message) self.listener = listener self.unique_id = unique_id self.msg_id = msg_id self.reply_q = reply_q self._obsolete_reply_queues = obsolete_reply_queues self._message_operations_handler = message_operations_handler self.stopwatch = timeutils.StopWatch() self.stopwatch.start() def _send_reply(self, conn, reply=None, failure=None): if not self._obsolete_reply_queues.reply_q_valid(self.reply_q, self.msg_id): return if failure: failure = rpc_common.serialize_remote_exception(failure) # NOTE(sileht): ending can be removed in N*, see Listener.wait() # for more detail. 
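# A sketch of the reply payload assembled just below (the field values
# shown are illustrative): a successful reply is a dict such as
#   {'result': <return value>, 'failure': None, 'ending': True,
#    '_msg_id': '5c93...'}
# plus the '_unique_id' key added by rpc_amqp._add_unique_id(); it is
# then published directly to the caller's private reply queue.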
msg = {'result': reply, 'failure': failure, 'ending': True, '_msg_id': self.msg_id} rpc_amqp._add_unique_id(msg) unique_id = msg[rpc_amqp.UNIQUE_ID] LOG.debug("sending reply msg_id: %(msg_id)s " "reply queue: %(reply_q)s " "time elapsed: %(elapsed)ss", { 'msg_id': self.msg_id, 'unique_id': unique_id, 'reply_q': self.reply_q, 'elapsed': self.stopwatch.elapsed()}) conn.direct_send(self.reply_q, rpc_common.serialize_msg(msg)) def reply(self, reply=None, failure=None): if not self.msg_id: # NOTE(Alexei_987) not sending a reply if msg_id is empty, # because no reply is expected by the caller side return # NOTE(sileht): return without holding a connection if possible if not self._obsolete_reply_queues.reply_q_valid(self.reply_q, self.msg_id): return # NOTE(sileht): we read the configuration value from the driver # to be able to backport this change to previous versions that # still have the qpid driver duration = self.listener.driver.missing_destination_retry_timeout timer = rpc_common.DecayingTimer(duration=duration) timer.start() while True: try: with self.listener.driver._get_connection( rpc_common.PURPOSE_SEND) as conn: self._send_reply(conn, reply, failure) return except rpc_amqp.AMQPDestinationNotFound: if timer.check_return() > 0: LOG.debug(("The reply %(msg_id)s cannot be sent: " "%(reply_q)s reply queue doesn't exist, " "retrying..."), { 'msg_id': self.msg_id, 'reply_q': self.reply_q}) time.sleep(0.25) else: self._obsolete_reply_queues.add(self.reply_q, self.msg_id) LOG.info(_LI("The reply %(msg_id)s cannot be sent: " "%(reply_q)s reply queue doesn't exist after " "%(duration)s sec, abandoning..."), { 'msg_id': self.msg_id, 'reply_q': self.reply_q, 'duration': duration}) return def acknowledge(self): self._message_operations_handler.do(self.message.acknowledge) self.listener.msg_id_cache.add(self.unique_id) def requeue(self): # NOTE(sileht): If the connection is lost between receiving the # message and requeueing it, this requeue call fails, # but because the message is not acknowledged and not added to the # msg_id_cache, the message will be reconsumed; the only difference is # that the message stays at the beginning of the queue instead of # moving to the end. self._message_operations_handler.do(self.message.requeue) class ObsoleteReplyQueuesCache(object): """Cache of reply queue ids that don't exist anymore. NOTE(sileht): In case of a broker restart/failover, a reply queue can be unreachable for a short period; IncomingMessage.send_reply will block for 60 seconds in this case, or until rabbit recovers. But if the reply queue is unreachable because the rpc client is really gone, we can have a ton of replies to send, each waiting 60 seconds. This leads to starvation of connections in the pool: the rpc server takes too much time to send replies, and other rpc clients raise TimeoutError because they don't receive their replies in time. This object caches already-known gone clients so we don't wait 60 seconds and hold a connection of the pool. Keeping the last 200 gone rpc clients for 1 minute is enough and doesn't hold too much memory.
""" SIZE = 200 TTL = 60 def __init__(self): self._lock = threading.RLock() self._cache = cachetools.TTLCache(self.SIZE, self.TTL) def reply_q_valid(self, reply_q, msg_id): if reply_q in self._cache: self._no_reply_log(reply_q, msg_id) return False return True def add(self, reply_q, msg_id): with self._lock: self._cache.update({reply_q: msg_id}) self._no_reply_log(reply_q, msg_id) def _no_reply_log(self, reply_q, msg_id): LOG.warning(_LW("%(reply_queue)s doesn't exists, drop reply to " "%(msg_id)s"), {'reply_queue': reply_q, 'msg_id': msg_id}) class AMQPListener(base.PollStyleListener): def __init__(self, driver, conn): super(AMQPListener, self).__init__(driver.prefetch_size) self.driver = driver self.conn = conn self.msg_id_cache = rpc_amqp._MsgIdCache() self.incoming = [] self._shutdown = threading.Event() self._shutoff = threading.Event() self._obsolete_reply_queues = ObsoleteReplyQueuesCache() self._message_operations_handler = MessageOperationsHandler( "AMQPListener") self._current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN def __call__(self, message): ctxt = rpc_amqp.unpack_context(message) unique_id = self.msg_id_cache.check_duplicate_message(message) if ctxt.msg_id: LOG.debug("received message msg_id: %(msg_id)s reply to " "%(queue)s", {'queue': ctxt.reply_q, 'msg_id': ctxt.msg_id}) else: LOG.debug("received message with unique_id: %s", unique_id) self.incoming.append(AMQPIncomingMessage( self, ctxt.to_dict(), message, unique_id, ctxt.msg_id, ctxt.reply_q, self._obsolete_reply_queues, self._message_operations_handler)) @base.batch_poll_helper def poll(self, timeout=None): stopwatch = timeutils.StopWatch(duration=timeout).start() while not self._shutdown.is_set(): self._message_operations_handler.process() if self.incoming: return self.incoming.pop(0) left = stopwatch.leftover(return_none=True) if left is None: left = self._current_timeout if left <= 0: return None try: self.conn.consume(timeout=min(self._current_timeout, left)) except rpc_common.Timeout: self._current_timeout = max(self._current_timeout * 2, ACK_REQUEUE_EVERY_SECONDS_MAX) else: self._current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN # NOTE(sileht): listener is stopped, just processes remaining messages # and operations self._message_operations_handler.process() if self.incoming: return self.incoming.pop(0) self._shutoff.set() def stop(self): self._shutdown.set() self.conn.stop_consuming() self._shutoff.wait() # NOTE(sileht): Here, the listener is stopped, but some incoming # messages may still live on server side, because callback is still # running and message is not yet ack/requeue. It's safe to do the ack # into another thread, side the polling thread is now terminated. 
self._message_operations_handler.process_in_background() def cleanup(self): # NOTE(sileht): server executor is now stopped, we are sure that no # more incoming messages in live, we can acknowledge # remaining messages and stop the thread self._message_operations_handler.stop() # Closes listener connection self.conn.close() class ReplyWaiters(object): WAKE_UP = object() def __init__(self): self._queues = {} self._wrn_threshold = 10 def get(self, msg_id, timeout): try: return self._queues[msg_id].get(block=True, timeout=timeout) except moves.queue.Empty: raise oslo_messaging.MessagingTimeout( 'Timed out waiting for a reply ' 'to message ID %s' % msg_id) def put(self, msg_id, message_data): queue = self._queues.get(msg_id) if not queue: LOG.info(_LI('No calling threads waiting for msg_id : %s'), msg_id) LOG.debug(' queues: %(queues)s, message: %(message)s', {'queues': len(self._queues), 'message': message_data}) else: queue.put(message_data) def add(self, msg_id): self._queues[msg_id] = moves.queue.Queue() queues_length = len(self._queues) if queues_length > self._wrn_threshold: LOG.warning(_LW('Number of call queues is %(queues_length)s, ' 'greater than warning threshold: %(old_threshold)s' '. There could be a leak. Increasing threshold to:' ' %(threshold)s'), {'queues_length': queues_length, 'old_threshold': self._wrn_threshold, 'threshold': self._wrn_threshold * 2}) self._wrn_threshold *= 2 def remove(self, msg_id): del self._queues[msg_id] class ReplyWaiter(object): def __init__(self, reply_q, conn, allowed_remote_exmods): self.conn = conn self.allowed_remote_exmods = allowed_remote_exmods self.msg_id_cache = rpc_amqp._MsgIdCache() self.waiters = ReplyWaiters() self.conn.declare_direct_consumer(reply_q, self) self._thread_exit_event = threading.Event() self._thread = threading.Thread(target=self.poll) self._thread.daemon = True self._thread.start() def stop(self): if self._thread: self._thread_exit_event.set() self.conn.stop_consuming() self._thread.join() self._thread = None def poll(self): current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN while not self._thread_exit_event.is_set(): try: # ack every ACK_REQUEUE_EVERY_SECONDS_MAX seconds self.conn.consume(timeout=current_timeout) except rpc_common.Timeout: current_timeout = max(current_timeout * 2, ACK_REQUEUE_EVERY_SECONDS_MAX) except Exception: LOG.exception(_LE("Failed to process incoming message, " "retrying...")) else: current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN def __call__(self, message): # NOTE(sileht): __call__ is running within the polling thread, # (conn.consume -> conn.conn.drain_events() -> __call__ callback) # it's threadsafe to acknowledge the message here, no need to wait # the next polling message.acknowledge() incoming_msg_id = message.pop('_msg_id', None) if message.get('ending'): LOG.debug("received reply msg_id: %s", incoming_msg_id) self.waiters.put(incoming_msg_id, message) def listen(self, msg_id): self.waiters.add(msg_id) def unlisten(self, msg_id): self.waiters.remove(msg_id) @staticmethod def _raise_timeout_exception(msg_id): raise oslo_messaging.MessagingTimeout( _('Timed out waiting for a reply to message ID %s.') % msg_id) def _process_reply(self, data): self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] result = rpc_common.deserialize_remote_exception( failure, self.allowed_remote_exmods) else: result = data.get('result', None) ending = data.get('ending', False) return result, ending def wait(self, msg_id, timeout): # NOTE(sileht): for each msg_id we receive two 
amqp message # first one with the payload, a second one to ensure the other # have finish to send the payload # NOTE(viktors): We are going to remove this behavior in the N # release, but we need to keep backward compatibility, so we should # support both cases for now. timer = rpc_common.DecayingTimer(duration=timeout) timer.start() final_reply = None ending = False while not ending: timeout = timer.check_return(self._raise_timeout_exception, msg_id) try: message = self.waiters.get(msg_id, timeout=timeout) except moves.queue.Empty: self._raise_timeout_exception(msg_id) reply, ending = self._process_reply(message) if reply is not None: # NOTE(viktors): This can be either first _send_reply() with an # empty `result` field or a second _send_reply() with # ending=True and no `result` field. final_reply = reply return final_reply class AMQPDriverBase(base.BaseDriver): missing_destination_retry_timeout = 0 def __init__(self, conf, url, connection_pool, default_exchange=None, allowed_remote_exmods=None): super(AMQPDriverBase, self).__init__(conf, url, default_exchange, allowed_remote_exmods) self._default_exchange = default_exchange self._connection_pool = connection_pool self._reply_q_lock = threading.Lock() self._reply_q = None self._reply_q_conn = None self._waiter = None def _get_exchange(self, target): return target.exchange or self._default_exchange def _get_connection(self, purpose=rpc_common.PURPOSE_SEND): return rpc_common.ConnectionContext(self._connection_pool, purpose=purpose) def _get_reply_q(self): with self._reply_q_lock: if self._reply_q is not None: return self._reply_q reply_q = 'reply_' + uuid.uuid4().hex conn = self._get_connection(rpc_common.PURPOSE_LISTEN) self._waiter = ReplyWaiter(reply_q, conn, self._allowed_remote_exmods) self._reply_q = reply_q self._reply_q_conn = conn return self._reply_q def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, envelope=True, notify=False, retry=None): msg = message if wait_for_reply: msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) msg.update({'_reply_q': self._get_reply_q()}) rpc_amqp._add_unique_id(msg) unique_id = msg[rpc_amqp.UNIQUE_ID] rpc_amqp.pack_context(msg, ctxt) if envelope: msg = rpc_common.serialize_msg(msg) if wait_for_reply: self._waiter.listen(msg_id) log_msg = "CALL msg_id: %s " % msg_id else: log_msg = "CAST unique_id: %s " % unique_id try: with self._get_connection(rpc_common.PURPOSE_SEND) as conn: if notify: exchange = self._get_exchange(target) log_msg += "NOTIFY exchange '%(exchange)s'" \ " topic '%(topic)s'" % { 'exchange': exchange, 'topic': target.topic} LOG.debug(log_msg) conn.notify_send(exchange, target.topic, msg, retry=retry) elif target.fanout: log_msg += "FANOUT topic '%(topic)s'" % { 'topic': target.topic} LOG.debug(log_msg) conn.fanout_send(target.topic, msg, retry=retry) else: topic = target.topic exchange = self._get_exchange(target) if target.server: topic = '%s.%s' % (target.topic, target.server) log_msg += "exchange '%(exchange)s'" \ " topic '%(topic)s'" % { 'exchange': exchange, 'topic': topic} LOG.debug(log_msg) conn.topic_send(exchange_name=exchange, topic=topic, msg=msg, timeout=timeout, retry=retry) if wait_for_reply: result = self._waiter.wait(msg_id, timeout) if isinstance(result, Exception): raise result return result finally: if wait_for_reply: self._waiter.unlisten(msg_id) def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, retry=None): return self._send(target, ctxt, message, wait_for_reply, timeout, retry=retry) def 
send_notification(self, target, ctxt, message, version, retry=None): return self._send(target, ctxt, message, envelope=(version == 2.0), notify=True, retry=retry) def listen(self, target, batch_size, batch_timeout): conn = self._get_connection(rpc_common.PURPOSE_LISTEN) listener = AMQPListener(self, conn) conn.declare_topic_consumer(exchange_name=self._get_exchange(target), topic=target.topic, callback=listener) conn.declare_topic_consumer(exchange_name=self._get_exchange(target), topic='%s.%s' % (target.topic, target.server), callback=listener) conn.declare_fanout_consumer(target.topic, listener) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): conn = self._get_connection(rpc_common.PURPOSE_LISTEN) # NOTE(sileht): The application sets batch_size, so we don't need to # prefetch more messages, especially for notifications. Notification # queues can be really big when the consumer has disappeared for a # long period, and when it comes back, kombu/pyamqp will fetch all # messages it can. So we override the default qos prefetch value conn.connection.rabbit_qos_prefetch_count = batch_size listener = AMQPListener(self, conn) for target, priority in targets_and_priorities: conn.declare_topic_consumer( exchange_name=self._get_exchange(target), topic='%s.%s' % (target.topic, priority), callback=listener, queue_name=pool) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def cleanup(self): if self._connection_pool: self._connection_pool.empty() self._connection_pool = None with self._reply_q_lock: if self._reply_q is not None: self._waiter.stop() self._reply_q_conn.close() self._reply_q_conn = None self._reply_q = None self._waiter = None oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/0000775000175100017510000000000013224676256023722 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_engine.py0000666000175100017510000002670413224676046026553 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
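# A usage sketch tying the AMQPDriverBase pieces above together (the
# names `driver`, `target` and `on_incoming` are illustrative, and real
# applications go through oslo.messaging's public RPC server API rather
# than the driver directly):
#
#     listener = driver.listen(target, batch_size=1, batch_timeout=None)
#     listener.start(on_incoming)   # a PollStyleListenerAdapter
#
# `on_incoming` receives lists of AMQPIncomingMessage objects; each one
# must be acknowledged or requeued, and reply() routes the result back
# through the '_reply_q' queue recorded in the request message.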
import logging import os import threading import uuid from oslo_utils import eventletutils import pika_pool from stevedore import driver from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc LOG = logging.getLogger(__name__) class _PooledConnectionWithConfirmations(pika_pool.Connection): """Derived from 'pika_pool.Connection' and extends its logic - adds 'confirm_delivery' call after channel creation to enable delivery confirmation for channel """ @property def channel(self): if self.fairy.channel is None: self.fairy.channel = self.fairy.cxn.channel() self.fairy.channel.confirm_delivery() return self.fairy.channel class PikaEngine(object): """Used for shared functionality between other pika driver modules, like connection factory, connection pools, processing and holding configuration, etc. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): self.conf = conf self.url = url self._connection_factory_type = ( self.conf.oslo_messaging_pika.connection_factory ) self._connection_factory = None self._connection_without_confirmation_pool = None self._connection_with_confirmation_pool = None self._pid = None self._init_lock = threading.Lock() self.host_connection_reconnect_delay = ( conf.oslo_messaging_pika.host_connection_reconnect_delay ) # processing rpc options self.default_rpc_exchange = ( conf.oslo_messaging_pika.default_rpc_exchange ) self.rpc_reply_exchange = ( conf.oslo_messaging_pika.rpc_reply_exchange ) self.allowed_remote_exmods = [pika_drv_cmns.EXCEPTIONS_MODULE] if allowed_remote_exmods: self.allowed_remote_exmods.extend(allowed_remote_exmods) self.rpc_listener_prefetch_count = ( conf.oslo_messaging_pika.rpc_listener_prefetch_count ) self.default_rpc_retry_attempts = ( conf.oslo_messaging_pika.default_rpc_retry_attempts ) self.rpc_retry_delay = ( conf.oslo_messaging_pika.rpc_retry_delay ) if self.rpc_retry_delay < 0: raise ValueError("rpc_retry_delay should be non-negative integer") self.rpc_reply_listener_prefetch_count = ( conf.oslo_messaging_pika.rpc_listener_prefetch_count ) self.rpc_reply_retry_attempts = ( conf.oslo_messaging_pika.rpc_reply_retry_attempts ) self.rpc_reply_retry_delay = ( conf.oslo_messaging_pika.rpc_reply_retry_delay ) if self.rpc_reply_retry_delay < 0: raise ValueError("rpc_reply_retry_delay should be non-negative " "integer") self.rpc_queue_expiration = ( self.conf.oslo_messaging_pika.rpc_queue_expiration ) # processing notification options self.default_notification_exchange = ( conf.oslo_messaging_pika.default_notification_exchange ) self.notification_persistence = ( conf.oslo_messaging_pika.notification_persistence ) self.notification_listener_prefetch_count = ( conf.oslo_messaging_pika.notification_listener_prefetch_count ) self.default_notification_retry_attempts = ( conf.oslo_messaging_pika.default_notification_retry_attempts ) if self.default_notification_retry_attempts is None: raise ValueError("default_notification_retry_attempts should be " "an integer") self.notification_retry_delay = ( conf.oslo_messaging_pika.notification_retry_delay ) if (self.notification_retry_delay is None or self.notification_retry_delay < 0): raise ValueError("notification_retry_delay should be non-negative " "integer") self.default_content_type = ( 'application/' + conf.oslo_messaging_pika.default_serializer_type ) def _init_if_needed(self): cur_pid = os.getpid() if self._pid == cur_pid: return with self._init_lock: if self._pid == 
cur_pid: return if self._pid: LOG.warning("New pid is detected. Old: %s, new: %s. " "Cleaning up...", self._pid, cur_pid) # Note(dukhlov): we need to force select poller usage in case # when 'thread' module is monkey patched becase current # eventlet implementation does not support patching of # poll/epoll/kqueue if eventletutils.is_monkey_patched("thread"): from pika.adapters import select_connection select_connection.SELECT_TYPE = "select" mgr = driver.DriverManager( 'oslo.messaging.pika.connection_factory', self._connection_factory_type ) self._connection_factory = mgr.driver(self.url, self.conf) # initializing 2 connection pools: 1st for connections without # confirmations, 2nd - with confirmations self._connection_without_confirmation_pool = pika_pool.QueuedPool( create=self.create_connection, max_size=self.conf.oslo_messaging_pika.pool_max_size, max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow, timeout=self.conf.oslo_messaging_pika.pool_timeout, recycle=self.conf.oslo_messaging_pika.pool_recycle, stale=self.conf.oslo_messaging_pika.pool_stale, ) self._connection_with_confirmation_pool = pika_pool.QueuedPool( create=self.create_connection, max_size=self.conf.oslo_messaging_pika.pool_max_size, max_overflow=self.conf.oslo_messaging_pika.pool_max_overflow, timeout=self.conf.oslo_messaging_pika.pool_timeout, recycle=self.conf.oslo_messaging_pika.pool_recycle, stale=self.conf.oslo_messaging_pika.pool_stale, ) self._connection_with_confirmation_pool.Connection = ( _PooledConnectionWithConfirmations ) self._pid = cur_pid def create_connection(self, for_listening=False): self._init_if_needed() return self._connection_factory.create_connection(for_listening) @property def connection_without_confirmation_pool(self): self._init_if_needed() return self._connection_without_confirmation_pool @property def connection_with_confirmation_pool(self): self._init_if_needed() return self._connection_with_confirmation_pool def cleanup(self): if self._connection_factory: self._connection_factory.cleanup() def declare_exchange_by_channel(self, channel, exchange, exchange_type, durable): """Declare exchange using already created channel, if they don't exist :param channel: Channel for communication with RabbitMQ :param exchange: String, RabbitMQ exchange name :param exchange_type: String ('direct', 'topic' or 'fanout') exchange type for exchange to be declared :param durable: Boolean, creates durable exchange if true """ try: channel.exchange_declare( exchange, exchange_type, auto_delete=True, durable=durable ) except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: raise pika_drv_exc.ConnectionException( "Connectivity problem detected during declaring exchange: " "exchange:{}, exchange_type: {}, durable: {}. 
{}".format( exchange, exchange_type, durable, str(e) ) ) def declare_queue_binding_by_channel(self, channel, exchange, queue, routing_key, exchange_type, queue_expiration, durable): """Declare exchange, queue and bind them using already created channel, if they don't exist :param channel: Channel for communication with RabbitMQ :param exchange: String, RabbitMQ exchange name :param queue: Sting, RabbitMQ queue name :param routing_key: Sting, RabbitMQ routing key for queue binding :param exchange_type: String ('direct', 'topic' or 'fanout') exchange type for exchange to be declared :param queue_expiration: Integer, time in seconds which queue will remain existing in RabbitMQ when there no consumers connected :param durable: Boolean, creates durable exchange and queue if true """ try: channel.exchange_declare( exchange, exchange_type, auto_delete=True, durable=durable ) arguments = {} if queue_expiration > 0: arguments['x-expires'] = queue_expiration * 1000 channel.queue_declare(queue, durable=durable, arguments=arguments) channel.queue_bind(queue, exchange, routing_key) except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: raise pika_drv_exc.ConnectionException( "Connectivity problem detected during declaring queue " "binding: exchange:{}, queue: {}, routing_key: {}, " "exchange_type: {}, queue_expiration: {}, " "durable: {}. {}".format( exchange, queue, routing_key, exchange_type, queue_expiration, durable, str(e) ) ) def get_rpc_exchange_name(self, exchange): """Returns RabbitMQ exchange name for given rpc request :param exchange: String, oslo.messaging target's exchange :return: String, RabbitMQ exchange name """ return exchange or self.default_rpc_exchange @staticmethod def get_rpc_queue_name(topic, server, no_ack, worker=False): """Returns RabbitMQ queue name for given rpc request :param topic: String, oslo.messaging target's topic :param server: String, oslo.messaging target's server :param no_ack: Boolean, use message delivery with acknowledges or not :param worker: Boolean, use queue by single worker only or not :return: String, RabbitMQ queue name """ queue_parts = ["no_ack" if no_ack else "with_ack", topic] if server is not None: queue_parts.append(server) if worker: queue_parts.append("worker") queue_parts.append(uuid.uuid4().hex) queue = '.'.join(queue_parts) return queue oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_listener.py0000666000175100017510000001044213224676046027125 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import uuid from concurrent import futures from oslo_log import log as logging from oslo_messaging._drivers.pika_driver import pika_poller as pika_drv_poller LOG = logging.getLogger(__name__) class RpcReplyPikaListener(object): """Provide functionality for listening RPC replies. 
Create and handle the reply poller and the coroutine performing the polling job """ def __init__(self, pika_engine): super(RpcReplyPikaListener, self).__init__() self._pika_engine = pika_engine # preparing poller for listening replies self._reply_queue = None self._reply_poller = None self._reply_waiting_futures = {} self._reply_consumer_initialized = False self._reply_consumer_initialization_lock = threading.Lock() self._shutdown = False def get_reply_qname(self): """Return the reply queue name, shared for the whole process; before returning it, check whether the RPC reply listener is initialized and perform the initialization if needed :return: String, queue name which should be used for reply sending """ if self._reply_consumer_initialized: return self._reply_queue with self._reply_consumer_initialization_lock: if self._reply_consumer_initialized: return self._reply_queue # generate reply queue name if needed if self._reply_queue is None: self._reply_queue = "reply.{}.{}.{}".format( self._pika_engine.conf.project, self._pika_engine.conf.prog, uuid.uuid4().hex ) # initialize reply poller if needed if self._reply_poller is None: self._reply_poller = pika_drv_poller.RpcReplyPikaPoller( self._pika_engine, self._pika_engine.rpc_reply_exchange, self._reply_queue, 1, None, self._pika_engine.rpc_reply_listener_prefetch_count ) self._reply_poller.start(self._on_incoming) self._reply_consumer_initialized = True return self._reply_queue def _on_incoming(self, incoming): """Reply polling job. Polls replies in an infinite loop and notifies registered futures """ for message in incoming: try: message.acknowledge() future = self._reply_waiting_futures.pop( message.msg_id, None ) if future is not None: future.set_result(message) except Exception: LOG.exception("Unexpected exception during processing " "reply message") def register_reply_waiter(self, msg_id): """Register a reply waiter. Should be called before sending the message to the server :param msg_id: String, message_id of the expected reply :return future: Future, container over which the expected reply is returned """ future = futures.Future() self._reply_waiting_futures[msg_id] = future return future def unregister_reply_waiter(self, msg_id): """Unregister a reply waiter. Should be called if the client has not got a reply and doesn't want to continue waiting (for example, if the timeout expired) :param msg_id: """ self._reply_waiting_futures.pop(msg_id, None) def cleanup(self): """Stop consuming replies and clean up resources""" self._shutdown = True if self._reply_poller: self._reply_poller.stop() self._reply_poller.cleanup() self._reply_poller = None self._reply_queue = None oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_poller.py0000666000175100017510000005332713224676046026602 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
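# A send-path sketch of the reply-waiter futures implemented above in
# RpcReplyPikaListener (the names `reply_listener`, `publish`, `msg` and
# `timeout` are illustrative; the real wiring lives in the pika driver's
# send code):
#
#     future = reply_listener.register_reply_waiter(msg_id)
#     try:
#         publish(msg, reply_to=reply_listener.get_reply_qname())
#         reply = future.result(timeout=timeout)
#     finally:
#         reply_listener.unregister_reply_waiter(msg_id)
#
# _on_incoming() completes the future with future.set_result() when a
# message with a matching msg_id arrives on the shared reply queue.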
import threading from oslo_log import log as logging from oslo_service import loopingcall from oslo_messaging._drivers import base from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc from oslo_messaging._drivers.pika_driver import pika_message as pika_drv_msg LOG = logging.getLogger(__name__) class PikaPoller(base.Listener): """Provides user friendly functionality for RabbitMQ message consuming, handles low level connectivity problems and restore connection if some connectivity related problem detected """ def __init__(self, pika_engine, batch_size, batch_timeout, prefetch_count, incoming_message_class): """Initialize required fields :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param batch_size: desired number of messages passed to single on_incoming_callback call :param batch_timeout: defines how long should we wait for batch_size messages if we already have some messages waiting for processing :param prefetch_count: Integer, maximum count of unacknowledged messages which RabbitMQ broker sends to this consumer :param incoming_message_class: PikaIncomingMessage, wrapper for consumed RabbitMQ message """ super(PikaPoller, self).__init__(batch_size, batch_timeout, prefetch_count) self._pika_engine = pika_engine self._incoming_message_class = incoming_message_class self._connection = None self._channel = None self._recover_loopingcall = None self._lock = threading.RLock() self._cur_batch_buffer = None self._cur_batch_timeout_id = None self._started = False self._closing_connection_by_poller = False self._queues_to_consume = None def _on_connection_close(self, connection, reply_code, reply_text): self._deliver_cur_batch() if self._closing_connection_by_poller: return with self._lock: self._connection = None self._start_recover_consuming_task() def _on_channel_close(self, channel, reply_code, reply_text): if self._cur_batch_buffer: self._cur_batch_buffer = [ message for message in self._cur_batch_buffer if not message.need_ack() ] if self._closing_connection_by_poller: return with self._lock: self._channel = None self._start_recover_consuming_task() def _on_consumer_cancel(self, method_frame): with self._lock: if self._queues_to_consume: consumer_tag = method_frame.method.consumer_tag for queue_info in self._queues_to_consume: if queue_info["consumer_tag"] == consumer_tag: queue_info["consumer_tag"] = None self._start_recover_consuming_task() def _on_message_no_ack_callback(self, unused, method, properties, body): """Is called by Pika when message was received from queue listened with no_ack=True mode """ incoming_message = self._incoming_message_class( self._pika_engine, None, method, properties, body ) self._on_incoming_message(incoming_message) def _on_message_with_ack_callback(self, unused, method, properties, body): """Is called by Pika when message was received from queue listened with no_ack=False mode """ incoming_message = self._incoming_message_class( self._pika_engine, self._channel, method, properties, body ) self._on_incoming_message(incoming_message) def _deliver_cur_batch(self): if self._cur_batch_timeout_id is not None: self._connection.remove_timeout(self._cur_batch_timeout_id) self._cur_batch_timeout_id = None if self._cur_batch_buffer: buf_to_send = self._cur_batch_buffer self._cur_batch_buffer = None try: self.on_incoming_callback(buf_to_send) except Exception: LOG.exception("Unexpected exception during incoming 
delivery") def _on_incoming_message(self, incoming_message): if self._cur_batch_buffer is None: self._cur_batch_buffer = [incoming_message] else: self._cur_batch_buffer.append(incoming_message) if len(self._cur_batch_buffer) >= self.batch_size: self._deliver_cur_batch() return if self._cur_batch_timeout_id is None: self._cur_batch_timeout_id = self._connection.add_timeout( self.batch_timeout, self._deliver_cur_batch) def _start_recover_consuming_task(self): """Start async job for checking connection to the broker.""" if self._recover_loopingcall is None and self._started: self._recover_loopingcall = ( loopingcall.DynamicLoopingCall( self._try_recover_consuming ) ) LOG.info("Starting recover consuming job for listener: %s", self) self._recover_loopingcall.start() def _try_recover_consuming(self): with self._lock: try: if self._started: self._start_or_recover_consuming() except pika_drv_exc.EstablishConnectionException as e: LOG.warning( "Problem during establishing connection for pika " "poller %s", e, exc_info=True ) return self._pika_engine.host_connection_reconnect_delay except pika_drv_exc.ConnectionException as e: LOG.warning( "Connectivity exception during starting/recovering pika " "poller %s", e, exc_info=True ) except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: LOG.warning( "Connectivity exception during starting/recovering pika " "poller %s", e, exc_info=True ) except BaseException: # NOTE (dukhlov): I preffer to use here BaseException because # if this method raise such exception LoopingCall stops # execution Probably it should never happen and Exception # should be enough but in case of programmer mistake it could # be and it is potentially hard to catch problem if we will # stop background task. It is better when it continue to work # and write a lot of LOG with this error LOG.exception("Unexpected exception during " "starting/recovering pika poller") else: self._recover_loopingcall = None LOG.info("Recover consuming job was finished for listener: %s", self) raise loopingcall.LoopingCallDone(True) return 0 def _start_or_recover_consuming(self): """Performs reconnection to the broker. It is unsafe method for internal use only """ if self._connection is None or not self._connection.is_open: self._connection = self._pika_engine.create_connection( for_listening=True ) self._connection.add_on_close_callback(self._on_connection_close) self._channel = None if self._channel is None or not self._channel.is_open: if self._queues_to_consume: for queue_info in self._queues_to_consume: queue_info["consumer_tag"] = None self._channel = self._connection.channel() self._channel.add_on_close_callback(self._on_channel_close) self._channel.add_on_cancel_callback(self._on_consumer_cancel) self._channel.basic_qos(prefetch_count=self.prefetch_size) if self._queues_to_consume is None: self._queues_to_consume = self._declare_queue_binding() self._start_consuming() def _declare_queue_binding(self): """Is called by recovering connection logic if target RabbitMQ exchange and (or) queue do not exist. Should be overridden in child classes :return Dictionary: declared_queue_name -> no_ack_mode """ raise NotImplementedError( "It is base class. 
Please declare exchanges and queues here" ) def _start_consuming(self): """Is called by recovering connection logic for starting consumption of configured RabbitMQ queues """ assert self._queues_to_consume is not None try: for queue_info in self._queues_to_consume: if queue_info["consumer_tag"] is not None: continue no_ack = queue_info["no_ack"] on_message_callback = ( self._on_message_no_ack_callback if no_ack else self._on_message_with_ack_callback ) queue_info["consumer_tag"] = self._channel.basic_consume( on_message_callback, queue_info["queue_name"], no_ack=no_ack ) except Exception: self._queues_to_consume = None raise def _stop_consuming(self): """Is called by poller's stop logic for stopping consumption of configured RabbitMQ queues """ assert self._queues_to_consume is not None for queue_info in self._queues_to_consume: consumer_tag = queue_info["consumer_tag"] if consumer_tag is not None: self._channel.basic_cancel(consumer_tag) queue_info["consumer_tag"] = None def start(self, on_incoming_callback): """Starts poller. Should be called before polling to allow message consuming :param on_incoming_callback: callback function to be executed when listener received messages. Messages should be processed and acked/nacked by callback """ super(PikaPoller, self).start(on_incoming_callback) with self._lock: if self._started: return connected = False try: self._start_or_recover_consuming() except pika_drv_exc.EstablishConnectionException as exc: LOG.warning( "Can not establish connection during pika poller's " "start(). %s", exc, exc_info=True ) except pika_drv_exc.ConnectionException as exc: LOG.warning( "Connectivity problem during pika poller's start(). %s", exc, exc_info=True ) except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as exc: LOG.warning( "Connectivity problem during pika poller's start(). %s", exc, exc_info=True ) else: connected = True self._started = True if not connected: self._start_recover_consuming_task() def stop(self): """Stops poller. Should be called when polling is not needed anymore to stop new message consuming. After that it is necessary to poll already prefetched messages """ super(PikaPoller, self).stop() with self._lock: if not self._started: return if self._recover_loopingcall is not None: self._recover_loopingcall.stop() self._recover_loopingcall = None if (self._queues_to_consume and self._channel and self._channel.is_open): try: self._stop_consuming() except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as exc: LOG.warning( "Connectivity problem detected during consumer " "cancellation. %s", exc, exc_info=True ) self._deliver_cur_batch() self._started = False def cleanup(self): """Cleanup allocated resources (channel, connection, etc).""" with self._lock: if self._connection and self._connection.is_open: try: self._closing_connection_by_poller = True self._connection.close() self._closing_connection_by_poller = False except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS: # expected errors pass except Exception: LOG.exception("Unexpected error during closing connection") finally: self._channel = None self._connection = None class RpcServicePikaPoller(PikaPoller): """PikaPoller implementation for polling RPC messages. 
Overrides base functionality according to RPC specific """ def __init__(self, pika_engine, target, batch_size, batch_timeout, prefetch_count): """Adds target parameter for declaring RPC specific exchanges and queues :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param target: Target, oslo.messaging Target object which defines RPC endpoint :param batch_size: desired number of messages passed to single on_incoming_callback call :param batch_timeout: defines how long should we wait for batch_size messages if we already have some messages waiting for processing :param prefetch_count: Integer, maximum count of unacknowledged messages which RabbitMQ broker sends to this consumer """ self._target = target super(RpcServicePikaPoller, self).__init__( pika_engine, batch_size, batch_timeout, prefetch_count, pika_drv_msg.RpcPikaIncomingMessage ) def _declare_queue_binding(self): """Overrides base method and perform declaration of RabbitMQ exchanges and queues which correspond to oslo.messaging RPC target :return Dictionary: declared_queue_name -> no_ack_mode """ queue_expiration = self._pika_engine.rpc_queue_expiration exchange = self._pika_engine.get_rpc_exchange_name( self._target.exchange ) queues_to_consume = [] for no_ack in [True, False]: queue = self._pika_engine.get_rpc_queue_name( self._target.topic, None, no_ack ) self._pika_engine.declare_queue_binding_by_channel( channel=self._channel, exchange=exchange, queue=queue, routing_key=queue, exchange_type='direct', durable=False, queue_expiration=queue_expiration ) queues_to_consume.append( {"queue_name": queue, "no_ack": no_ack, "consumer_tag": None} ) if self._target.server: server_queue = self._pika_engine.get_rpc_queue_name( self._target.topic, self._target.server, no_ack ) self._pika_engine.declare_queue_binding_by_channel( channel=self._channel, exchange=exchange, durable=False, queue=server_queue, routing_key=server_queue, exchange_type='direct', queue_expiration=queue_expiration ) queues_to_consume.append( {"queue_name": server_queue, "no_ack": no_ack, "consumer_tag": None} ) worker_queue = self._pika_engine.get_rpc_queue_name( self._target.topic, self._target.server, no_ack, True ) all_workers_routing_key = self._pika_engine.get_rpc_queue_name( self._target.topic, "all_workers", no_ack ) self._pika_engine.declare_queue_binding_by_channel( channel=self._channel, exchange=exchange, durable=False, queue=worker_queue, routing_key=all_workers_routing_key, exchange_type='direct', queue_expiration=queue_expiration ) queues_to_consume.append( {"queue_name": worker_queue, "no_ack": no_ack, "consumer_tag": None} ) return queues_to_consume class RpcReplyPikaPoller(PikaPoller): """PikaPoller implementation for polling RPC reply messages. 
Overrides base functionality according to RPC reply specific """ def __init__(self, pika_engine, exchange, queue, batch_size, batch_timeout, prefetch_count): """Adds exchange and queue parameter for declaring exchange and queue used for RPC reply delivery :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param exchange: String, exchange name used for RPC reply delivery :param queue: String, queue name used for RPC reply delivery :param batch_size: desired number of messages passed to single on_incoming_callback call :param batch_timeout: defines how long should we wait for batch_size messages if we already have some messages waiting for processing :param prefetch_count: Integer, maximum count of unacknowledged messages which RabbitMQ broker sends to this consumer """ self._exchange = exchange self._queue = queue super(RpcReplyPikaPoller, self).__init__( pika_engine, batch_size, batch_timeout, prefetch_count, pika_drv_msg.RpcReplyPikaIncomingMessage ) def _declare_queue_binding(self): """Overrides base method and perform declaration of RabbitMQ exchange and queue used for RPC reply delivery :return Dictionary: declared_queue_name -> no_ack_mode """ self._pika_engine.declare_queue_binding_by_channel( channel=self._channel, exchange=self._exchange, queue=self._queue, routing_key=self._queue, exchange_type='direct', queue_expiration=self._pika_engine.rpc_queue_expiration, durable=False ) return [{"queue_name": self._queue, "no_ack": False, "consumer_tag": None}] class NotificationPikaPoller(PikaPoller): """PikaPoller implementation for polling Notification messages. Overrides base functionality according to Notification specific """ def __init__(self, pika_engine, targets_and_priorities, batch_size, batch_timeout, prefetch_count, queue_name=None): """Adds targets_and_priorities and queue_name parameter for declaring exchanges and queues used for notification delivery :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param targets_and_priorities: list of (target, priority), defines default queue names for corresponding notification types :param batch_size: desired number of messages passed to single on_incoming_callback call :param batch_timeout: defines how long should we wait for batch_size messages if we already have some messages waiting for processing :param prefetch_count: Integer, maximum count of unacknowledged messages which RabbitMQ broker sends to this consumer :param queue: String, alternative queue name used for this poller instead of default queue name """ self._targets_and_priorities = targets_and_priorities self._queue_name = queue_name super(NotificationPikaPoller, self).__init__( pika_engine, batch_size, batch_timeout, prefetch_count, pika_drv_msg.PikaIncomingMessage ) def _declare_queue_binding(self): """Overrides base method and perform declaration of RabbitMQ exchanges and queues used for notification delivery :return Dictionary: declared_queue_name -> no_ack_mode """ queues_to_consume = [] for target, priority in self._targets_and_priorities: routing_key = '%s.%s' % (target.topic, priority) queue = self._queue_name or routing_key self._pika_engine.declare_queue_binding_by_channel( channel=self._channel, exchange=( target.exchange or self._pika_engine.default_notification_exchange ), queue=queue, routing_key=routing_key, exchange_type='direct', queue_expiration=None, durable=self._pika_engine.notification_persistence, ) queues_to_consume.append( {"queue_name": queue, "no_ack": 
False, "consumer_tag": None} ) return queues_to_consume oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_connection.py0000666000175100017510000004603413224676046027445 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import logging import os import threading import futurist from pika.adapters import select_connection from pika import exceptions as pika_exceptions from pika import spec as pika_spec from oslo_utils import eventletutils current_thread = eventletutils.fetch_current_thread_functor() LOG = logging.getLogger(__name__) class ThreadSafePikaConnection(object): def __init__(self, parameters=None, _impl_class=select_connection.SelectConnection): self.params = parameters self._connection_lock = threading.Lock() self._evt_closed = threading.Event() self._task_queue = collections.deque() self._pending_connection_futures = set() create_connection_future = self._register_pending_future() def on_open_error(conn, err): create_connection_future.set_exception( pika_exceptions.AMQPConnectionError(err) ) self._impl = _impl_class( parameters=parameters, on_open_callback=create_connection_future.set_result, on_open_error_callback=on_open_error, on_close_callback=self._on_connection_close, stop_ioloop_on_close=False, ) self._interrupt_pipein, self._interrupt_pipeout = os.pipe() self._impl.ioloop.add_handler(self._interrupt_pipein, self._impl.ioloop.read_interrupt, select_connection.READ) self._thread = threading.Thread(target=self._process_io) self._thread.daemon = True self._thread_id = None self._thread.start() create_connection_future.result() def _check_called_not_from_event_loop(self): if current_thread() == self._thread_id: raise RuntimeError("This call is not allowed from ioloop thread") def _execute_task(self, func, *args, **kwargs): if current_thread() == self._thread_id: return func(*args, **kwargs) future = futurist.Future() self._task_queue.append((func, args, kwargs, future)) if self._evt_closed.is_set(): self._notify_all_futures_connection_close() elif self._interrupt_pipeout is not None: os.write(self._interrupt_pipeout, b'X') return future.result() def _register_pending_future(self): future = futurist.Future() self._pending_connection_futures.add(future) def on_done_callback(fut): try: self._pending_connection_futures.remove(fut) except KeyError: pass future.add_done_callback(on_done_callback) if self._evt_closed.is_set(): self._notify_all_futures_connection_close() return future def _notify_all_futures_connection_close(self): while self._task_queue: try: method_res_future = self._task_queue.pop()[3] except KeyError: break else: method_res_future.set_exception( pika_exceptions.ConnectionClosed() ) while self._pending_connection_futures: try: pending_connection_future = ( self._pending_connection_futures.pop() ) except KeyError: break else: pending_connection_future.set_exception( pika_exceptions.ConnectionClosed() ) def _on_connection_close(self, conn, reply_code, reply_text): self._evt_closed.set() 
self._notify_all_futures_connection_close() if self._interrupt_pipeout: os.close(self._interrupt_pipeout) os.close(self._interrupt_pipein) def add_on_close_callback(self, callback): return self._execute_task(self._impl.add_on_close_callback, callback) def _do_process_io(self): while self._task_queue: func, args, kwargs, future = self._task_queue.pop() try: res = func(*args, **kwargs) except BaseException as e: LOG.exception(e) future.set_exception(e) else: future.set_result(res) self._impl.ioloop.poll() self._impl.ioloop.process_timeouts() def _process_io(self): self._thread_id = current_thread() while not self._evt_closed.is_set(): try: self._do_process_io() except BaseException: LOG.exception("Error during processing connection's IO") def close(self, *args, **kwargs): self._check_called_not_from_event_loop() res = self._execute_task(self._impl.close, *args, **kwargs) self._evt_closed.wait() self._thread.join() return res def channel(self, channel_number=None): self._check_called_not_from_event_loop() channel_opened_future = self._register_pending_future() impl_channel = self._execute_task( self._impl.channel, on_open_callback=channel_opened_future.set_result, channel_number=channel_number ) # Create our proxy channel channel = ThreadSafePikaChannel(impl_channel, self) # Link implementation channel with our proxy channel impl_channel._set_cookie(channel) channel_opened_future.result() return channel def add_timeout(self, timeout, callback): return self._execute_task(self._impl.add_timeout, timeout, callback) def remove_timeout(self, timeout_id): return self._execute_task(self._impl.remove_timeout, timeout_id) @property def is_closed(self): return self._impl.is_closed @property def is_closing(self): return self._impl.is_closing @property def is_open(self): return self._impl.is_open class ThreadSafePikaChannel(object): # pylint: disable=R0904,R0902 def __init__(self, channel_impl, connection): self._impl = channel_impl self._connection = connection self._delivery_confirmation = False self._message_returned = False self._current_future = None self._evt_closed = threading.Event() self.add_on_close_callback(self._on_channel_close) def _execute_task(self, func, *args, **kwargs): return self._connection._execute_task(func, *args, **kwargs) def _on_channel_close(self, channel, reply_code, reply_text): self._evt_closed.set() if self._current_future: self._current_future.set_exception( pika_exceptions.ChannelClosed(reply_code, reply_text)) def _on_message_confirmation(self, frame): self._current_future.set_result(frame) def add_on_close_callback(self, callback): self._execute_task(self._impl.add_on_close_callback, callback) def add_on_cancel_callback(self, callback): self._execute_task(self._impl.add_on_cancel_callback, callback) def __int__(self): return self.channel_number @property def channel_number(self): return self._impl.channel_number @property def is_closed(self): return self._impl.is_closed @property def is_closing(self): return self._impl.is_closing @property def is_open(self): return self._impl.is_open def close(self, reply_code=0, reply_text="Normal Shutdown"): self._impl.close(reply_code=reply_code, reply_text=reply_text) self._evt_closed.wait() def _check_called_not_from_event_loop(self): self._connection._check_called_not_from_event_loop() def flow(self, active): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task( self._impl.flow, callback=self._current_future.set_result, active=active ) return self._current_future.result() def 
basic_consume(self, # pylint: disable=R0913 consumer_callback, queue, no_ack=False, exclusive=False, consumer_tag=None, arguments=None): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task( self._impl.add_callback, self._current_future.set_result, replies=[pika_spec.Basic.ConsumeOk], one_shot=True ) self._impl.add_callback(self._current_future.set_result, replies=[pika_spec.Basic.ConsumeOk], one_shot=True) tag = self._execute_task( self._impl.basic_consume, consumer_callback=consumer_callback, queue=queue, no_ack=no_ack, exclusive=exclusive, consumer_tag=consumer_tag, arguments=arguments ) self._current_future.result() return tag def basic_cancel(self, consumer_tag): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task( self._impl.basic_cancel, callback=self._current_future.set_result, consumer_tag=consumer_tag, nowait=False) self._current_future.result() def basic_ack(self, delivery_tag=0, multiple=False): return self._execute_task( self._impl.basic_ack, delivery_tag=delivery_tag, multiple=multiple) def basic_nack(self, delivery_tag=None, multiple=False, requeue=True): return self._execute_task( self._impl.basic_nack, delivery_tag=delivery_tag, multiple=multiple, requeue=requeue ) def publish(self, exchange, routing_key, body, # pylint: disable=R0913 properties=None, mandatory=False, immediate=False): if self._delivery_confirmation: self._check_called_not_from_event_loop() # In publisher-acknowledgments mode self._message_returned = False self._current_future = futurist.Future() self._execute_task(self._impl.basic_publish, exchange=exchange, routing_key=routing_key, body=body, properties=properties, mandatory=mandatory, immediate=immediate) conf_method = self._current_future.result().method if isinstance(conf_method, pika_spec.Basic.Nack): raise pika_exceptions.NackError((None,)) else: assert isinstance(conf_method, pika_spec.Basic.Ack), ( conf_method) if self._message_returned: raise pika_exceptions.UnroutableError((None,)) else: # In non-publisher-acknowledgments mode self._execute_task(self._impl.basic_publish, exchange=exchange, routing_key=routing_key, body=body, properties=properties, mandatory=mandatory, immediate=immediate) def basic_qos(self, prefetch_size=0, prefetch_count=0, all_channels=False): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.basic_qos, callback=self._current_future.set_result, prefetch_size=prefetch_size, prefetch_count=prefetch_count, all_channels=all_channels) self._current_future.result() def basic_recover(self, requeue=False): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task( self._impl.basic_recover, callback=lambda: self._current_future.set_result(None), requeue=requeue ) self._current_future.result() def basic_reject(self, delivery_tag=None, requeue=True): self._execute_task(self._impl.basic_reject, delivery_tag=delivery_tag, requeue=requeue) def _on_message_returned(self, *args, **kwargs): self._message_returned = True def confirm_delivery(self): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.add_callback, callback=self._current_future.set_result, replies=[pika_spec.Confirm.SelectOk], one_shot=True) self._execute_task(self._impl.confirm_delivery, callback=self._on_message_confirmation, nowait=False) self._current_future.result() self._delivery_confirmation = True 
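# From this point on publish() runs in publisher-confirms mode; also
# register the returned-message callback so that publish() can detect
# unroutable messages and raise UnroutableError.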
self._execute_task(self._impl.add_on_return_callback, self._on_message_returned) def exchange_declare(self, exchange=None, # pylint: disable=R0913 exchange_type='direct', passive=False, durable=False, auto_delete=False, internal=False, arguments=None, **kwargs): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.exchange_declare, callback=self._current_future.set_result, exchange=exchange, exchange_type=exchange_type, passive=passive, durable=durable, auto_delete=auto_delete, internal=internal, nowait=False, arguments=arguments, type=kwargs["type"] if kwargs else None) return self._current_future.result() def exchange_delete(self, exchange=None, if_unused=False): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.exchange_delete, callback=self._current_future.set_result, exchange=exchange, if_unused=if_unused, nowait=False) return self._current_future.result() def exchange_bind(self, destination=None, source=None, routing_key='', arguments=None): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.exchange_bind, callback=self._current_future.set_result, destination=destination, source=source, routing_key=routing_key, nowait=False, arguments=arguments) return self._current_future.result() def exchange_unbind(self, destination=None, source=None, routing_key='', arguments=None): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.exchange_unbind, callback=self._current_future.set_result, destination=destination, source=source, routing_key=routing_key, nowait=False, arguments=arguments) return self._current_future.result() def queue_declare(self, queue='', passive=False, durable=False, exclusive=False, auto_delete=False, arguments=None): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.queue_declare, callback=self._current_future.set_result, queue=queue, passive=passive, durable=durable, exclusive=exclusive, auto_delete=auto_delete, nowait=False, arguments=arguments) return self._current_future.result() def queue_delete(self, queue='', if_unused=False, if_empty=False): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.queue_delete, callback=self._current_future.set_result, queue=queue, if_unused=if_unused, if_empty=if_empty, nowait=False) return self._current_future.result() def queue_purge(self, queue=''): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.queue_purge, callback=self._current_future.set_result, queue=queue, nowait=False) return self._current_future.result() def queue_bind(self, queue, exchange, routing_key=None, arguments=None): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.queue_bind, callback=self._current_future.set_result, queue=queue, exchange=exchange, routing_key=routing_key, nowait=False, arguments=arguments) return self._current_future.result() def queue_unbind(self, queue='', exchange=None, routing_key=None, arguments=None): self._check_called_not_from_event_loop() self._current_future = futurist.Future() self._execute_task(self._impl.queue_unbind, callback=self._current_future.set_result, queue=queue, exchange=exchange, routing_key=routing_key, arguments=arguments) return 
self._current_future.result() oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_connection_factory.py0000666000175100017510000002614113224676046031171 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import random import socket import threading import time from oslo_config import cfg import pika from pika import credentials as pika_credentials from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns from oslo_messaging._drivers.pika_driver import pika_connection from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc LOG = logging.getLogger(__name__) # constant for setting tcp_user_timeout socket option # (it should be defined in 'select' module of standard library in future) TCP_USER_TIMEOUT = 18 # constants for creating connection statistics HOST_CONNECTION_LAST_TRY_TIME = "last_try_time" HOST_CONNECTION_LAST_SUCCESS_TRY_TIME = "last_success_try_time" pika_opts = [ cfg.IntOpt('channel_max', help='Maximum number of channels to allow'), cfg.IntOpt('frame_max', help='The maximum byte size for an AMQP frame'), cfg.IntOpt('heartbeat_interval', default=3, help="How often to send heartbeats for consumer's connections"), cfg.BoolOpt('ssl', help='Enable SSL'), cfg.DictOpt('ssl_options', help='Arguments passed to ssl.wrap_socket'), cfg.FloatOpt('socket_timeout', default=0.25, help="Set socket timeout in seconds for connection's socket"), cfg.FloatOpt('tcp_user_timeout', default=0.25, help="Set TCP_USER_TIMEOUT in seconds for connection's " "socket"), cfg.FloatOpt('host_connection_reconnect_delay', default=0.25, help="Set delay for reconnection to some host which has " "connection error"), cfg.StrOpt('connection_factory', default="single", choices=["new", "single", "read_write"], help='Connection factory implementation') ] class PikaConnectionFactory(object): def __init__(self, url, conf): self._url = url self._conf = conf self._connection_lock = threading.RLock() if not url.hosts: raise ValueError("You should provide at least one RabbitMQ host") # initializing connection parameters for configured RabbitMQ hosts self._common_pika_params = { 'virtual_host': url.virtual_host, 'channel_max': conf.oslo_messaging_pika.channel_max, 'frame_max': conf.oslo_messaging_pika.frame_max, 'ssl': conf.oslo_messaging_pika.ssl, 'ssl_options': conf.oslo_messaging_pika.ssl_options, 'socket_timeout': conf.oslo_messaging_pika.socket_timeout } self._host_list = url.hosts self._heartbeat_interval = conf.oslo_messaging_pika.heartbeat_interval self._host_connection_reconnect_delay = ( conf.oslo_messaging_pika.host_connection_reconnect_delay ) self._tcp_user_timeout = conf.oslo_messaging_pika.tcp_user_timeout self._connection_host_status = {} self._cur_connection_host_num = random.randint( 0, len(url.hosts) - 1 ) def cleanup(self): pass def create_connection(self, for_listening=False): """Create and return connection to any available host. 
:return: created connection :raise: ConnectionException if all hosts are not reachable """ with self._connection_lock: host_count = len(self._host_list) connection_attempts = host_count while connection_attempts > 0: self._cur_connection_host_num += 1 self._cur_connection_host_num %= host_count try: return self._create_host_connection( self._cur_connection_host_num, for_listening ) except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS as e: LOG.warning("Can't establish connection to host. %s", e) except pika_drv_exc.HostConnectionNotAllowedException as e: LOG.warning("Connection to host is not allowed. %s", e) connection_attempts -= 1 raise pika_drv_exc.EstablishConnectionException( "Can not establish connection to any configured RabbitMQ " "host: " + str(self._host_list) ) def _set_tcp_user_timeout(self, s): if not self._tcp_user_timeout: return try: s.setsockopt( socket.IPPROTO_TCP, TCP_USER_TIMEOUT, int(self._tcp_user_timeout * 1000) ) except socket.error: LOG.warning( "Whoops, this kernel doesn't seem to support TCP_USER_TIMEOUT." ) def _create_host_connection(self, host_index, for_listening): """Create new connection to host #host_index :param host_index: Integer, number of host for connection establishing :param for_listening: Boolean, creates connection for listening if True :return: New connection """ host = self._host_list[host_index] cur_time = time.time() host_connection_status = self._connection_host_status.get(host) if host_connection_status is None: host_connection_status = { HOST_CONNECTION_LAST_SUCCESS_TRY_TIME: 0, HOST_CONNECTION_LAST_TRY_TIME: 0 } self._connection_host_status[host] = host_connection_status last_success_time = host_connection_status[ HOST_CONNECTION_LAST_SUCCESS_TRY_TIME ] last_time = host_connection_status[ HOST_CONNECTION_LAST_TRY_TIME ] # raise HostConnectionNotAllowedException if we tried to establish # connection in last 'host_connection_reconnect_delay' and got # failure if (last_time != last_success_time and cur_time - last_time < self._host_connection_reconnect_delay): raise pika_drv_exc.HostConnectionNotAllowedException( "Connection to host #{} is not allowed now because of " "previous failure".format(host_index) ) try: connection = self._do_create_host_connection( host, for_listening ) self._connection_host_status[host][ HOST_CONNECTION_LAST_SUCCESS_TRY_TIME ] = cur_time return connection finally: self._connection_host_status[host][ HOST_CONNECTION_LAST_TRY_TIME ] = cur_time def _do_create_host_connection(self, host, for_listening): connection_params = pika.ConnectionParameters( host=host.hostname, port=host.port, credentials=pika_credentials.PlainCredentials( host.username, host.password ), heartbeat_interval=( self._heartbeat_interval if for_listening else None ), **self._common_pika_params ) if for_listening: connection = pika_connection.ThreadSafePikaConnection( parameters=connection_params ) else: connection = pika.BlockingConnection( parameters=connection_params ) connection.params = connection_params self._set_tcp_user_timeout(connection._impl.socket) return connection class NotClosableConnection(object): def __init__(self, connection): self._connection = connection def __getattr__(self, item): return getattr(self._connection, item) def close(self): pass class SinglePikaConnectionFactory(PikaConnectionFactory): def __init__(self, url, conf): super(SinglePikaConnectionFactory, self).__init__(url, conf) self._connection = None def create_connection(self, for_listening=False): with self._connection_lock: if self._connection is None or not 
self._connection.is_open: self._connection = ( super(SinglePikaConnectionFactory, self).create_connection( True ) ) return NotClosableConnection(self._connection) def cleanup(self): with self._connection_lock: if self._connection is not None and self._connection.is_open: try: self._connection.close() except Exception: LOG.warning( "Unexpected exception during connection closing", exc_info=True ) self._connection = None class ReadWritePikaConnectionFactory(PikaConnectionFactory): def __init__(self, url, conf): super(ReadWritePikaConnectionFactory, self).__init__(url, conf) self._read_connection = None self._write_connection = None def create_connection(self, for_listening=False): with self._connection_lock: if for_listening: if (self._read_connection is None or not self._read_connection.is_open): self._read_connection = super( ReadWritePikaConnectionFactory, self ).create_connection(True) return NotClosableConnection(self._read_connection) else: if (self._write_connection is None or not self._write_connection.is_open): self._write_connection = super( ReadWritePikaConnectionFactory, self ).create_connection(True) return NotClosableConnection(self._write_connection) def cleanup(self): with self._connection_lock: if (self._read_connection is not None and self._read_connection.is_open): try: self._read_connection.close() except Exception: LOG.warning( "Unexpected exception during connection closing", exc_info=True ) self._read_connection = None if (self._write_connection is not None and self._write_connection.is_open): try: self._write_connection.close() except Exception: LOG.warning( "Unexpected exception during connection closing", exc_info=True ) self._write_connection = None oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_exceptions.py0000666000175100017510000000420413224676046027460 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging import exceptions class ExchangeNotFoundException(exceptions.MessageDeliveryFailure): """Is raised if the specified exchange is not found in RabbitMQ.""" pass class MessageRejectedException(exceptions.MessageDeliveryFailure): """Is raised if the message you are trying to send was nacked by RabbitMQ; this may happen if RabbitMQ is not able to process the message """ pass class RoutingException(exceptions.MessageDeliveryFailure): """Is raised if the message can not be delivered to any queue. Usually it means that no queue is bound to the given exchange with the given routing key. Raised only if the 'mandatory' flag is specified """ pass class ConnectionException(exceptions.MessagingException): """Is raised if some operation can not be performed due to a connectivity problem """ pass class TimeoutConnectionException(ConnectionException): """Is raised if the socket timeout expired during network interaction""" pass class EstablishConnectionException(ConnectionException): """Is raised if some problem occurred during the connection establishment procedure """ pass class HostConnectionNotAllowedException(EstablishConnectionException): """Is raised on an attempt to establish a connection to a temporarily disallowed host (because of the reconnection policy, for example) """ pass class UnsupportedDriverVersion(exceptions.MessagingException): """Is raised when a received message was sent by a different, unsupported driver version """ pass oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/__init__.py0000666000175100017510000000000013224676046026020 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_message.py0000666000175100017510000006015413224676046026731 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket import time import traceback import uuid from concurrent import futures from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import timeutils from pika import exceptions as pika_exceptions from pika import spec as pika_spec import pika_pool import six import tenacity import oslo_messaging from oslo_messaging._drivers import base from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc from oslo_messaging import _utils as utils from oslo_messaging import exceptions LOG = logging.getLogger(__name__) _VERSION_HEADER = "version" _VERSION = "1.0" class RemoteExceptionMixin(object): """Used for constructing a dynamic exception type during deserialization of a remote exception. It defines a unified '__init__' method signature and exception message format """ def __init__(self, module, clazz, message, trace): """Store serialized data :param module: String, module name for importing original exception class of serialized remote exception :param clazz: String, original class name of serialized remote exception :param message: String, original message of serialized remote exception :param trace: String, original trace of serialized remote exception """ self.module = module self.clazz = clazz self.message = message self.trace = trace self._str_msgs = message + "\n" + "\n".join(trace) def __str__(self): return self._str_msgs class PikaIncomingMessage(base.IncomingMessage): """Driver friendly adapter for received message. 
Extract message information from RabbitMQ message and provide access to it """ def __init__(self, pika_engine, channel, method, properties, body): """Parse RabbitMQ message :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param channel: Channel, RabbitMQ channel which was used for this message delivery, used for sending ack back. If None - ack is not required :param method: Method, RabbitMQ message method :param properties: Properties, RabbitMQ message properties :param body: Bytes, RabbitMQ message body """ headers = getattr(properties, "headers", {}) version = headers.get(_VERSION_HEADER, None) if not utils.version_is_compatible(version, _VERSION): raise pika_drv_exc.UnsupportedDriverVersion( "Message's version: {} is not compatible with driver version: " "{}".format(version, _VERSION)) self._pika_engine = pika_engine self._channel = channel self._delivery_tag = method.delivery_tag self._version = version self._content_type = properties.content_type self.unique_id = properties.message_id self.expiration_time = ( None if properties.expiration is None else time.time() + float(properties.expiration) / 1000 ) try: serializer = pika_drv_cmns.MESSAGE_SERIALIZERS[self._content_type] except KeyError: raise NotImplementedError( "Content-type['{}'] is not supported.".format( self._content_type ) ) message_dict = serializer.load_from_bytes(body) context_dict = {} for key in list(message_dict.keys()): key = six.text_type(key) if key.startswith('_$_'): value = message_dict.pop(key) context_dict[key[3:]] = value super(PikaIncomingMessage, self).__init__(context_dict, message_dict) def need_ack(self): return self._channel is not None def acknowledge(self): """Ack the message. Should be called by message processing logic when it considered as consumed (means that we don't need redelivery of this message anymore) """ if self.need_ack(): self._channel.basic_ack(delivery_tag=self._delivery_tag) def requeue(self): """Rollback the message. Should be called by message processing logic when it can not process the message right now and should be redelivered later if it is possible """ if self.need_ack(): return self._channel.basic_nack(delivery_tag=self._delivery_tag, requeue=True) class RpcPikaIncomingMessage(PikaIncomingMessage, base.RpcIncomingMessage): """PikaIncomingMessage implementation for RPC messages. It expects extra RPC related fields in message body (msg_id and reply_q). Also 'reply' method added to allow consumer to send RPC reply back to the RPC client """ def __init__(self, pika_engine, channel, method, properties, body): """Defines default values of msg_id and reply_q fields and just call super.__init__ method :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param channel: Channel, RabbitMQ channel which was used for this message delivery, used for sending ack back. If None - ack is not required :param method: Method, RabbitMQ message method :param properties: Properties, RabbitMQ message properties :param body: Bytes, RabbitMQ message body """ super(RpcPikaIncomingMessage, self).__init__( pika_engine, channel, method, properties, body ) self.reply_q = properties.reply_to self.msg_id = properties.correlation_id def reply(self, reply=None, failure=None): """Send back reply to the RPC client :param reply: Dictionary, reply. In case of exception should be None :param failure: Tuple, should be a sys.exc_info() tuple. Should be None if RPC request was successfully processed. 
:return RpcReplyPikaIncomingMessage: message with reply """ if self.reply_q is None: return reply_outgoing_message = RpcReplyPikaOutgoingMessage( self._pika_engine, self.msg_id, reply=reply, failure_info=failure, content_type=self._content_type, ) def on_exception(ex): if isinstance(ex, pika_drv_exc.ConnectionException): LOG.warning( "Connectivity related problem during reply sending. %s", ex ) return True else: return False if self._pika_engine.rpc_reply_retry_attempts: retrier = tenacity.retry( stop=( tenacity.stop_never if self._pika_engine.rpc_reply_retry_attempts == -1 else tenacity.stop_after_attempt( self._pika_engine.rpc_reply_retry_attempts ) ), retry=tenacity.retry_if_exception(on_exception), wait=tenacity.wait_fixed( self._pika_engine.rpc_reply_retry_delay ) ) else: retrier = None try: timeout = (None if self.expiration_time is None else max(self.expiration_time - time.time(), 0)) with timeutils.StopWatch(duration=timeout) as stopwatch: reply_outgoing_message.send( reply_q=self.reply_q, stopwatch=stopwatch, retrier=retrier ) LOG.debug( "Message [id:'%s'] replied to '%s'.", self.msg_id, self.reply_q ) except Exception: LOG.exception( "Message [id:'%s'] wasn't replied to : %s", self.msg_id, self.reply_q ) class RpcReplyPikaIncomingMessage(PikaIncomingMessage): """PikaIncomingMessage implementation for RPC reply messages. It expects extra RPC reply related fields in message body (result and failure). """ def __init__(self, pika_engine, channel, method, properties, body): """Defines default values of result and failure fields, call super.__init__ method and then construct Exception object if failure is not None :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param channel: Channel, RabbitMQ channel which was used for this message delivery, used for sending ack back. If None - ack is not required :param method: Method, RabbitMQ message method :param properties: Properties, RabbitMQ message properties :param body: Bytes, RabbitMQ message body """ super(RpcReplyPikaIncomingMessage, self).__init__( pika_engine, channel, method, properties, body ) self.msg_id = properties.correlation_id self.result = self.message.get("s", None) self.failure = self.message.get("e", None) if self.failure is not None: trace = self.failure.get('t', []) message = self.failure.get('s', "") class_name = self.failure.get('c') module_name = self.failure.get('m') res_exc = None if module_name in pika_engine.allowed_remote_exmods: try: module = importutils.import_module(module_name) klass = getattr(module, class_name) ex_type = type( klass.__name__, (RemoteExceptionMixin, klass), {} ) res_exc = ex_type(module_name, class_name, message, trace) except ImportError as e: LOG.warning( "Can not deserialize remote exception [module:%s, " "class:%s]. %s", module_name, class_name, e ) # if we have not processed failure yet, use RemoteError class if res_exc is None: res_exc = oslo_messaging.RemoteError( class_name, message, trace ) self.failure = res_exc class PikaOutgoingMessage(object): """Driver friendly adapter for sending message. 
Constructs a RabbitMQ message and sends it """ def __init__(self, pika_engine, message, context, content_type=None): """Prepare a RabbitMQ message for sending :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param message: Dictionary, user's message fields :param context: Dictionary, request context's fields :param content_type: String, content-type header, defines serialization mechanism, if None default content-type from pika_engine is used """ self._pika_engine = pika_engine self._content_type = ( content_type if content_type is not None else self._pika_engine.default_content_type ) try: self._serializer = pika_drv_cmns.MESSAGE_SERIALIZERS[ self._content_type ] except KeyError: raise NotImplementedError( "Content-type['{}'] is not supported.".format( self._content_type ) ) self.message = message self.context = context self.unique_id = uuid.uuid4().hex def _prepare_message_to_send(self): """Combine user's message fields and system fields (_unique_id, context's data, etc.) """ msg = self.message.copy() if self.context: for key, value in self.context.items(): key = six.text_type(key) msg['_$_' + key] = value props = pika_spec.BasicProperties( content_type=self._content_type, headers={_VERSION_HEADER: _VERSION}, message_id=self.unique_id, ) return msg, props @staticmethod def _publish(pool, exchange, routing_key, body, properties, mandatory, stopwatch): """Execute the pika publish method using a connection from the connection pool. Also, this method catches all pika-related exceptions and raises oslo.messaging specific exceptions instead :param pool: Pool, pika connection pool for connection choosing :param exchange: String, RabbitMQ exchange name for message sending :param routing_key: String, RabbitMQ routing key for message routing :param body: Bytes, RabbitMQ message payload :param properties: Properties, RabbitMQ message properties :param mandatory: Boolean, RabbitMQ publish mandatory flag (raise exception if it is not possible to deliver message to any queue) :param stopwatch: StopWatch, stopwatch object for calculating allowed timeouts """ if stopwatch.expired(): raise exceptions.MessagingTimeout( "Timeout for the current operation has expired." ) try: timeout = stopwatch.leftover(return_none=True) with pool.acquire(timeout=timeout) as conn: if timeout is not None: properties.expiration = str(int(timeout * 1000)) conn.channel.publish( exchange=exchange, routing_key=routing_key, body=body, properties=properties, mandatory=mandatory ) except pika_exceptions.NackError as e: raise pika_drv_exc.MessageRejectedException( "Can not send message: [body: {}, properties: {}] to " "target [exchange: {}, routing_key: {}]. {}".format( body, properties, exchange, routing_key, str(e) ) ) except pika_exceptions.UnroutableError as e: raise pika_drv_exc.RoutingException( "Can not deliver message:[body:{}, properties: {}] to any " "queue using target: [exchange:{}, " "routing_key:{}]. {}".format( body, properties, exchange, routing_key, str(e) ) ) except pika_pool.Timeout as e: raise exceptions.MessagingTimeout( "Timeout for the current operation has expired. {}".format(str(e)) ) except pika_pool.Connection.connectivity_errors as e: if (isinstance(e, pika_exceptions.ChannelClosed) and e.args and e.args[0] == 404): raise pika_drv_exc.ExchangeNotFoundException( "Attempt to send message to non-existing exchange " "detected, message: [body:{}, properties: {}], target: " "[exchange:{}, routing_key:{}]. 
{}".format( body, properties, exchange, routing_key, str(e) ) ) raise pika_drv_exc.ConnectionException( "Connectivity problem detected during sending the message: " "[body:{}, properties: {}] to target: [exchange:{}, " "routing_key:{}]. {}".format( body, properties, exchange, routing_key, str(e) ) ) except socket.timeout: raise pika_drv_exc.TimeoutConnectionException( "Socket timeout exceeded." ) def _do_send(self, exchange, routing_key, msg_dict, msg_props, confirm=True, mandatory=True, persistent=False, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, retrier=None): """Send prepared message with configured retrying :param exchange: String, RabbitMQ exchange name for message sending :param routing_key: String, RabbitMQ routing key for message routing :param msg_dict: Dictionary, message payload :param msg_props: Properties, message properties :param confirm: Boolean, enable publisher confirmation if True :param mandatory: Boolean, RabbitMQ publish mandatory flag (raise exception if it is not possible to deliver message to any queue) :param persistent: Boolean, send persistent message if True, works only for routing into durable queues :param stopwatch: StopWatch, stopwatch object for calculating allowed timeouts :param retrier: tenacity.Retrying, configured retrier object for sending message, if None no retrying is performed """ msg_props.delivery_mode = 2 if persistent else 1 pool = (self._pika_engine.connection_with_confirmation_pool if confirm else self._pika_engine.connection_without_confirmation_pool) body = self._serializer.dump_as_bytes(msg_dict) LOG.debug( "Sending message:[body:%s; properties: %s] to target: " "[exchange:%s; routing_key:%s]", body, msg_props, exchange, routing_key ) publish = (self._publish if retrier is None else retrier(self._publish)) return publish(pool, exchange, routing_key, body, msg_props, mandatory, stopwatch) def send(self, exchange, routing_key='', confirm=True, mandatory=True, persistent=False, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, retrier=None): """Send message with configured retrying :param exchange: String, RabbitMQ exchange name for message sending :param routing_key: String, RabbitMQ routing key for message routing :param confirm: Boolean, enable publisher confirmation if True :param mandatory: Boolean, RabbitMQ publish mandatory flag (raise exception if it is not possible to deliver message to any queue) :param persistent: Boolean, send persistent message if True, works only for routing into durable queues :param stopwatch: StopWatch, stopwatch object for calculating allowed timeouts :param retrier: tenacity.Retrying, configured retrier object for sending message, if None no retrying is performed """ msg_dict, msg_props = self._prepare_message_to_send() return self._do_send(exchange, routing_key, msg_dict, msg_props, confirm, mandatory, persistent, stopwatch, retrier) class RpcPikaOutgoingMessage(PikaOutgoingMessage): """PikaOutgoingMessage implementation for RPC messages. 
It adds possibility to wait and receive RPC reply """ def __init__(self, pika_engine, message, context, content_type=None): super(RpcPikaOutgoingMessage, self).__init__( pika_engine, message, context, content_type ) self.msg_id = None self.reply_q = None def send(self, exchange, routing_key, reply_listener=None, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, retrier=None): """Send RPC message with configured retrying :param exchange: String, RabbitMQ exchange name for message sending :param routing_key: String, RabbitMQ routing key for message routing :param reply_listener: RpcReplyPikaListener, listener for waiting reply. If None - return immediately without reply waiting :param stopwatch: StopWatch, stopwatch object for calculating allowed timeouts :param retrier: tenacity.Retrying, configured retrier object for sending message, if None no retrying is performed """ msg_dict, msg_props = self._prepare_message_to_send() if reply_listener: self.msg_id = uuid.uuid4().hex msg_props.correlation_id = self.msg_id LOG.debug('MSG_ID is %s', self.msg_id) self.reply_q = reply_listener.get_reply_qname() msg_props.reply_to = self.reply_q future = reply_listener.register_reply_waiter(msg_id=self.msg_id) self._do_send( exchange=exchange, routing_key=routing_key, msg_dict=msg_dict, msg_props=msg_props, confirm=True, mandatory=True, persistent=False, stopwatch=stopwatch, retrier=retrier ) try: return future.result(stopwatch.leftover(return_none=True)) except BaseException as e: reply_listener.unregister_reply_waiter(self.msg_id) if isinstance(e, futures.TimeoutError): e = exceptions.MessagingTimeout() raise e else: self._do_send( exchange=exchange, routing_key=routing_key, msg_dict=msg_dict, msg_props=msg_props, confirm=True, mandatory=True, persistent=False, stopwatch=stopwatch, retrier=retrier ) class RpcReplyPikaOutgoingMessage(PikaOutgoingMessage): """PikaOutgoingMessage implementation for RPC reply messages. It sets correlation_id AMQP property to link this reply with response """ def __init__(self, pika_engine, msg_id, reply=None, failure_info=None, content_type=None): """Initialize with reply information for sending :param pika_engine: PikaEngine, shared object with configuration and shared driver functionality :param msg_id: String, msg_id of RPC request, which waits for reply :param reply: Dictionary, reply. In case of exception should be None :param failure_info: Tuple, should be a sys.exc_info() tuple. Should be None if RPC request was successfully processed. 
:param content_type: String, content-type header, defines serialization mechanism, if None default content-type from pika_engine is used """ self.msg_id = msg_id if failure_info is not None: ex_class = failure_info[0] ex = failure_info[1] tb = traceback.format_exception(*failure_info) if issubclass(ex_class, RemoteExceptionMixin): failure_data = { 'c': ex.clazz, 'm': ex.module, 's': ex.message, 't': tb } else: failure_data = { 'c': six.text_type(ex_class.__name__), 'm': six.text_type(ex_class.__module__), 's': six.text_type(ex), 't': tb } msg = {'e': failure_data} else: msg = {'s': reply} super(RpcReplyPikaOutgoingMessage, self).__init__( pika_engine, msg, None, content_type ) def send(self, reply_q, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH, retrier=None): """Send RPC message with configured retrying :param reply_q: String, queue name for sending reply :param stopwatch: StopWatch, stopwatch object for calculating allowed timeouts :param retrier: tenacity.Retrying, configured retrier object for sending message, if None no retrying is performed """ msg_dict, msg_props = self._prepare_message_to_send() msg_props.correlation_id = self.msg_id self._do_send( exchange=self._pika_engine.rpc_reply_exchange, routing_key=reply_q, msg_dict=msg_dict, msg_props=msg_props, confirm=True, mandatory=True, persistent=False, stopwatch=stopwatch, retrier=retrier ) oslo.messaging-5.35.0/oslo_messaging/_drivers/pika_driver/pika_commons.py0000666000175100017510000000244313224676046026755 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import select import socket from oslo_serialization.serializer import json_serializer from oslo_serialization.serializer import msgpack_serializer from oslo_utils import timeutils from pika import exceptions as pika_exceptions import six PIKA_CONNECTIVITY_ERRORS = ( pika_exceptions.AMQPConnectionError, pika_exceptions.ConnectionClosed, pika_exceptions.ChannelClosed, socket.timeout, select.error ) EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins' INFINITE_STOP_WATCH = timeutils.StopWatch(duration=None).start() MESSAGE_SERIALIZERS = { 'application/json': json_serializer.JSONSerializer(), 'application/msgpack': msgpack_serializer.MessagePackSerializer() } oslo.messaging-5.35.0/oslo_messaging/_drivers/impl_rabbit.py0000666000175100017510000015524513224676046024274 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import errno import functools import itertools import math import os import random import socket import ssl import sys import threading import time import uuid import kombu import kombu.connection import kombu.entity import kombu.messaging from oslo_config import cfg from oslo_log import log as logging from oslo_utils import eventletutils from oslo_utils import netutils import six from six.moves.urllib import parse from oslo_messaging._drivers import amqp as rpc_amqp from oslo_messaging._drivers import amqpdriver from oslo_messaging._drivers import base from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers import pool from oslo_messaging._i18n import _ from oslo_messaging._i18n import _LE from oslo_messaging._i18n import _LI from oslo_messaging._i18n import _LW from oslo_messaging import _utils from oslo_messaging import exceptions # NOTE(sileht): don't exists in py2 socket module TCP_USER_TIMEOUT = 18 rabbit_opts = [ cfg.BoolOpt('ssl', default=False, deprecated_name='rabbit_use_ssl', help='Connect over SSL.'), cfg.StrOpt('ssl_version', default='', deprecated_name='kombu_ssl_version', help='SSL version to use (valid only if SSL enabled). ' 'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, ' 'TLSv1_1, and TLSv1_2 may be available on some ' 'distributions.' ), cfg.StrOpt('ssl_key_file', default='', deprecated_name='kombu_ssl_keyfile', help='SSL key file (valid only if SSL enabled).'), cfg.StrOpt('ssl_cert_file', default='', deprecated_name='kombu_ssl_certfile', help='SSL cert file (valid only if SSL enabled).'), cfg.StrOpt('ssl_ca_file', default='', deprecated_name='kombu_ssl_ca_certs', help='SSL certification authority file ' '(valid only if SSL enabled).'), cfg.FloatOpt('kombu_reconnect_delay', default=1.0, deprecated_group='DEFAULT', help='How long to wait before reconnecting in response to an ' 'AMQP consumer cancel notification.'), cfg.StrOpt('kombu_compression', help="EXPERIMENTAL: Possible values are: gzip, bz2. If not " "set compression will not be used. This option may not " "be available in future versions."), cfg.IntOpt('kombu_missing_consumer_retry_timeout', deprecated_name="kombu_reconnect_timeout", default=60, help='How long to wait a missing client before abandoning to ' 'send it its replies. This value should not be longer ' 'than rpc_response_timeout.'), cfg.StrOpt('kombu_failover_strategy', choices=('round-robin', 'shuffle'), default='round-robin', help='Determines how the next RabbitMQ node is chosen in case ' 'the one we are currently connected to becomes ' 'unavailable. 
Takes effect only if more than one ' 'RabbitMQ node is provided in config.'), cfg.StrOpt('rabbit_host', default='localhost', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='The RabbitMQ broker address where a single node is ' 'used.'), cfg.PortOpt('rabbit_port', default=5672, deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='The RabbitMQ broker port where a single node is used.'), cfg.ListOpt('rabbit_hosts', default=['$rabbit_host:$rabbit_port'], deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='RabbitMQ HA cluster host:port pairs.'), cfg.StrOpt('rabbit_userid', default='guest', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='The RabbitMQ userid.'), cfg.StrOpt('rabbit_password', default='guest', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='The RabbitMQ password.', secret=True), cfg.StrOpt('rabbit_login_method', choices=('PLAIN', 'AMQPLAIN', 'RABBIT-CR-DEMO'), default='AMQPLAIN', deprecated_group='DEFAULT', help='The RabbitMQ login method.'), cfg.StrOpt('rabbit_virtual_host', default='/', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='The RabbitMQ virtual host.'), cfg.IntOpt('rabbit_retry_interval', default=1, help='How frequently to retry connecting with RabbitMQ.'), cfg.IntOpt('rabbit_retry_backoff', default=2, deprecated_group='DEFAULT', help='How long to backoff for between retries when connecting ' 'to RabbitMQ.'), cfg.IntOpt('rabbit_interval_max', default=30, help='Maximum interval of RabbitMQ connection retries. ' 'Default is 30 seconds.'), cfg.IntOpt('rabbit_max_retries', default=0, deprecated_for_removal=True, deprecated_group='DEFAULT', help='Maximum number of RabbitMQ connection retries. ' 'Default is 0 (infinite retry count).'), cfg.BoolOpt('rabbit_ha_queues', default=False, deprecated_group='DEFAULT', help='Try to use HA queues in RabbitMQ (x-ha-policy: all). ' 'If you change this option, you must wipe the RabbitMQ ' 'database. In RabbitMQ 3.0, queue mirroring is no longer ' 'controlled by the x-ha-policy argument when declaring a ' 'queue. If you just want to make sure that all queues (except ' 'those with auto-generated names) are mirrored across all ' 'nodes, run: ' """\"rabbitmqctl set_policy HA '^(?!amq\.).*' """ """'{"ha-mode": "all"}' \""""), cfg.IntOpt('rabbit_transient_queues_ttl', min=1, default=1800, help='Positive integer representing duration in seconds for ' 'queue TTL (x-expires). Queues which are unused for the ' 'duration of the TTL are automatically deleted. The ' 'parameter affects only reply and fanout queues.'), cfg.IntOpt('rabbit_qos_prefetch_count', default=0, help='Specifies the number of messages to prefetch. Setting to ' 'zero allows unlimited messages.'), cfg.IntOpt('heartbeat_timeout_threshold', default=60, help="Number of seconds after which the Rabbit broker is " "considered down if heartbeat's keep-alive fails " "(0 disable the heartbeat). 
EXPERIMENTAL"), cfg.IntOpt('heartbeat_rate', default=2, help='How often times during the heartbeat_timeout_threshold ' 'we check the heartbeat.'), # NOTE(sileht): deprecated option since oslo_messaging 1.5.0, cfg.BoolOpt('fake_rabbit', default=False, deprecated_group='DEFAULT', help='Deprecated, use rpc_backend=kombu+memory or ' 'rpc_backend=fake'), ] LOG = logging.getLogger(__name__) def _get_queue_arguments(rabbit_ha_queues, rabbit_queue_ttl): """Construct the arguments for declaring a queue. If the rabbit_ha_queues option is set, we try to declare a mirrored queue as described here: http://www.rabbitmq.com/ha.html Setting x-ha-policy to all means that the queue will be mirrored to all nodes in the cluster. In RabbitMQ 3.0, queue mirroring is no longer controlled by the x-ha-policy argument when declaring a queue. If you just want to make sure that all queues (except those with auto-generated names) are mirrored across all nodes, run: rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-mode": "all"}' If the rabbit_queue_ttl option is > 0, then the queue is declared with the "Queue TTL" value as described here: https://www.rabbitmq.com/ttl.html Setting a queue TTL causes the queue to be automatically deleted if it is unused for the TTL duration. This is a helpful safeguard to prevent queues with zero consumers from growing without bound. """ args = {} if rabbit_ha_queues: args['x-ha-policy'] = 'all' if rabbit_queue_ttl > 0: args['x-expires'] = rabbit_queue_ttl * 1000 return args class RabbitMessage(dict): def __init__(self, raw_message): super(RabbitMessage, self).__init__( rpc_common.deserialize_msg(raw_message.payload)) LOG.trace('RabbitMessage.Init: message %s', self) self._raw_message = raw_message def acknowledge(self): LOG.trace('RabbitMessage.acknowledge: message %s', self) self._raw_message.ack() def requeue(self): LOG.trace('RabbitMessage.requeue: message %s', self) self._raw_message.requeue() class Consumer(object): """Consumer class.""" def __init__(self, exchange_name, queue_name, routing_key, type, durable, exchange_auto_delete, queue_auto_delete, callback, nowait=False, rabbit_ha_queues=None, rabbit_queue_ttl=0): """Init the Consumer class with the exchange_name, routing_key, type, durable auto_delete """ self.queue_name = queue_name self.exchange_name = exchange_name self.routing_key = routing_key self.exchange_auto_delete = exchange_auto_delete self.queue_auto_delete = queue_auto_delete self.durable = durable self.callback = callback self.type = type self.nowait = nowait self.queue_arguments = _get_queue_arguments(rabbit_ha_queues, rabbit_queue_ttl) self.queue = None self._declared_on = None self.exchange = kombu.entity.Exchange( name=exchange_name, type=type, durable=self.durable, auto_delete=self.exchange_auto_delete) def declare(self, conn): """Re-declare the queue after a rabbit (re)connect.""" self.queue = kombu.entity.Queue( name=self.queue_name, channel=conn.channel, exchange=self.exchange, durable=self.durable, auto_delete=self.queue_auto_delete, routing_key=self.routing_key, queue_arguments=self.queue_arguments) try: LOG.debug('[%s] Queue.declare: %s', conn.connection_id, self.queue_name) self.queue.declare() except conn.connection.channel_errors as exc: # NOTE(jrosenboom): This exception may be triggered by a race # condition. Simply retrying will solve the error most of the time # and should work well enough as a workaround until the race # condition itself can be fixed. # See https://bugs.launchpad.net/neutron/+bug/1318721 for details. 
if exc.code == 404: self.queue.declare() else: raise self._declared_on = conn.channel def consume(self, conn, tag): """Actually declare the consumer on the amqp channel. This will start the flow of messages from the queue. Using the Connection.consume() will process the messages, calling the appropriate callback. """ # Ensure we are on the correct channel before consuming if conn.channel != self._declared_on: self.declare(conn) try: self.queue.consume(callback=self._callback, consumer_tag=six.text_type(tag), nowait=self.nowait) except conn.connection.channel_errors as exc: # We retry once because of some races from which we can # recover before informing the deployer # bugs.launchpad.net/oslo.messaging/+bug/1581148 # bugs.launchpad.net/oslo.messaging/+bug/1609766 # bugs.launchpad.net/neutron/+bug/1318721 # The 406 error code relates to messages that are double ack'd # On any channel error, RabbitMQ closes # the channel, but the amqp lib quietly re-opens # it. So we must reset all tags and declare # all consumers again. conn._new_tags = set(conn._consumers.values()) if exc.code == 404 or (exc.code == 406 and exc.method_name == 'Basic.ack'): self.declare(conn) self.queue.consume(callback=self._callback, consumer_tag=six.text_type(tag), nowait=self.nowait) else: raise def cancel(self, tag): LOG.trace('ConsumerBase.cancel: canceling %s', tag) self.queue.cancel(six.text_type(tag)) def _callback(self, message): """Call callback with deserialized message. Messages that are processed are ack'ed. """ m2p = getattr(self.queue.channel, 'message_to_python', None) if m2p: message = m2p(message) try: self.callback(RabbitMessage(message)) except Exception: LOG.exception(_LE("Failed to process message" " ... skipping it.")) message.reject() class DummyConnectionLock(_utils.DummyLock): def heartbeat_acquire(self): pass class ConnectionLock(DummyConnectionLock): """Lock object to protect access to the kombu connection This is a lock object to protect access to the kombu connection object between the heartbeat thread and the driver thread. There are two ways to acquire this lock: * lock.acquire() * lock.heartbeat_acquire() In both cases lock.release() releases the lock. The goal is that the heartbeat thread always has priority for acquiring the lock. This ensures there is no heartbeat starvation when the driver sends a lot of messages. So when lock.heartbeat_acquire() is called, the next time the lock is released the caller unconditionally acquires it, even if someone else asked for the lock first. 
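As an illustrative sketch (``connection`` stands for the kombu connection
this lock guards), the heartbeat thread typically uses the for_heartbeat()
context manager defined below::

    with lock.for_heartbeat():
        connection.heartbeat_check()

while driver threads simply bracket their work with lock.acquire() and
lock.release().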
""" def __init__(self): self._workers_waiting = 0 self._heartbeat_waiting = False self._lock_acquired = None self._monitor = threading.Lock() self._workers_locks = threading.Condition(self._monitor) self._heartbeat_lock = threading.Condition(self._monitor) self._get_thread_id = eventletutils.fetch_current_thread_functor() def acquire(self): with self._monitor: while self._lock_acquired: self._workers_waiting += 1 self._workers_locks.wait() self._workers_waiting -= 1 self._lock_acquired = self._get_thread_id() def heartbeat_acquire(self): # NOTE(sileht): must be called only one time with self._monitor: while self._lock_acquired is not None: self._heartbeat_waiting = True self._heartbeat_lock.wait() self._heartbeat_waiting = False self._lock_acquired = self._get_thread_id() def release(self): with self._monitor: if self._lock_acquired is None: raise RuntimeError("We can't release a not acquired lock") thread_id = self._get_thread_id() if self._lock_acquired != thread_id: raise RuntimeError("We can't release lock acquired by another " "thread/greenthread; %s vs %s" % (self._lock_acquired, thread_id)) self._lock_acquired = None if self._heartbeat_waiting: self._heartbeat_lock.notify() elif self._workers_waiting > 0: self._workers_locks.notify() @contextlib.contextmanager def for_heartbeat(self): self.heartbeat_acquire() try: yield finally: self.release() class Connection(object): """Connection object.""" pools = {} def __init__(self, conf, url, purpose): # NOTE(viktors): Parse config options driver_conf = conf.oslo_messaging_rabbit self.max_retries = driver_conf.rabbit_max_retries self.interval_start = driver_conf.rabbit_retry_interval self.interval_stepping = driver_conf.rabbit_retry_backoff self.interval_max = driver_conf.rabbit_interval_max self.login_method = driver_conf.rabbit_login_method self.fake_rabbit = driver_conf.fake_rabbit self.virtual_host = driver_conf.rabbit_virtual_host self.rabbit_hosts = driver_conf.rabbit_hosts self.rabbit_port = driver_conf.rabbit_port self.rabbit_userid = driver_conf.rabbit_userid self.rabbit_password = driver_conf.rabbit_password self.rabbit_ha_queues = driver_conf.rabbit_ha_queues self.rabbit_transient_queues_ttl = \ driver_conf.rabbit_transient_queues_ttl self.rabbit_qos_prefetch_count = driver_conf.rabbit_qos_prefetch_count self.heartbeat_timeout_threshold = \ driver_conf.heartbeat_timeout_threshold self.heartbeat_rate = driver_conf.heartbeat_rate self.kombu_reconnect_delay = driver_conf.kombu_reconnect_delay self.amqp_durable_queues = driver_conf.amqp_durable_queues self.amqp_auto_delete = driver_conf.amqp_auto_delete self.ssl = driver_conf.ssl self.kombu_missing_consumer_retry_timeout = \ driver_conf.kombu_missing_consumer_retry_timeout self.kombu_failover_strategy = driver_conf.kombu_failover_strategy self.kombu_compression = driver_conf.kombu_compression if self.ssl: self.ssl_version = driver_conf.ssl_version self.ssl_key_file = driver_conf.ssl_key_file self.ssl_cert_file = driver_conf.ssl_cert_file self.ssl_ca_file = driver_conf.ssl_ca_file # Try forever? 
if self.max_retries <= 0: self.max_retries = None if url.virtual_host is not None: virtual_host = url.virtual_host else: virtual_host = self.virtual_host self._url = '' if self.fake_rabbit: LOG.warning(_LW("Deprecated: fake_rabbit option is deprecated, " "set rpc_backend to kombu+memory or use the fake " "driver instead.")) self._url = 'memory://%s/' % virtual_host elif url.hosts: if url.transport.startswith('kombu+'): LOG.warning(_LW('Selecting the kombu transport through the ' 'transport url (%s) is a experimental feature ' 'and this is not yet supported.'), url.transport) if len(url.hosts) > 1: random.shuffle(url.hosts) for host in url.hosts: transport = url.transport.replace('kombu+', '') transport = transport.replace('rabbit', 'amqp') self._url += '%s%s://%s:%s@%s:%s/%s' % ( ";" if self._url else '', transport, parse.quote(host.username or ''), parse.quote(host.password or ''), self._parse_url_hostname(host.hostname) or '', str(host.port or 5672), virtual_host) elif url.transport.startswith('kombu+'): # NOTE(sileht): url have a + but no hosts # (like kombu+memory:///), pass it to kombu as-is transport = url.transport.replace('kombu+', '') self._url = "%s://%s" % (transport, virtual_host) else: if len(self.rabbit_hosts) > 1: random.shuffle(self.rabbit_hosts) for adr in self.rabbit_hosts: hostname, port = netutils.parse_host_port( adr, default_port=self.rabbit_port) self._url += '%samqp://%s:%s@%s:%s/%s' % ( ";" if self._url else '', parse.quote(self.rabbit_userid, ''), parse.quote(self.rabbit_password, ''), self._parse_url_hostname(hostname), port, virtual_host) self._initial_pid = os.getpid() self._consumers = {} self._producer = None self._new_tags = set() self._active_tags = {} self._tags = itertools.count(1) # Set of exchanges and queues declared on the channel to avoid # unnecessary redeclaration. 
This set is reset each time # the connection is reset in Connection._set_current_channel self._declared_exchanges = set() self._declared_queues = set() self._consume_loop_stopped = False self.channel = None self.purpose = purpose # NOTE(sileht): if purpose is PURPOSE_LISTEN # we don't need the lock because we don't # have a heartbeat thread if purpose == rpc_common.PURPOSE_SEND: self._connection_lock = ConnectionLock() else: self._connection_lock = DummyConnectionLock() self.connection_id = str(uuid.uuid4()) self.name = "%s:%d:%s" % (os.path.basename(sys.argv[0]), os.getpid(), self.connection_id) self.connection = kombu.connection.Connection( self._url, ssl=self._fetch_ssl_params(), login_method=self.login_method, heartbeat=self.heartbeat_timeout_threshold, failover_strategy=self.kombu_failover_strategy, transport_options={ 'confirm_publish': True, 'client_properties': { 'capabilities': { 'authentication_failure_close': True, 'connection.blocked': True, 'consumer_cancel_notify': True }, 'connection_name': self.name}, 'on_blocked': self._on_connection_blocked, 'on_unblocked': self._on_connection_unblocked, }, ) LOG.debug('[%(connection_id)s] Connecting to AMQP server on' ' %(hostname)s:%(port)s', self._get_connection_info()) # NOTE(sileht): kombu recommends running heartbeat_check every # second, but we hold a lock around the kombu connection, so, # to avoid keeping this lock busy while mostly doing nothing but # waiting for the events to drain, we run heartbeat_check and # retrieve the server heartbeat packet only twice as often as # the minimum required for the heartbeat to work # (heartbeat_timeout/heartbeat_rate/2.0; the default kombu # heartbeat_rate is 2) self._heartbeat_wait_timeout = ( float(self.heartbeat_timeout_threshold) / float(self.heartbeat_rate) / 2.0) self._heartbeat_support_log_emitted = False # NOTE(sileht): just ensure the connection is set up at startup with self._connection_lock: self.ensure_connection() # NOTE(sileht): if purpose is PURPOSE_LISTEN # the consume code does the heartbeat work, # so we don't need a thread self._heartbeat_thread = None if purpose == rpc_common.PURPOSE_SEND: self._heartbeat_start() LOG.debug('[%(connection_id)s] Connected to AMQP server on ' '%(hostname)s:%(port)s via [%(transport)s] client with' ' port %(client_port)s.', self._get_connection_info()) # NOTE(sileht): value chosen according to the best practice from kombu # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop # For heartbeat, we can set a bigger timeout and check that we receive # the heartbeat packets regularly if self._heartbeat_supported_and_enabled(): self._poll_timeout = self._heartbeat_wait_timeout else: self._poll_timeout = 1 if self._url.startswith('memory://'): # Kludge to speed up tests. 
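# (With the kombu in-memory transport there is no real socket to block
# on, so a zero polling interval and a small poll timeout are safe here
# and simply make the test suite faster.)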
self.connection.transport.polling_interval = 0.0 # Fixup logging self.connection.hostname = "memory_driver" self.connection.port = 1234 self._poll_timeout = 0.05 # FIXME(markmc): use oslo sslutils when it is available as a library _SSL_PROTOCOLS = { "tlsv1": ssl.PROTOCOL_TLSv1, "sslv23": ssl.PROTOCOL_SSLv23 } _OPTIONAL_PROTOCOLS = { 'sslv2': 'PROTOCOL_SSLv2', 'sslv3': 'PROTOCOL_SSLv3', 'tlsv1_1': 'PROTOCOL_TLSv1_1', 'tlsv1_2': 'PROTOCOL_TLSv1_2', } for protocol in _OPTIONAL_PROTOCOLS: try: _SSL_PROTOCOLS[protocol] = getattr(ssl, _OPTIONAL_PROTOCOLS[protocol]) except AttributeError: pass @classmethod def validate_ssl_version(cls, version): key = version.lower() try: return cls._SSL_PROTOCOLS[key] except KeyError: raise RuntimeError(_("Invalid SSL version : %s") % version) def _parse_url_hostname(self, hostname): """Handles hostname returned from urlparse and checks whether it's ipaddress. If it's ipaddress it ensures that it has brackets for IPv6. """ return '[%s]' % hostname if ':' in hostname else hostname def _fetch_ssl_params(self): """Handles fetching what ssl params should be used for the connection (if any). """ if self.ssl: ssl_params = dict() # http://docs.python.org/library/ssl.html - ssl.wrap_socket if self.ssl_version: ssl_params['ssl_version'] = self.validate_ssl_version( self.ssl_version) if self.ssl_key_file: ssl_params['keyfile'] = self.ssl_key_file if self.ssl_cert_file: ssl_params['certfile'] = self.ssl_cert_file if self.ssl_ca_file: ssl_params['ca_certs'] = self.ssl_ca_file # We might want to allow variations in the # future with this? ssl_params['cert_reqs'] = ssl.CERT_REQUIRED return ssl_params or True return False @staticmethod def _on_connection_blocked(reason): LOG.error(_LE("The broker has blocked the connection: %s"), reason) @staticmethod def _on_connection_unblocked(): LOG.info(_LI("The broker has unblocked the connection")) def ensure_connection(self): # NOTE(sileht): we reset the channel and ensure # the kombu underlying connection works self._set_current_channel(None) self.ensure(method=self.connection.connect) self.set_transport_socket_timeout() def ensure(self, method, retry=None, recoverable_error_callback=None, error_callback=None, timeout_is_error=True): """Will retry up to retry number of times. retry = None means use the value of rabbit_max_retries retry = -1 means to retry forever retry = 0 means no retry retry = N means N retries NOTE(sileht): Must be called within the connection lock """ current_pid = os.getpid() if self._initial_pid != current_pid: LOG.warning(_LW("Process forked after connection established! " "This can result in unpredictable behavior. " "See: https://docs.openstack.org/oslo.messaging/" "latest/reference/transport.html")) self._initial_pid = current_pid if retry is None: retry = self.max_retries if retry is None or retry < 0: retry = None def on_error(exc, interval): LOG.debug("[%s] Received recoverable error from kombu:" % self.connection_id, exc_info=True) recoverable_error_callback and recoverable_error_callback(exc) interval = (self.kombu_reconnect_delay + interval if self.kombu_reconnect_delay > 0 else interval) info = {'err_str': exc, 'sleep_time': interval} info.update(self._get_connection_info()) if 'Socket closed' in six.text_type(exc): LOG.error(_LE('[%(connection_id)s] AMQP server' ' %(hostname)s:%(port)s closed' ' the connection. Check login credentials:' ' %(err_str)s'), info) else: LOG.error(_LE('[%(connection_id)s] AMQP server on ' '%(hostname)s:%(port)s is unreachable: ' '%(err_str)s. 
Trying again in ' '%(sleep_time)d seconds. Client port: ' '%(client_port)s'), info) # XXX(nic): when reconnecting to a RabbitMQ cluster # with mirrored queues in use, the attempt to release the # connection can hang "indefinitely" somewhere deep down # in Kombu. Blocking the thread for a bit prior to # release seems to kludge around the problem where it is # otherwise reproducible. # TODO(sileht): Check if this is still useful: since we # use kombu for HA connections, the interval_step # should be sufficient, because the underlying kombu transport # connection object is freed. if self.kombu_reconnect_delay > 0: LOG.trace('Delaying reconnect for %1.1f seconds ...', self.kombu_reconnect_delay) time.sleep(self.kombu_reconnect_delay) def on_reconnection(new_channel): """Callback invoked when kombu reconnects and creates a new channel; we use it to reconfigure our consumers. """ self._set_current_channel(new_channel) self.set_transport_socket_timeout() LOG.info(_LI('[%(connection_id)s] Reconnected to AMQP server on ' '%(hostname)s:%(port)s via [%(transport)s] client ' 'with port %(client_port)s.'), self._get_connection_info()) def execute_method(channel): self._set_current_channel(channel) method() try: autoretry_method = self.connection.autoretry( execute_method, channel=self.channel, max_retries=retry, errback=on_error, interval_start=self.interval_start or 1, interval_step=self.interval_stepping, interval_max=self.interval_max, on_revive=on_reconnection) ret, channel = autoretry_method() self._set_current_channel(channel) return ret except rpc_amqp.AMQPDestinationNotFound: # NOTE(sileht): we must reraise this without # triggering error_callback raise except Exception as exc: error_callback and error_callback(exc) self._set_current_channel(None) # NOTE(sileht): the number of retries is exceeded and the connection # is still broken info = {'err_str': exc, 'retry': retry} info.update(self.connection.info()) msg = _('Unable to connect to AMQP server on ' '%(hostname)s:%(port)s after %(retry)s ' 'tries: %(err_str)s') % info LOG.error(msg) raise exceptions.MessageDeliveryFailure(msg) def _set_current_channel(self, new_channel): """Change the channel to use. 
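Switching drops the declared queues/exchanges caches and, for a new
channel, re-applies QoS (for listeners), recreates the producer and
re-declares all registered consumers (see the body below).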
NOTE(sileht): Must be called within the connection lock """ if new_channel == self.channel: return if self.channel is not None: self._declared_queues.clear() self._declared_exchanges.clear() self.connection.maybe_close_channel(self.channel) self.channel = new_channel if new_channel is not None: if self.purpose == rpc_common.PURPOSE_LISTEN: self._set_qos(new_channel) self._producer = kombu.messaging.Producer(new_channel) for consumer in self._consumers: consumer.declare(self) def _set_qos(self, channel): """Set QoS prefetch count on the channel""" if self.rabbit_qos_prefetch_count > 0: channel.basic_qos(0, self.rabbit_qos_prefetch_count, False) def close(self): """Close/release this connection.""" self._heartbeat_stop() if self.connection: for consumer, tag in self._consumers.items(): if consumer.type == 'fanout': LOG.debug('[connection close] Deleting fanout ' 'queue: %s ' % consumer.queue.name) consumer.queue.delete() self._set_current_channel(None) self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again.""" with self._connection_lock: try: for consumer, tag in self._consumers.items(): consumer.cancel(tag=tag) except kombu.exceptions.OperationalError: self.ensure_connection() self._consumers.clear() self._active_tags.clear() self._new_tags.clear() self._tags = itertools.count(1) def _heartbeat_supported_and_enabled(self): if self.heartbeat_timeout_threshold <= 0: return False if self.connection.supports_heartbeats: return True elif not self._heartbeat_support_log_emitted: LOG.warning(_LW("Heartbeat support requested but it is not " "supported by the kombu driver or the broker")) self._heartbeat_support_log_emitted = True return False def set_transport_socket_timeout(self, timeout=None): # NOTE(sileht): there are some cases where the heartbeat check # or producer.send returns only when the system socket # timeout is reached. kombu doesn't allow us to customize this # timeout, so for py-amqp we tweak it ourselves # NOTE(dmitryme): Current approach works with amqp==1.4.9 and # kombu==3.0.33. 
Once the commit below is released, we should # try to set the socket timeout in the constructor: # https://github.com/celery/py-amqp/pull/64 heartbeat_timeout = self.heartbeat_timeout_threshold if self._heartbeat_supported_and_enabled(): # NOTE(sileht): we are supposed to send a heartbeat every # heartbeat_timeout; no need to wait longer, otherwise the # broker will disconnect us, so time out earlier ourselves if timeout is None: timeout = heartbeat_timeout else: timeout = min(heartbeat_timeout, timeout) try: sock = self.channel.connection.sock except AttributeError as e: # Level is set to debug because otherwise we would spam the logs LOG.debug('[%s] Failed to get socket attribute: %s' % (self.connection_id, str(e))) else: sock.settimeout(timeout) # TCP_USER_TIMEOUT is not defined on Windows and Mac OS X if sys.platform != 'win32' and sys.platform != 'darwin': try: timeout = timeout * 1000 if timeout is not None else 0 # NOTE(gdavoian): only integers and strings are allowed # as socket options' values, and TCP_USER_TIMEOUT option # can take only integer values, so we round-up the timeout # to the nearest integer in order to ensure that the # connection is not broken before the expected timeout sock.setsockopt(socket.IPPROTO_TCP, TCP_USER_TIMEOUT, int(math.ceil(timeout))) except socket.error as error: code = error[0] # TCP_USER_TIMEOUT not defined on kernels <2.6.37 if code != errno.ENOPROTOOPT: raise @contextlib.contextmanager def _transport_socket_timeout(self, timeout): self.set_transport_socket_timeout(timeout) yield self.set_transport_socket_timeout() def _heartbeat_check(self): # NOTE(sileht): we are supposed to send at least one heartbeat # every heartbeat_timeout_threshold, so no need to wait more self.connection.heartbeat_check(rate=self.heartbeat_rate) def _heartbeat_start(self): if self._heartbeat_supported_and_enabled(): self._heartbeat_exit_event = eventletutils.Event() self._heartbeat_thread = threading.Thread( target=self._heartbeat_thread_job) self._heartbeat_thread.daemon = True self._heartbeat_thread.start() else: self._heartbeat_thread = None def _heartbeat_stop(self): if self._heartbeat_thread is not None: self._heartbeat_exit_event.set() self._heartbeat_thread.join() self._heartbeat_thread = None def _heartbeat_thread_job(self): """Thread that maintains inactive connections """ while not self._heartbeat_exit_event.is_set(): with self._connection_lock.for_heartbeat(): try: try: self._heartbeat_check() # NOTE(sileht): We need to drain events to receive # heartbeats from the broker, but must not hold the # connection for too long. 
In amqpdriver, a connection # is used exclusively either for reading or for writing, so we # have to do this for connections used for writing; drain_events # already does it for the other connections try: self.connection.drain_events(timeout=0.001) except socket.timeout: pass except (socket.timeout, kombu.exceptions.OperationalError) as exc: LOG.info(_LI("A recoverable connection/channel error " "occurred, trying to reconnect: %s"), exc) self.ensure_connection() except Exception: LOG.warning(_LW("Unexpected error during heartbeat " "thread processing, retrying...")) LOG.debug('Exception', exc_info=True) self._heartbeat_exit_event.wait( timeout=self._heartbeat_wait_timeout) self._heartbeat_exit_event.clear() def declare_consumer(self, consumer): """Create a Consumer using the class that was passed in and add it to our list of consumers """ def _connect_error(exc): log_info = {'topic': consumer.routing_key, 'err_str': exc} LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s"), log_info) def _declare_consumer(): consumer.declare(self) tag = self._active_tags.get(consumer.queue_name) if tag is None: tag = next(self._tags) self._active_tags[consumer.queue_name] = tag self._new_tags.add(tag) self._consumers[consumer] = tag return consumer with self._connection_lock: return self.ensure(_declare_consumer, error_callback=_connect_error) def consume(self, timeout=None): """Consume from all queues/consumers.""" timer = rpc_common.DecayingTimer(duration=timeout) timer.start() def _raise_timeout(): raise rpc_common.Timeout() def _recoverable_error_callback(exc): if not isinstance(exc, rpc_common.Timeout): self._new_tags = set(self._consumers.values()) timer.check_return(_raise_timeout) def _error_callback(exc): _recoverable_error_callback(exc) LOG.error(_LE('Failed to consume message from queue: %s'), exc) def _consume(): # NOTE(sileht): in case the acknowledgment or requeue of a # message fails, the kombu transport can be disconnected. # In this case, we must redeclare our consumers, so raise # a recoverable error to trigger the reconnection code. if not self.connection.connected: raise self.connection.recoverable_connection_errors[0] while self._new_tags: for consumer, tag in self._consumers.items(): if tag in self._new_tags: consumer.consume(self, tag=tag) self._new_tags.remove(tag) poll_timeout = (self._poll_timeout if timeout is None else min(timeout, self._poll_timeout)) while True: if self._consume_loop_stopped: return if self._heartbeat_supported_and_enabled(): self._heartbeat_check() try: self.connection.drain_events(timeout=poll_timeout) return except socket.timeout: poll_timeout = timer.check_return( _raise_timeout, maximum=self._poll_timeout) except self.connection.channel_errors as exc: if exc.code == 406 and exc.method_name == 'Basic.ack': # NOTE(gordc): occasionally multiple workers will grab # the same message and acknowledge it. if it happens, meh. raise self.connection.recoverable_channel_errors[0] raise with self._connection_lock: self.ensure(_consume, recoverable_error_callback=_recoverable_error_callback, error_callback=_error_callback) def stop_consuming(self): self._consume_loop_stopped = True def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. 
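An illustrative pairing (``on_reply`` and ``reply_msg`` are assumed
names)::

    conn.declare_direct_consumer(msg_id, on_reply)  # consumer side
    conn.direct_send(msg_id, reply_msg)             # publisher side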
In nova's use, this is generally a msg_id queue used for responses to call/multicall """ consumer = Consumer(exchange_name=topic, queue_name=topic, routing_key=topic, type='direct', durable=False, exchange_auto_delete=True, queue_auto_delete=False, callback=callback, rabbit_ha_queues=self.rabbit_ha_queues, rabbit_queue_ttl=self.rabbit_transient_queues_ttl) self.declare_consumer(consumer) def declare_topic_consumer(self, exchange_name, topic, callback=None, queue_name=None): """Create a 'topic' consumer.""" consumer = Consumer(exchange_name=exchange_name, queue_name=queue_name or topic, routing_key=topic, type='topic', durable=self.amqp_durable_queues, exchange_auto_delete=self.amqp_auto_delete, queue_auto_delete=self.amqp_auto_delete, callback=callback, rabbit_ha_queues=self.rabbit_ha_queues) self.declare_consumer(consumer) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer.""" unique = uuid.uuid4().hex exchange_name = '%s_fanout' % topic queue_name = '%s_fanout_%s' % (topic, unique) consumer = Consumer(exchange_name=exchange_name, queue_name=queue_name, routing_key=topic, type='fanout', durable=False, exchange_auto_delete=True, queue_auto_delete=False, callback=callback, rabbit_ha_queues=self.rabbit_ha_queues, rabbit_queue_ttl=self.rabbit_transient_queues_ttl) self.declare_consumer(consumer) def _ensure_publishing(self, method, exchange, msg, routing_key=None, timeout=None, retry=None): """Send to a publisher based on the publisher class.""" def _error_callback(exc): log_info = {'topic': exchange.name, 'err_str': exc} LOG.error(_LE("Failed to publish message to topic " "'%(topic)s': %(err_str)s"), log_info) LOG.debug('Exception', exc_info=exc) method = functools.partial(method, exchange, msg, routing_key, timeout) with self._connection_lock: self.ensure(method, retry=retry, error_callback=_error_callback) def _get_connection_info(self): info = self.connection.info() client_port = None if (self.channel and hasattr(self.channel.connection, 'sock') and self.channel.connection.sock): client_port = self.channel.connection.sock.getsockname()[1] info.update({'client_port': client_port, 'connection_id': self.connection_id}) return info def _publish(self, exchange, msg, routing_key=None, timeout=None): """Publish a message.""" if not (exchange.passive or exchange.name in self._declared_exchanges): exchange(self.channel).declare() self._declared_exchanges.add(exchange.name) log_info = {'msg': msg, 'who': exchange or 'default', 'key': routing_key} LOG.trace('Connection._publish: sending message %(msg)s to' ' %(who)s with routing key %(key)s', log_info) # NOTE(sileht): no need to wait more, the caller expects # an answer before the timeout is reached with self._transport_socket_timeout(timeout): self._producer.publish(msg, exchange=exchange, routing_key=routing_key, expiration=timeout, compression=self.kombu_compression) def _publish_and_creates_default_queue(self, exchange, msg, routing_key=None, timeout=None): """Publisher that declares a default queue When the exchange is missing, instead of silently creating an exchange not bound to any queue, this publisher creates a default queue named with the routing_key. This is mainly used so that notifications are not missed when nobody consumes them yet. If the future consumer binds the default queue, it can retrieve the missed messages. _set_current_channel is responsible for cleaning up the cache. 
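This publisher backs notify_send() below, so a notification published
before any listener has attached ends up queued under the topic name
instead of being silently dropped by the exchange.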
""" queue_indentifier = (exchange.name, routing_key) # NOTE(sileht): We only do it once per reconnection # the Connection._set_current_channel() is responsible to clear # this cache if queue_indentifier not in self._declared_queues: queue = kombu.entity.Queue( channel=self.channel, exchange=exchange, durable=exchange.durable, auto_delete=exchange.auto_delete, name=routing_key, routing_key=routing_key, queue_arguments=_get_queue_arguments(self.rabbit_ha_queues, 0)) log_info = {'key': routing_key, 'exchange': exchange} LOG.trace( 'Connection._publish_and_creates_default_queue: ' 'declare queue %(key)s on %(exchange)s exchange', log_info) queue.declare() self._declared_queues.add(queue_indentifier) self._publish(exchange, msg, routing_key=routing_key, timeout=timeout) def _publish_and_raises_on_missing_exchange(self, exchange, msg, routing_key=None, timeout=None): """Publisher that raises exception if exchange is missing.""" if not exchange.passive: raise RuntimeError("_publish_and_retry_on_missing_exchange() must " "be called with an passive exchange.") try: self._publish(exchange, msg, routing_key=routing_key, timeout=timeout) return except self.connection.channel_errors as exc: if exc.code == 404: # NOTE(noelbk/sileht): # If rabbit dies, the consumer can be disconnected before the # publisher sends, and if the consumer hasn't declared the # queue, the publisher's will send a message to an exchange # that's not bound to a queue, and the message wll be lost. # So we set passive=True to the publisher exchange and catch # the 404 kombu ChannelError and retry until the exchange # appears raise rpc_amqp.AMQPDestinationNotFound( "exchange %s doesn't exists" % exchange.name) raise def direct_send(self, msg_id, msg): """Send a 'direct' message.""" exchange = kombu.entity.Exchange(name=msg_id, type='direct', durable=False, auto_delete=True, passive=True) self._ensure_publishing(self._publish_and_raises_on_missing_exchange, exchange, msg, routing_key=msg_id) def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None): """Send a 'topic' message.""" exchange = kombu.entity.Exchange( name=exchange_name, type='topic', durable=self.amqp_durable_queues, auto_delete=self.amqp_auto_delete) self._ensure_publishing(self._publish, exchange, msg, routing_key=topic, timeout=timeout, retry=retry) def fanout_send(self, topic, msg, retry=None): """Send a 'fanout' message.""" exchange = kombu.entity.Exchange(name='%s_fanout' % topic, type='fanout', durable=False, auto_delete=True) self._ensure_publishing(self._publish, exchange, msg, retry=retry) def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs): """Send a notify message on a topic.""" exchange = kombu.entity.Exchange( name=exchange_name, type='topic', durable=self.amqp_durable_queues, auto_delete=self.amqp_auto_delete) self._ensure_publishing(self._publish_and_creates_default_queue, exchange, msg, routing_key=topic, retry=retry) class RabbitDriver(amqpdriver.AMQPDriverBase): """RabbitMQ Driver The ``rabbit`` driver is the default driver used in OpenStack's integration tests. The driver is aliased as ``kombu`` to support upgrading existing installations with older settings. 
""" def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): opt_group = cfg.OptGroup(name='oslo_messaging_rabbit', title='RabbitMQ driver options') conf.register_group(opt_group) conf.register_opts(rabbit_opts, group=opt_group) conf.register_opts(rpc_amqp.amqp_opts, group=opt_group) conf.register_opts(base.base_opts, group=opt_group) conf = rpc_common.ConfigOptsProxy(conf, url, opt_group.name) self.missing_destination_retry_timeout = ( conf.oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout) self.prefetch_size = ( conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count) # the pool configuration properties max_size = conf.oslo_messaging_rabbit.rpc_conn_pool_size min_size = conf.oslo_messaging_rabbit.conn_pool_min_size ttl = conf.oslo_messaging_rabbit.conn_pool_ttl connection_pool = pool.ConnectionPool( conf, max_size, min_size, ttl, url, Connection) super(RabbitDriver, self).__init__( conf, url, connection_pool, default_exchange, allowed_remote_exmods ) def require_features(self, requeue=True): pass oslo.messaging-5.35.0/oslo_messaging/_drivers/impl_pika.py0000666000175100017510000003620613224676046023750 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from debtcollector import deprecate from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils import pika_pool import tenacity from oslo_messaging._drivers import base from oslo_messaging._drivers import common from oslo_messaging._drivers.pika_driver import (pika_connection_factory as pika_drv_conn_factory) from oslo_messaging._drivers.pika_driver import pika_commons as pika_drv_cmns from oslo_messaging._drivers.pika_driver import pika_engine as pika_drv_engine from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc from oslo_messaging._drivers.pika_driver import pika_listener as pika_drv_lstnr from oslo_messaging._drivers.pika_driver import pika_message as pika_drv_msg from oslo_messaging._drivers.pika_driver import pika_poller as pika_drv_poller from oslo_messaging import exceptions LOG = logging.getLogger(__name__) pika_pool_opts = [ cfg.IntOpt('pool_max_size', default=30, help="Maximum number of connections to keep queued."), cfg.IntOpt('pool_max_overflow', default=0, help="Maximum number of connections to create above " "`pool_max_size`."), cfg.IntOpt('pool_timeout', default=30, help="Default number of seconds to wait for a connections to " "available"), cfg.IntOpt('pool_recycle', default=600, help="Lifetime of a connection (since creation) in seconds " "or None for no recycling. Expired connections are " "closed on acquire."), cfg.IntOpt('pool_stale', default=60, help="Threshold at which inactive (since release) connections " "are considered stale in seconds or None for no " "staleness. 
Stale connections are closed on acquire.") ] message_opts = [ cfg.StrOpt('default_serializer_type', default='json', choices=('json', 'msgpack'), help="Default serialization mechanism for " "serializing/deserializing outgoing/incoming messages") ] notification_opts = [ cfg.BoolOpt('notification_persistence', default=False, help="Persist notification messages."), cfg.StrOpt('default_notification_exchange', default="${control_exchange}_notification", help="Exchange name for sending notifications"), cfg.IntOpt( 'notification_listener_prefetch_count', default=100, help="Max number of not acknowledged message which RabbitMQ can send " "to notification listener." ), cfg.IntOpt( 'default_notification_retry_attempts', default=-1, help="Reconnecting retry count in case of connectivity problem during " "sending notification, -1 means infinite retry." ), cfg.FloatOpt( 'notification_retry_delay', default=0.25, help="Reconnecting retry delay in case of connectivity problem during " "sending notification message" ) ] rpc_opts = [ cfg.IntOpt('rpc_queue_expiration', default=60, help="Time to live for rpc queues without consumers in " "seconds."), cfg.StrOpt('default_rpc_exchange', default="${control_exchange}_rpc", help="Exchange name for sending RPC messages"), cfg.StrOpt('rpc_reply_exchange', default="${control_exchange}_rpc_reply", help="Exchange name for receiving RPC replies"), cfg.IntOpt( 'rpc_listener_prefetch_count', default=100, help="Max number of not acknowledged message which RabbitMQ can send " "to rpc listener." ), cfg.IntOpt( 'rpc_reply_listener_prefetch_count', default=100, help="Max number of not acknowledged message which RabbitMQ can send " "to rpc reply listener." ), cfg.IntOpt( 'rpc_reply_retry_attempts', default=-1, help="Reconnecting retry count in case of connectivity problem during " "sending reply. -1 means infinite retry during rpc_timeout" ), cfg.FloatOpt( 'rpc_reply_retry_delay', default=0.25, help="Reconnecting retry delay in case of connectivity problem during " "sending reply." ), cfg.IntOpt( 'default_rpc_retry_attempts', default=-1, help="Reconnecting retry count in case of connectivity problem during " "sending RPC message, -1 means infinite retry. If actual " "retry attempts in not 0 the rpc request could be processed more " "than one time" ), cfg.FloatOpt( 'rpc_retry_delay', default=0.25, help="Reconnecting retry delay in case of connectivity problem during " "sending RPC message" ) ] class PikaDriver(base.BaseDriver): """Pika Driver **Warning**: The ``pika`` driver has been deprecated and will be removed in a future release. It is recommended that all users of the ``pika`` driver transition to using the ``rabbit`` driver. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): deprecate("The pika driver is no longer maintained. 
It has been" " deprecated", message="It is recommended that all users of the pika driver" " transition to using the rabbit driver.", version="pike", removal_version="rocky") opt_group = cfg.OptGroup(name='oslo_messaging_pika', title='Pika driver options') conf.register_group(opt_group) conf.register_opts(pika_drv_conn_factory.pika_opts, group=opt_group) conf.register_opts(pika_pool_opts, group=opt_group) conf.register_opts(message_opts, group=opt_group) conf.register_opts(rpc_opts, group=opt_group) conf.register_opts(notification_opts, group=opt_group) conf = common.ConfigOptsProxy(conf, url, opt_group.name) self._pika_engine = pika_drv_engine.PikaEngine( conf, url, default_exchange, allowed_remote_exmods ) self._reply_listener = pika_drv_lstnr.RpcReplyPikaListener( self._pika_engine ) super(PikaDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) def require_features(self, requeue=False): pass def _declare_rpc_exchange(self, exchange, stopwatch): timeout = stopwatch.leftover(return_none=True) with (self._pika_engine.connection_without_confirmation_pool .acquire(timeout=timeout)) as conn: try: self._pika_engine.declare_exchange_by_channel( conn.channel, self._pika_engine.get_rpc_exchange_name( exchange ), "direct", False ) except pika_pool.Timeout as e: raise exceptions.MessagingTimeout( "Timeout for current operation was expired. {}.".format( str(e) ) ) def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, retry=None): with timeutils.StopWatch(duration=timeout) as stopwatch: if retry is None: retry = self._pika_engine.default_rpc_retry_attempts exchange = self._pika_engine.get_rpc_exchange_name( target.exchange ) def on_exception(ex): if isinstance(ex, pika_drv_exc.ExchangeNotFoundException): # it is desired to create exchange because if we sent to # exchange which is not exists, we get ChannelClosed # exception and need to reconnect try: self._declare_rpc_exchange(exchange, stopwatch) except pika_drv_exc.ConnectionException as e: LOG.warning("Problem during declaring exchange. %s", e) return True elif isinstance(ex, (pika_drv_exc.ConnectionException, exceptions.MessageDeliveryFailure)): LOG.warning("Problem during message sending. %s", ex) return True else: return False if retry: retrier = tenacity.retry( stop=(tenacity.stop_never if retry == -1 else tenacity.stop_after_attempt(retry)), retry=tenacity.retry_if_exception(on_exception), wait=tenacity.wait_fixed(self._pika_engine.rpc_retry_delay) ) else: retrier = None if target.fanout: return self.cast_all_workers( exchange, target.topic, ctxt, message, stopwatch, retrier ) routing_key = self._pika_engine.get_rpc_queue_name( target.topic, target.server, retrier is None ) msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine, message, ctxt) try: reply = msg.send( exchange=exchange, routing_key=routing_key, reply_listener=( self._reply_listener if wait_for_reply else None ), stopwatch=stopwatch, retrier=retrier ) except pika_drv_exc.ExchangeNotFoundException as ex: try: self._declare_rpc_exchange(exchange, stopwatch) except pika_drv_exc.ConnectionException as e: LOG.warning("Problem during declaring exchange. 
%s", e) raise ex if reply is not None: if reply.failure is not None: raise reply.failure return reply.result def cast_all_workers(self, exchange, topic, ctxt, message, stopwatch, retrier=None): msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message, ctxt) try: msg.send( exchange=exchange, routing_key=self._pika_engine.get_rpc_queue_name( topic, "all_workers", retrier is None ), mandatory=False, stopwatch=stopwatch, retrier=retrier ) except pika_drv_exc.ExchangeNotFoundException: try: self._declare_rpc_exchange(exchange, stopwatch) except pika_drv_exc.ConnectionException as e: LOG.warning("Problem during declaring exchange. %s", e) def _declare_notification_queue_binding( self, target, stopwatch=pika_drv_cmns.INFINITE_STOP_WATCH): if stopwatch.expired(): raise exceptions.MessagingTimeout( "Timeout for current operation was expired." ) try: timeout = stopwatch.leftover(return_none=True) with (self._pika_engine.connection_without_confirmation_pool .acquire)(timeout=timeout) as conn: self._pika_engine.declare_queue_binding_by_channel( conn.channel, exchange=( target.exchange or self._pika_engine.default_notification_exchange ), queue=target.topic, routing_key=target.topic, exchange_type='direct', queue_expiration=None, durable=self._pika_engine.notification_persistence, ) except pika_pool.Timeout as e: raise exceptions.MessagingTimeout( "Timeout for current operation was expired. {}.".format(str(e)) ) def send_notification(self, target, ctxt, message, version, retry=None): if retry is None: retry = self._pika_engine.default_notification_retry_attempts def on_exception(ex): if isinstance(ex, (pika_drv_exc.ExchangeNotFoundException, pika_drv_exc.RoutingException)): LOG.warning("Problem during sending notification. %s", ex) try: self._declare_notification_queue_binding(target) except pika_drv_exc.ConnectionException as e: LOG.warning("Problem during declaring notification queue " "binding. %s", e) return True elif isinstance(ex, (pika_drv_exc.ConnectionException, pika_drv_exc.MessageRejectedException)): LOG.warning("Problem during sending notification. %s", ex) return True else: return False if retry: retrier = tenacity.retry( stop=(tenacity.stop_never if retry == -1 else tenacity.stop_after_attempt(retry)), retry=tenacity.retry_if_exception(on_exception), wait=tenacity.wait_fixed( self._pika_engine.notification_retry_delay ) ) else: retrier = None msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message, ctxt) return msg.send( exchange=( target.exchange or self._pika_engine.default_notification_exchange ), routing_key=target.topic, confirm=True, mandatory=True, persistent=self._pika_engine.notification_persistence, retrier=retrier ) def listen(self, target, batch_size, batch_timeout): return pika_drv_poller.RpcServicePikaPoller( self._pika_engine, target, batch_size, batch_timeout, self._pika_engine.rpc_listener_prefetch_count ) def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): return pika_drv_poller.NotificationPikaPoller( self._pika_engine, targets_and_priorities, batch_size, batch_timeout, self._pika_engine.notification_listener_prefetch_count, pool ) def cleanup(self): self._reply_listener.cleanup() self._pika_engine.cleanup() oslo.messaging-5.35.0/oslo_messaging/_drivers/pool.py0000666000175100017510000001102513224676046022744 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import sys import threading from oslo_log import log as logging from oslo_utils import timeutils import six from oslo_messaging._drivers import common LOG = logging.getLogger(__name__) # TODO(harlowja): remove this when we no longer have to support 2.7 if sys.version_info[0:2] < (3, 2): def wait_condition(cond): # FIXME(markmc): timeout needed to allow keyboard interrupt # http://bugs.python.org/issue8844 cond.wait(timeout=1) else: def wait_condition(cond): cond.wait() @six.add_metaclass(abc.ABCMeta) class Pool(object): """A thread-safe object pool. Modelled after the eventlet.pools.Pool interface, but designed to be safe when using native threads without the GIL. Resizing is not supported. """ def __init__(self, max_size=4, min_size=2, ttl=1200, on_expire=None): super(Pool, self).__init__() self._min_size = min_size self._max_size = max_size self._item_ttl = ttl self._current_size = 0 self._cond = threading.Condition() self._items = collections.deque() self._on_expire = on_expire def expire(self): """Remove expired items from left (the oldest item) to right (the newest item). """ with self._cond: while len(self._items) > self._min_size: try: ttl_watch, item = self._items.popleft() if ttl_watch.expired(): self._on_expire and self._on_expire(item) self._current_size -= 1 else: self._items.appendleft((ttl_watch, item)) return except IndexError: break def put(self, item): """Return an item to the pool.""" with self._cond: ttl_watch = timeutils.StopWatch(duration=self._item_ttl) ttl_watch.start() self._items.append((ttl_watch, item)) self._cond.notify() def get(self): """Return an item from the pool, when one is available. This may cause the calling thread to block. """ with self._cond: while True: try: ttl_watch, item = self._items.pop() self.expire() return item except IndexError: pass if self._current_size < self._max_size: self._current_size += 1 break wait_condition(self._cond) # We've grabbed a slot and dropped the lock, now do the creation try: return self.create() except Exception: with self._cond: self._current_size -= 1 raise def iter_free(self): """Iterate over free items.""" while True: try: _, item = self._items.pop() yield item except IndexError: raise StopIteration @abc.abstractmethod def create(self): """Construct a new item.""" class ConnectionPool(Pool): """Class that implements a Pool of Connections.""" def __init__(self, conf, max_size, min_size, ttl, url, connection_cls): self.connection_cls = connection_cls self.conf = conf self.url = url super(ConnectionPool, self).__init__(max_size, min_size, ttl, self._on_expire) def _on_expire(self, connection): connection.close() LOG.debug("Idle connection has expired and been closed." " Pool size: %d" % len(self._items)) def create(self, purpose=common.PURPOSE_SEND): LOG.debug('Pool creating new connection') return self.connection_cls(self.conf, self.url, purpose) def empty(self): for item in self.iter_free(): item.close() oslo.messaging-5.35.0/oslo_messaging/_drivers/impl_amqp1.py0000666000175100017510000003723313224676046024044 0ustar zuulzuul00000000000000# Copyright 2014, Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for the 'amqp' transport. This module provides a transport driver that speaks version 1.0 of the AMQP messaging protocol. The driver sends messages and creates subscriptions via 'tasks' that are performed on its behalf via the controller module. """ import collections import logging import os import threading import uuid from oslo_config import cfg from oslo_messaging.target import Target from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_messaging._drivers.amqp1_driver.eventloop import compute_timeout from oslo_messaging._drivers.amqp1_driver import opts from oslo_messaging._drivers import base from oslo_messaging._drivers import common from oslo_messaging._i18n import _LI, _LW proton = importutils.try_import('proton') controller = importutils.try_import( 'oslo_messaging._drivers.amqp1_driver.controller' ) LOG = logging.getLogger(__name__) def marshal_response(reply, failure): # TODO(grs): do replies have a context? # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't # have support for vbin8. msg = proton.Message(inferred=True) if failure: failure = common.serialize_remote_exception(failure) data = {"failure": failure} else: data = {"response": reply} msg.body = jsonutils.dumps(data) return msg def unmarshal_response(message, allowed): # TODO(kgiusti) This may fail to unpack and raise an exception. Need to # communicate this to the caller! data = jsonutils.loads(message.body) failure = data.get('failure') if failure is not None: raise common.deserialize_remote_exception(failure, allowed) return data.get("response") def marshal_request(request, context, envelope): # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't # have support for vbin8. msg = proton.Message(inferred=True) if envelope: request = common.serialize_msg(request) data = { "request": request, "context": context } msg.body = jsonutils.dumps(data) return msg def unmarshal_request(message): data = jsonutils.loads(message.body) msg = common.deserialize_msg(data.get("request")) return (msg, data.get("context")) class ProtonIncomingMessage(base.RpcIncomingMessage): def __init__(self, listener, ctxt, request, message, disposition): super(ProtonIncomingMessage, self).__init__(ctxt, request) self.listener = listener self._reply_to = message.reply_to self._correlation_id = message.id self._disposition = disposition def reply(self, reply=None, failure=None): """Schedule an RPCReplyTask to send the reply.""" if self._reply_to: response = marshal_response(reply, failure) response.correlation_id = self._correlation_id driver = self.listener.driver deadline = compute_timeout(driver._default_reply_timeout) ack = not driver._pre_settle_reply task = controller.SendTask("RPC Reply", response, self._reply_to, # analogous to kombu missing dest t/o: deadline, retry=driver._default_reply_retry, wait_for_ack=ack) driver._ctrl.add_task(task) rc = task.wait() if rc: # something failed. 
Not much we can do at this point but log LOG.debug("RPC Reply failed to send: %s", str(rc)) else: LOG.debug("Ignoring reply as no reply address available") def acknowledge(self): """Schedule a MessageDispositionTask to send the settlement.""" task = controller.MessageDispositionTask(self._disposition, released=False) self.listener.driver._ctrl.add_task(task) def requeue(self): """Schedule a MessageDispositionTask to release the message""" task = controller.MessageDispositionTask(self._disposition, released=True) self.listener.driver._ctrl.add_task(task) class Queue(object): def __init__(self): self._queue = collections.deque() self._lock = threading.Lock() self._pop_wake_condition = threading.Condition(self._lock) self._started = True def put(self, item): with self._lock: self._queue.appendleft(item) self._pop_wake_condition.notify() def pop(self, timeout): with timeutils.StopWatch(timeout) as stop_watcher: with self._lock: while len(self._queue) == 0: if stop_watcher.expired() or not self._started: return None self._pop_wake_condition.wait( stop_watcher.leftover(return_none=True) ) return self._queue.pop() def stop(self): with self._lock: self._started = False self._pop_wake_condition.notify_all() class ProtonListener(base.PollStyleListener): def __init__(self, driver): super(ProtonListener, self).__init__(driver.prefetch_size) self.driver = driver self.incoming = Queue() self.id = uuid.uuid4().hex def stop(self): self.incoming.stop() @base.batch_poll_helper def poll(self, timeout=None): qentry = self.incoming.pop(timeout) if qentry is None: return None message = qentry['message'] request, ctxt = unmarshal_request(message) disposition = qentry['disposition'] return ProtonIncomingMessage(self, ctxt, request, message, disposition) class ProtonDriver(base.BaseDriver): """AMQP 1.0 Driver See :doc:`AMQP1.0` for details. """ def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=[]): if proton is None or controller is None: raise NotImplementedError("Proton AMQP C libraries not installed") super(ProtonDriver, self).__init__(conf, url, default_exchange, allowed_remote_exmods) opt_group = cfg.OptGroup(name='oslo_messaging_amqp', title='AMQP 1.0 driver options') conf.register_group(opt_group) conf.register_opts(opts.amqp1_opts, group=opt_group) conf = common.ConfigOptsProxy(conf, url, opt_group.name) self._conf = conf self._default_exchange = default_exchange # lazy connection setup - don't create the controller until # after the first messaging request: self._ctrl = None self._pid = None self._lock = threading.Lock() # timeout for message acknowledgement opt_name = conf.oslo_messaging_amqp self._default_reply_timeout = opt_name.default_reply_timeout self._default_send_timeout = opt_name.default_send_timeout self._default_notify_timeout = opt_name.default_notify_timeout self._default_reply_retry = opt_name.default_reply_retry # which message types should be sent pre-settled? ps = [s.lower() for s in opt_name.pre_settled] self._pre_settle_call = 'rpc-call' in ps self._pre_settle_reply = 'rpc-reply' in ps self._pre_settle_cast = 'rpc-cast' in ps self._pre_settle_notify = 'notify' in ps bad_opts = set(ps).difference(['rpc-call', 'rpc-reply', 'rpc-cast', 'notify']) if bad_opts: LOG.warning(_LW("Ignoring unrecognized pre_settle value(s): %s"), " ".join(bad_opts)) def _ensure_connect_called(func): """Causes a new controller to be created when the messaging service is first used by the current process. 
It is safe to push tasks to it whether connected or not, but those tasks won't be processed until connection completes. """ def wrap(self, *args, **kws): with self._lock: # check to see if a fork was done after the Controller and its # I/O thread was spawned. old_pid will be None the first time # this is called which will cause the Controller to be created. old_pid = self._pid self._pid = os.getpid() if old_pid != self._pid: if self._ctrl is not None: # fork was called after the Controller was created, and # we are now executing as the child process. Do not # touch the existing Controller - it is owned by the # parent. Best we can do here is simply drop it and # hope we get lucky. LOG.warning(_LW("Process forked after connection " "established!")) self._ctrl = None # Create a Controller that connects to the messaging # service: self._ctrl = controller.Controller(self._url, self._default_exchange, self._conf) self._ctrl.connect() return func(self, *args, **kws) return wrap @_ensure_connect_called def send(self, target, ctxt, message, wait_for_reply=False, timeout=None, retry=None): """Send a message to the given target. :param target: destination for message :type target: oslo_messaging.Target :param ctxt: message context :type ctxt: dict :param message: message payload :type message: dict :param wait_for_reply: expects a reply message, wait for it :type wait_for_reply: bool :param timeout: raise exception if send does not complete within timeout seconds. None == no timeout. :type timeout: float :param retry: (optional) maximum re-send attempts on recoverable error None or -1 means to retry forever 0 means no retry N means N retries :type retry: int """ request = marshal_request(message, ctxt, envelope=False) expire = 0 if timeout: expire = compute_timeout(timeout) # when the caller times out # amqp uses millisecond time values, timeout is seconds request.ttl = int(timeout * 1000) request.expiry_time = int(expire * 1000) else: # no timeout provided by application. If the backend is queueless # this could lead to a hang - provide a default to prevent this # TODO(kgiusti) only do this if brokerless backend expire = compute_timeout(self._default_send_timeout) if wait_for_reply: ack = not self._pre_settle_call task = controller.RPCCallTask(target, request, expire, retry, wait_for_ack=ack) else: ack = not self._pre_settle_cast task = controller.SendTask("RPC Cast", request, target, expire, retry, wait_for_ack=ack) self._ctrl.add_task(task) reply = task.wait() if isinstance(reply, Exception): raise reply if reply: # TODO(kgiusti) how to handle failure to un-marshal? # Must log, and determine best way to communicate this failure # back up to the caller reply = unmarshal_response(reply, self._allowed_remote_exmods) return reply @_ensure_connect_called def send_notification(self, target, ctxt, message, version, retry=None): """Send a notification message to the given target. 
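The message payload is marshalled by marshal_request() above into a JSON body of the form {"request": <message>, "context": <ctxt>}, and the request dict is additionally wrapped in the oslo envelope when version == 2.0.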
:param target: destination for message :type target: oslo_messaging.Target :param ctxt: message context :type ctxt: dict :param message: message payload :type message: dict :param version: message envelope version :type version: float :param retry: (optional) maximum re-send attempts on recoverable error None or -1 means to retry forever 0 means no retry N means N retries :type retry: int """ request = marshal_request(message, ctxt, (version == 2.0)) # no timeout is applied to notifications, however if the backend is # queueless this could lead to a hang - provide a default to prevent # this # TODO(kgiusti) should raise NotImplemented if not broker backend deadline = compute_timeout(self._default_notify_timeout) ack = not self._pre_settle_notify task = controller.SendTask("Notify", request, target, deadline, retry, wait_for_ack=ack, notification=True) self._ctrl.add_task(task) rc = task.wait() if isinstance(rc, Exception): raise rc @_ensure_connect_called def listen(self, target, batch_size, batch_timeout): """Construct a Listener for the given target.""" LOG.debug("Listen to %s", target) listener = ProtonListener(self) task = controller.SubscribeTask(target, listener) self._ctrl.add_task(task) task.wait() return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) @_ensure_connect_called def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): """Construct a Listener for notifications on the given target and priority. """ # TODO(kgiusti) should raise NotImplemented if not broker backend LOG.debug("Listen for notifications %s", targets_and_priorities) if pool: raise NotImplementedError('"pool" not implemented by ' 'this transport driver') listener = ProtonListener(self) # this is how the destination target is created by the notifier, # see MessagingDriver.notify in oslo_messaging/notify/messaging.py for target, priority in targets_and_priorities: topic = '%s.%s' % (target.topic, priority) # Sooo... the exchange is simply discarded? (see above comment) task = controller.SubscribeTask(Target(topic=topic), listener, notifications=True) self._ctrl.add_task(task) task.wait() return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) def cleanup(self): """Release all resources.""" if self._ctrl: self._ctrl.shutdown() self._ctrl = None LOG.info(_LI("AMQP 1.0 messaging driver shutdown")) def require_features(self, requeue=True): pass oslo.messaging-5.35.0/oslo_messaging/_drivers/impl_kafka.py0000666000175100017510000003207613224676077024106 0ustar zuulzuul00000000000000# Copyright (C) 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Following code fixes 2 issues with kafka-python and # The current release of eventlet (0.19.0) does not actually remove # select.poll [1]. Because of kafka-python.selectors34 selects # PollSelector instead of SelectSelector [2]. PollSelector relies on # select.poll, which does not work when eventlet/greenlet is used. 
This # bug in evenlet is fixed in the master branch [3], but there's no # release of eventlet that includes this fix at this point. import json import threading import kafka from kafka.client_async import selectors import kafka.errors from oslo_log import log as logging from oslo_utils import eventletutils import tenacity from oslo_messaging._drivers import base from oslo_messaging._drivers import common as driver_common from oslo_messaging._drivers.kafka_driver import kafka_options from oslo_messaging._i18n import _LE from oslo_messaging._i18n import _LW from oslo_serialization import jsonutils import logging as l l.basicConfig(level=l.INFO) l.getLogger("kafka").setLevel(l.WARN) l.getLogger("stevedore").setLevel(l.WARN) if eventletutils.is_monkey_patched('select'): # monkeypatch the vendored SelectSelector._select like eventlet does # https://github.com/eventlet/eventlet/blob/master/eventlet/green/selectors.py#L32 from eventlet.green import select selectors.SelectSelector._select = staticmethod(select.select) # Force to use the select selectors KAFKA_SELECTOR = selectors.SelectSelector else: KAFKA_SELECTOR = selectors.DefaultSelector LOG = logging.getLogger(__name__) def unpack_message(msg): context = {} message = None msg = json.loads(msg) message = driver_common.deserialize_msg(msg) context = message['_context'] del message['_context'] return context, message def pack_message(ctxt, msg): """Pack context into msg.""" if isinstance(ctxt, dict): context_d = ctxt else: context_d = ctxt.to_dict() msg['_context'] = context_d msg = driver_common.serialize_msg(msg) return msg def concat(sep, items): return sep.join(filter(bool, items)) def target_to_topic(target, priority=None, vhost=None): """Convert target into topic string :param target: Message destination target :type target: oslo_messaging.Target :param priority: Notification priority :type priority: string :param priority: Notification vhost :type priority: string """ return concat(".", [target.topic, priority, vhost]) def retry_on_retriable_kafka_error(exc): return (isinstance(exc, kafka.errors.KafkaError) and exc.retriable) def with_reconnect(retries=None): def decorator(func): @tenacity.retry( retry=tenacity.retry_if_exception(retry_on_retriable_kafka_error), wait=tenacity.wait_fixed(1), stop=tenacity.stop_after_attempt(retries), reraise=True ) def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper return decorator class Connection(object): def __init__(self, conf, url): self.conf = conf self.url = url self.virtual_host = url.virtual_host self._parse_url() def _parse_url(self): driver_conf = self.conf.oslo_messaging_kafka self.hostaddrs = [] for host in self.url.hosts: if host.hostname: self.hostaddrs.append("%s:%s" % ( host.hostname, host.port or driver_conf.kafka_default_port)) if not self.hostaddrs: self.hostaddrs.append("%s:%s" % (driver_conf.kafka_default_host, driver_conf.kafka_default_port)) def reset(self): """Reset a connection so it can be used again.""" pass class ConsumerConnection(Connection): def __init__(self, conf, url): super(ConsumerConnection, self).__init__(conf, url) driver_conf = self.conf.oslo_messaging_kafka self.consumer = None self.consumer_timeout = driver_conf.kafka_consumer_timeout self.max_fetch_bytes = driver_conf.kafka_max_fetch_bytes self.group_id = driver_conf.consumer_group self._consume_loop_stopped = False @with_reconnect() def _poll_messages(self, timeout): messages = self.consumer.poll(timeout * 1000.0) messages = [record.value for records in messages.values() if records for 
record in records] if not messages: # NOTE(sileht): really ? you return payload but no messages... # simulate timeout to consume message again raise kafka.errors.ConsumerTimeout() return messages def consume(self, timeout=None): """Receive up to 'max_fetch_messages' messages. :param timeout: poll timeout in seconds """ def _raise_timeout(exc): raise driver_common.Timeout(str(exc)) timer = driver_common.DecayingTimer(duration=timeout) timer.start() poll_timeout = (self.consumer_timeout if timeout is None else min(timeout, self.consumer_timeout)) while True: if self._consume_loop_stopped: return try: return self._poll_messages(poll_timeout) except kafka.errors.ConsumerTimeout as exc: poll_timeout = timer.check_return( _raise_timeout, exc, maximum=self.consumer_timeout) except Exception: LOG.exception(_LE("Failed to consume messages")) return def stop_consuming(self): self._consume_loop_stopped = True def close(self): if self.consumer: self.consumer.close() self.consumer = None @with_reconnect() def declare_topic_consumer(self, topics, group=None): # TODO(Support for manual/auto_commit functionality) # When auto_commit is False, consumer can manually notify # the completion of the subscription. # Currently we don't support for non auto commit option self.consumer = kafka.KafkaConsumer( *topics, group_id=(group or self.group_id), bootstrap_servers=self.hostaddrs, max_partition_fetch_bytes=self.max_fetch_bytes, selector=KAFKA_SELECTOR ) class ProducerConnection(Connection): def __init__(self, conf, url): super(ProducerConnection, self).__init__(conf, url) driver_conf = self.conf.oslo_messaging_kafka self.batch_size = driver_conf.producer_batch_size self.linger_ms = driver_conf.producer_batch_timeout * 1000 self.producer = None self.producer_lock = threading.Lock() def notify_send(self, topic, ctxt, msg, retry): """Send messages to Kafka broker. 
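The context is folded into the message dict under a '_context' key by pack_message() above and the result is JSON-serialized before being published; the actual send is wrapped in with_reconnect() so that retriable Kafka errors are retried.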
:param topic: String of the topic :param ctxt: context for the messages :param msg: messages for publishing :param retry: the number of retry """ retry = retry if retry >= 0 else None message = pack_message(ctxt, msg) message = jsonutils.dumps(message) @with_reconnect(retries=retry) def wrapped_with_reconnect(): self._ensure_producer() # NOTE(sileht): This returns a future, we can use get() # if we want to block like other driver future = self.producer.send(topic, message) future.get() try: wrapped_with_reconnect() except Exception: # NOTE(sileht): if something goes wrong close the producer # connection self._close_producer() raise def close(self): self._close_producer() def _close_producer(self): with self.producer_lock: if self.producer: self.producer.close() self.producer = None def _ensure_producer(self): if self.producer: return with self.producer_lock: if self.producer: return self.producer = kafka.KafkaProducer( bootstrap_servers=self.hostaddrs, linger_ms=self.linger_ms, batch_size=self.batch_size, selector=KAFKA_SELECTOR) class OsloKafkaMessage(base.RpcIncomingMessage): def __init__(self, ctxt, message): super(OsloKafkaMessage, self).__init__(ctxt, message) def requeue(self): LOG.warning(_LW("requeue is not supported")) def reply(self, reply=None, failure=None): LOG.warning(_LW("reply is not supported")) class KafkaListener(base.PollStyleListener): def __init__(self, conn): super(KafkaListener, self).__init__() self._stopped = threading.Event() self.conn = conn self.incoming_queue = [] # FIXME(sileht): We do a first poll to ensure we topics are created # This is a workaround mainly for functional tests, in real life # this is fine if topics are not created synchroneously self.poll(5) @base.batch_poll_helper def poll(self, timeout=None): while not self._stopped.is_set(): if self.incoming_queue: return self.incoming_queue.pop(0) try: messages = self.conn.consume(timeout=timeout) or [] for message in messages: msg = OsloKafkaMessage(*unpack_message(message)) self.incoming_queue.append(msg) except driver_common.Timeout: return None def stop(self): self._stopped.set() self.conn.stop_consuming() def cleanup(self): self.conn.close() class KafkaDriver(base.BaseDriver): """Note: Current implementation of this driver is experimental. We will have functional and/or integrated testing enabled for this driver. 
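A hypothetical end-to-end use of this driver via the public oslo.messaging API (broker address, topic and payload are made up for illustration)::

    import oslo_messaging
    from oslo_config import cfg

    transport = oslo_messaging.get_notification_transport(
        cfg.CONF, url='kafka://127.0.0.1:9092/')
    notifier = oslo_messaging.Notifier(transport, driver='messaging',
                                       publisher_id='testing',
                                       topics=['notifications'])
    notifier.info({}, 'event.sample', {'payload': 'data'})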
""" def __init__(self, conf, url, default_exchange=None, allowed_remote_exmods=None): conf = kafka_options.register_opts(conf, url) super(KafkaDriver, self).__init__( conf, url, default_exchange, allowed_remote_exmods) self.listeners = [] self.virtual_host = url.virtual_host self.pconn = ProducerConnection(conf, url) def cleanup(self): self.pconn.close() for c in self.listeners: c.close() self.listeners = [] def send(self, target, ctxt, message, wait_for_reply=None, timeout=None, retry=None): raise NotImplementedError( 'The RPC implementation for Kafka is not implemented') def send_notification(self, target, ctxt, message, version, retry=None): """Send notification to Kafka brokers :param target: Message destination target :type target: oslo_messaging.Target :param ctxt: Message context :type ctxt: dict :param message: Message payload to pass :type message: dict :param version: Messaging API version (currently not used) :type version: str :param retry: an optional default kafka consumer retries configuration None means to retry forever 0 means no retry N means N retries :type retry: int """ self.pconn.notify_send(target_to_topic(target, vhost=self.virtual_host), ctxt, message, retry) def listen(self, target, batch_size, batch_timeout): raise NotImplementedError( 'The RPC implementation for Kafka is not implemented') def listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): """Listen to a specified list of targets on Kafka brokers :param targets_and_priorities: List of pairs (target, priority) priority is not used for kafka driver target.exchange_target.topic is used as a kafka topic :type targets_and_priorities: list :param pool: consumer group of Kafka consumers :type pool: string """ conn = ConsumerConnection(self.conf, self._url) topics = set() for target, priority in targets_and_priorities: topics.add(target_to_topic(target, priority)) conn.declare_topic_consumer(topics, pool) listener = KafkaListener(conn) return base.PollStyleListenerAdapter(listener, batch_size, batch_timeout) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/0000775000175100017510000000000013224676256023605 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_async.py0000666000175100017510000000363013224676046026164 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import eventletutils from oslo_utils import importutils def import_zmq(): imported_zmq = importutils.try_import( 'eventlet.green.zmq' if eventletutils.is_monkey_patched('thread') else 'zmq', default=None ) return imported_zmq def get_poller(): if eventletutils.is_monkey_patched('thread'): from oslo_messaging._drivers.zmq_driver.poller import green_poller return green_poller.GreenPoller() from oslo_messaging._drivers.zmq_driver.poller import threading_poller return threading_poller.ThreadingPoller() def get_executor(method): if eventletutils.is_monkey_patched('thread'): from oslo_messaging._drivers.zmq_driver.poller import green_poller return green_poller.GreenExecutor(method) from oslo_messaging._drivers.zmq_driver.poller import threading_poller return threading_poller.ThreadingExecutor(method) def get_pool(size): import futurist if eventletutils.is_monkey_patched('thread'): return futurist.GreenThreadPoolExecutor(size) return futurist.ThreadPoolExecutor(size) def get_queue(): if eventletutils.is_monkey_patched('thread'): import eventlet return eventlet.queue.Queue(), eventlet.queue.Empty import six return six.moves.queue.Queue(), six.moves.queue.Empty oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_options.py0000666000175100017510000002310513224676046026541 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_config import cfg from oslo_messaging._drivers import base from oslo_messaging._drivers import common from oslo_messaging import server MATCHMAKER_BACKENDS = ('redis', 'sentinel', 'dummy') MATCHMAKER_DEFAULT = 'redis' zmq_opts = [ cfg.StrOpt('rpc_zmq_bind_address', default='*', deprecated_group='DEFAULT', help='ZeroMQ bind address. Should be a wildcard (*), ' 'an ethernet interface, or IP. ' 'The "host" option should point or resolve to this ' 'address.'), cfg.StrOpt('rpc_zmq_matchmaker', default=MATCHMAKER_DEFAULT, choices=MATCHMAKER_BACKENDS, deprecated_group='DEFAULT', help='MatchMaker driver.'), cfg.IntOpt('rpc_zmq_contexts', default=1, deprecated_group='DEFAULT', help='Number of ZeroMQ contexts, defaults to 1.'), cfg.IntOpt('rpc_zmq_topic_backlog', deprecated_group='DEFAULT', help='Maximum number of ingress messages to locally buffer ' 'per topic. Default is unlimited.'), cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', deprecated_group='DEFAULT', help='Directory for holding IPC sockets.'), cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), sample_default='localhost', deprecated_group='DEFAULT', help='Name of this node. Must be a valid hostname, FQDN, or ' 'IP address. Must match "host" option, if running Nova.'), cfg.IntOpt('zmq_linger', default=-1, deprecated_group='DEFAULT', deprecated_name='rpc_cast_timeout', help='Number of seconds to wait before all pending ' 'messages will be sent after closing a socket. ' 'The default value of -1 specifies an infinite linger ' 'period. The value of 0 specifies no linger period. 
' 'Pending messages shall be discarded immediately ' 'when the socket is closed. Positive values specify an ' 'upper bound for the linger period.'), cfg.IntOpt('rpc_poll_timeout', default=1, deprecated_group='DEFAULT', help='The default number of seconds that poll should wait. ' 'Poll raises timeout exception when timeout expired.'), cfg.IntOpt('zmq_target_expire', default=300, deprecated_group='DEFAULT', help='Expiration timeout in seconds of a name service record ' 'about existing target ( < 0 means no timeout).'), cfg.IntOpt('zmq_target_update', default=180, deprecated_group='DEFAULT', help='Update period in seconds of a name service record ' 'about existing target.'), cfg.BoolOpt('use_pub_sub', default=False, deprecated_group='DEFAULT', help='Use PUB/SUB pattern for fanout methods. ' 'PUB/SUB always uses proxy.'), cfg.BoolOpt('use_router_proxy', default=False, deprecated_group='DEFAULT', help='Use ROUTER remote proxy.'), cfg.BoolOpt('use_dynamic_connections', default=False, help='This option makes direct connections dynamic or static. ' 'It makes sense only with use_router_proxy=False which ' 'means to use direct connections for direct message ' 'types (ignored otherwise).'), cfg.IntOpt('zmq_failover_connections', default=2, help='How many additional connections to a host will be made ' 'for failover reasons. This option is actual only in ' 'dynamic connections mode.'), cfg.PortOpt('rpc_zmq_min_port', default=49153, deprecated_group='DEFAULT', help='Minimal port number for random ports range.'), cfg.IntOpt('rpc_zmq_max_port', min=1, max=65536, default=65536, deprecated_group='DEFAULT', help='Maximal port number for random ports range.'), cfg.IntOpt('rpc_zmq_bind_port_retries', default=100, deprecated_group='DEFAULT', help='Number of retries to find free port number before ' 'fail with ZMQBindError.'), cfg.StrOpt('rpc_zmq_serialization', default='json', choices=('json', 'msgpack'), deprecated_group='DEFAULT', help='Default serialization mechanism for ' 'serializing/deserializing outgoing/incoming messages'), cfg.BoolOpt('zmq_immediate', default=True, help='This option configures round-robin mode in zmq socket. ' 'True means not keeping a queue when server side ' 'disconnects. False means to keep queue and messages ' 'even if server is disconnected, when the server ' 'appears we send all accumulated messages to it.'), cfg.IntOpt('zmq_tcp_keepalive', default=-1, help='Enable/disable TCP keepalive (KA) mechanism. ' 'The default value of -1 (or any other negative value) ' 'means to skip any overrides and leave it to OS default; ' '0 and 1 (or any other positive value) mean to ' 'disable and enable the option respectively.'), cfg.IntOpt('zmq_tcp_keepalive_idle', default=-1, help='The duration between two keepalive transmissions in ' 'idle condition. ' 'The unit is platform dependent, for example, ' 'seconds in Linux, milliseconds in Windows etc. ' 'The default value of -1 (or any other negative value ' 'and 0) means to skip any overrides and leave it ' 'to OS default.'), cfg.IntOpt('zmq_tcp_keepalive_cnt', default=-1, help='The number of retransmissions to be carried out before ' 'declaring that remote end is not available. ' 'The default value of -1 (or any other negative value ' 'and 0) means to skip any overrides and leave it ' 'to OS default.'), cfg.IntOpt('zmq_tcp_keepalive_intvl', default=-1, help='The duration between two successive keepalive ' 'retransmissions, if acknowledgement to the previous ' 'keepalive transmission is not received. 
' 'The unit is platform dependent, for example, ' 'seconds in Linux, milliseconds in Windows etc. ' 'The default value of -1 (or any other negative value ' 'and 0) means to skip any overrides and leave it ' 'to OS default.'), cfg.IntOpt('rpc_thread_pool_size', default=100, help='Maximum number of (green) threads to work concurrently.'), cfg.IntOpt('rpc_message_ttl', default=300, help='Expiration timeout in seconds of a sent/received message ' 'after which it is not tracked anymore by a ' 'client/server.'), cfg.BoolOpt('rpc_use_acks', default=False, help='Wait for message acknowledgements from receivers. ' 'This mechanism works only via proxy without PUB/SUB.'), cfg.IntOpt('rpc_ack_timeout_base', default=15, help='Number of seconds to wait for an ack from a cast/call. ' 'After each retry attempt this timeout is multiplied by ' 'some specified multiplier.'), cfg.IntOpt('rpc_ack_timeout_multiplier', default=2, help='Number to multiply base ack timeout by after each retry ' 'attempt.'), cfg.IntOpt('rpc_retry_attempts', default=3, help='Default number of message sending attempts in case ' 'of any problems occurred: positive value N means ' 'at most N retries, 0 means no retries, None or -1 ' '(or any other negative values) mean to retry forever. ' 'This option is used only if acknowledgments are ' 'enabled.'), cfg.ListOpt('subscribe_on', default=[], help='List of publisher hosts SubConsumer can subscribe on. ' 'This option has higher priority then the default ' 'publishers list taken from the matchmaker.'), ] def register_opts(conf, url): opt_group = cfg.OptGroup(name='oslo_messaging_zmq', title='ZeroMQ driver options') conf.register_opts(zmq_opts, group=opt_group) conf.register_opts(server._pool_opts) conf.register_opts(base.base_opts) return common.ConfigOptsProxy(conf, url, opt_group.name) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/0000775000175100017510000000000013224676256025063 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_routing_table.py0000666000175100017510000001653213224676046031170 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
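# NOTE: a hypothetical sketch of how a driver consumes register_opts()
# from zmq_options.py above: the options land in the [oslo_messaging_zmq]
# group and the returned ConfigOptsProxy resolves per-transport-URL
# overrides while proxying normal attribute access.

from oslo_config import cfg
import oslo_messaging
from oslo_messaging._drivers.zmq_driver import zmq_options

conf = cfg.ConfigOpts()
url = oslo_messaging.TransportURL.parse(conf, 'zmq://127.0.0.1/')
conf = zmq_options.register_opts(conf, url)
assert conf.oslo_messaging_zmq.rpc_poll_timeout == 1  # default defined above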
import itertools import logging import threading import time from oslo_messaging._drivers.zmq_driver.matchmaker import zmq_matchmaker_base from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_updater from oslo_messaging._i18n import _LW LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class RoutingTableAdaptor(object): def __init__(self, conf, matchmaker, listener_type): self.conf = conf self.matchmaker = matchmaker self.listener_type = listener_type self.routing_table = RoutingTable(conf) self.routing_table_updater = RoutingTableUpdater( conf, matchmaker, self.routing_table) self.round_robin_targets = {} self._lock = threading.Lock() def get_round_robin_host(self, target): target_key = self._fetch_round_robin_hosts_from_matchmaker(target) rr_gen = self.round_robin_targets[target_key] host = next(rr_gen) LOG.debug("Host resolved for the current connection is %s" % host) return host def get_all_round_robin_hosts(self, target): target_key = self._fetch_round_robin_hosts_from_matchmaker(target) return self.routing_table.get_hosts_fanout(target_key) def _fetch_round_robin_hosts_from_matchmaker(self, target): target_key = zmq_address.target_to_key( target, zmq_names.socket_type_str(self.listener_type)) LOG.debug("Processing target %s for round-robin." % target_key) if target_key not in self.round_robin_targets: with self._lock: if target_key not in self.round_robin_targets: LOG.debug("Target %s is not in cache. Check matchmaker " "server." % target_key) hosts = self.matchmaker.get_hosts_retry( target, zmq_names.socket_type_str(self.listener_type)) LOG.debug("Received hosts %s" % hosts) self.routing_table.update_hosts(target_key, hosts) self.round_robin_targets[target_key] = \ self.routing_table.get_hosts_round_robin(target_key) return target_key def get_fanout_hosts(self, target): target_key = zmq_address.target_to_key( target, zmq_names.socket_type_str(self.listener_type)) LOG.debug("Processing target %s for fanout." % target_key) if not self.routing_table.contains(target_key): self._fetch_fanout_hosts_from_matchmaker(target, target_key) return self.routing_table.get_hosts_fanout(target_key) def _fetch_fanout_hosts_from_matchmaker(self, target, target_key): with self._lock: if not self.routing_table.contains(target_key): LOG.debug("Target %s is not in cache. Check matchmaker server." 
% target_key) hosts = self.matchmaker.get_hosts_fanout( target, zmq_names.socket_type_str(self.listener_type)) LOG.debug("Received hosts %s" % hosts) self.routing_table.update_hosts(target_key, hosts) def cleanup(self): self.routing_table_updater.cleanup() class RoutingTable(object): def __init__(self, conf): self.conf = conf self.targets = {} self._lock = threading.Lock() def register(self, target_key, host): with self._lock: if target_key in self.targets: hosts, tm = self.targets[target_key] if host not in hosts: hosts.add(host) self.targets[target_key] = (hosts, self._create_tm()) else: self.targets[target_key] = ({host}, self._create_tm()) def get_targets(self): with self._lock: return list(self.targets.keys()) def unregister(self, target_key, host): with self._lock: hosts, tm = self.targets.get(target_key) if hosts and host in hosts: hosts.discard(host) self.targets[target_key] = (hosts, self._create_tm()) def update_hosts(self, target_key, hosts_updated): with self._lock: if target_key in self.targets and not hosts_updated: self.targets.pop(target_key) return hosts_current, _ = self.targets.get(target_key, (set(), None)) hosts_updated = set(hosts_updated) has_differences = hosts_updated ^ hosts_current if has_differences: self.targets[target_key] = (hosts_updated, self._create_tm()) def get_hosts_round_robin(self, target_key): while self.contains(target_key): for host in self._get_hosts_rr(target_key): yield host def get_hosts_fanout(self, target_key): hosts, _ = self._get_hosts(target_key) return hosts def contains(self, target_key): with self._lock: return target_key in self.targets def _get_hosts(self, target_key): with self._lock: hosts, tm = self.targets.get(target_key, ([], None)) hosts = list(hosts) return hosts, tm def _get_tm(self, target_key): with self._lock: _, tm = self.targets.get(target_key) return tm def _is_target_changed(self, target_key, tm_orig): return self._get_tm(target_key) != tm_orig @staticmethod def _create_tm(): return time.time() def _get_hosts_rr(self, target_key): hosts, tm_original = self._get_hosts(target_key) for host in itertools.cycle(hosts): if self._is_target_changed(target_key, tm_original): raise StopIteration() yield host class RoutingTableUpdater(zmq_updater.UpdaterBase): def __init__(self, conf, matchmaker, routing_table): self.routing_table = routing_table super(RoutingTableUpdater, self).__init__( conf, matchmaker, self._update_routing_table, conf.oslo_messaging_zmq.zmq_target_update) def _update_routing_table(self): target_keys = self.routing_table.get_targets() try: for target_key in target_keys: hosts = self.matchmaker.get_hosts_by_key(target_key) self.routing_table.update_hosts(target_key, hosts) LOG.debug("Updating routing table from the matchmaker. " "%d target(s) updated %s." % (len(target_keys), target_keys)) except zmq_matchmaker_base.MatchmakerUnavailable: LOG.warning(_LW("Not updated. Matchmaker was not available.")) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_client.py0000666000175100017510000000715313224676046027607 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging._drivers import common from oslo_messaging._drivers.zmq_driver.client import zmq_client_base from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names zmq = zmq_async.import_zmq() class WrongClientException(common.RPCException): """Raised if client type doesn't match configuration""" class ZmqClientMixDirectPubSub(zmq_client_base.ZmqClientBase): """Client for using with direct connections and fanout over proxy: use_pub_sub = true use_router_proxy = false """ def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None): if conf.oslo_messaging_zmq.use_router_proxy or not \ conf.oslo_messaging_zmq.use_pub_sub: raise WrongClientException() publisher = self._create_publisher_direct_dynamic(conf, matchmaker) \ if conf.oslo_messaging_zmq.use_dynamic_connections else \ self._create_publisher_direct(conf, matchmaker) publisher_proxy = self._create_publisher_proxy_dynamic(conf, matchmaker) \ if conf.oslo_messaging_zmq.use_dynamic_connections else \ self._create_publisher_proxy(conf, matchmaker) super(ZmqClientMixDirectPubSub, self).__init__( conf, matchmaker, allowed_remote_exmods, publishers={ zmq_names.CAST_FANOUT_TYPE: publisher_proxy, zmq_names.NOTIFY_TYPE: publisher_proxy, "default": publisher } ) class ZmqClientDirect(zmq_client_base.ZmqClientBase): """This kind of client (publishers combination) is to be used for direct connections only: use_pub_sub = false use_router_proxy = false """ def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None): if conf.oslo_messaging_zmq.use_pub_sub or \ conf.oslo_messaging_zmq.use_router_proxy: raise WrongClientException() publisher = self._create_publisher_direct_dynamic(conf, matchmaker) \ if conf.oslo_messaging_zmq.use_dynamic_connections else \ self._create_publisher_direct(conf, matchmaker) super(ZmqClientDirect, self).__init__( conf, matchmaker, allowed_remote_exmods, publishers={ "default": publisher } ) class ZmqClientProxy(zmq_client_base.ZmqClientBase): """Client for using with proxy: use_pub_sub = true use_router_proxy = true or use_pub_sub = false use_router_proxy = true """ def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None): if not conf.oslo_messaging_zmq.use_router_proxy: raise WrongClientException() super(ZmqClientProxy, self).__init__( conf, matchmaker, allowed_remote_exmods, publishers={ "default": self._create_publisher_proxy(conf, matchmaker) } ) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_client_base.py0000666000175100017510000001221113224676046030570 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ import zmq_dealer_publisher_direct from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ import zmq_dealer_publisher_proxy from oslo_messaging._drivers.zmq_driver.client import zmq_ack_manager from oslo_messaging._drivers.zmq_driver.client import zmq_publisher_manager from oslo_messaging._drivers.zmq_driver.client import zmq_request from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names zmq = zmq_async.import_zmq() class ZmqClientBase(object): def __init__(self, conf, matchmaker=None, allowed_remote_exmods=None, publishers=None): self.conf = conf self.matchmaker = matchmaker self.allowed_remote_exmods = allowed_remote_exmods or [] self.publishers = publishers self.call_publisher = publishers.get(zmq_names.CALL_TYPE, publishers["default"]) self.cast_publisher = publishers.get(zmq_names.CAST_TYPE, publishers["default"]) self.fanout_publisher = publishers.get(zmq_names.CAST_FANOUT_TYPE, publishers["default"]) self.notify_publisher = publishers.get(zmq_names.NOTIFY_TYPE, publishers["default"]) def send_call(self, target, context, message, timeout=None, retry=None): request = zmq_request.CallRequest( target, context=context, message=message, retry=retry, timeout=timeout, allowed_remote_exmods=self.allowed_remote_exmods ) return self.call_publisher.send_call(request) def send_cast(self, target, context, message, retry=None): request = zmq_request.CastRequest( target, context=context, message=message, retry=retry ) self.cast_publisher.send_cast(request) def send_fanout(self, target, context, message, retry=None): request = zmq_request.FanoutRequest( target, context=context, message=message, retry=retry ) self.fanout_publisher.send_fanout(request) def send_notify(self, target, context, message, version, retry=None): request = zmq_request.NotificationRequest( target, context=context, message=message, retry=retry, version=version ) self.notify_publisher.send_notify(request) @staticmethod def _create_publisher_direct(conf, matchmaker): publisher_cls = zmq_dealer_publisher_direct.DealerPublisherDirectStatic publisher_direct = publisher_cls(conf, matchmaker) publisher_manager_cls = zmq_publisher_manager.PublisherManagerStatic return publisher_manager_cls(publisher_direct) @staticmethod def _create_publisher_direct_dynamic(conf, matchmaker): publisher_cls = zmq_dealer_publisher_direct.DealerPublisherDirect publisher_direct = publisher_cls(conf, matchmaker) publisher_manager_cls = zmq_publisher_manager.PublisherManagerDynamic \ if conf.oslo_messaging_zmq.use_pub_sub else \ zmq_publisher_manager.PublisherManagerDynamicAsyncMultisend return publisher_manager_cls(publisher_direct) @staticmethod def _create_publisher_proxy(conf, matchmaker): publisher_proxy = \ zmq_dealer_publisher_proxy.DealerPublisherProxy(conf, matchmaker) if conf.oslo_messaging_zmq.rpc_use_acks: ack_manager_cls = zmq_ack_manager.AckManager \ if conf.oslo_messaging_zmq.use_pub_sub else \ zmq_ack_manager.AckManagerAsyncMultisend return ack_manager_cls(publisher_proxy) else: publisher_manager_cls = \ zmq_publisher_manager.PublisherManagerStatic \ if conf.oslo_messaging_zmq.use_pub_sub else \ zmq_publisher_manager.PublisherManagerStaticAsyncMultisend return publisher_manager_cls(publisher_proxy) @staticmethod def _create_publisher_proxy_dynamic(conf, matchmaker): publisher_proxy = \ 
zmq_dealer_publisher_proxy.DealerPublisherProxyDynamic(conf, matchmaker) return zmq_publisher_manager.PublisherManagerDynamic(publisher_proxy) def cleanup(self): cleaned = set() for publisher in self.publishers.values(): if publisher not in cleaned: publisher.cleanup() cleaned.add(publisher) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_request.py0000666000175100017510000000733213224676046030020 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import logging import uuid import six from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging._i18n import _LE LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() @six.add_metaclass(abc.ABCMeta) class Request(object): """Zmq request abstract class Represents socket (publisher) independent data object to publish. Request object should contain all needed information for a publisher to publish it, for instance: message payload, target, timeout and retries etc. """ def __init__(self, target, context=None, message=None, retry=None): """Construct request object :param target: Message destination target :type target: oslo_messaging.Target :param context: Message context :type context: dict :param message: Message payload to pass :type message: dict :param retry: an optional default connection retries configuration None or -1 means to retry forever 0 means no retry N means N retries :type retry: int """ if self.msg_type not in zmq_names.REQUEST_TYPES: raise RuntimeError("Unknown request type!") self.target = target self.context = context self.message = message self.retry = retry if not isinstance(retry, int) and retry is not None: raise ValueError( "retry must be an integer, not {0}".format(type(retry))) self.message_id = str(uuid.uuid1()) @abc.abstractproperty def msg_type(self): """ZMQ request type""" @property def message_version(self): return zmq_version.MESSAGE_VERSION class RpcRequest(Request): def __init__(self, *args, **kwargs): message = kwargs.get("message") if message['method'] is None: errmsg = _LE("No method specified for RPC call") LOG.error(_LE("No method specified for RPC call")) raise KeyError(errmsg) super(RpcRequest, self).__init__(*args, **kwargs) class CallRequest(RpcRequest): msg_type = zmq_names.CALL_TYPE def __init__(self, *args, **kwargs): self.allowed_remote_exmods = kwargs.pop("allowed_remote_exmods") self.timeout = kwargs.pop("timeout") if self.timeout is None: raise ValueError("Timeout should be specified for a RPC call!") elif not isinstance(self.timeout, int): raise ValueError( "timeout must be an integer, not {0}" .format(type(self.timeout))) super(CallRequest, self).__init__(*args, **kwargs) class CastRequest(RpcRequest): msg_type = zmq_names.CAST_TYPE class FanoutRequest(RpcRequest): msg_type = zmq_names.CAST_FANOUT_TYPE class NotificationRequest(Request): msg_type = zmq_names.NOTIFY_TYPE def 
__init__(self, *args, **kwargs): self.version = kwargs.pop("version") super(NotificationRequest, self).__init__(*args, **kwargs) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_senders.py0000666000175100017510000001615713224676046030000 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import logging import threading import six from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_version LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() @six.add_metaclass(abc.ABCMeta) class SenderBase(object): """Base request/response sending interface.""" def __init__(self, conf, async=False): self.conf = conf self.async = async self._lock = threading.Lock() self._send_versions = zmq_version.get_method_versions(self, 'send') def _get_send_version(self, version): send_version = self._send_versions.get(version) if send_version is None: raise zmq_version.UnsupportedMessageVersionError(version) return send_version @abc.abstractmethod def send(self, socket, message): """Send a message via a socket in a thread-safe manner.""" class RequestSenderBase(SenderBase): pass class AckSenderBase(SenderBase): pass class ReplySenderBase(SenderBase): pass class RequestSenderProxy(RequestSenderBase): def send(self, socket, request): assert request.msg_type in zmq_names.REQUEST_TYPES, "Request expected!" send_version = self._get_send_version(request.message_version) with self._lock: send_version(socket, request) LOG.debug("->[proxy:%(addr)s] Sending %(msg_type)s message " "%(msg_id)s to target %(target)s (v%(msg_version)s)", {"addr": list(socket.connections), "msg_type": zmq_names.message_type_str(request.msg_type), "msg_id": request.message_id, "target": request.target, "msg_version": request.message_version}) def _send_v_1_0(self, socket, request): socket.send(b'', zmq.SNDMORE) socket.send_string('1.0', zmq.SNDMORE) socket.send(six.b(str(request.msg_type)), zmq.SNDMORE) socket.send(request.routing_key, zmq.SNDMORE) socket.send_string(request.message_id, zmq.SNDMORE) socket.send_dumped([request.context, request.message]) class AckSenderProxy(AckSenderBase): def send(self, socket, ack): assert ack.msg_type == zmq_names.ACK_TYPE, "Ack expected!" 
send_version = self._get_send_version(ack.message_version) with self._lock: send_version(socket, ack) LOG.debug("->[proxy:%(addr)s] Sending %(msg_type)s for %(msg_id)s " "(v%(msg_version)s)", {"addr": list(socket.connections), "msg_type": zmq_names.message_type_str(ack.msg_type), "msg_id": ack.message_id, "msg_version": ack.message_version}) def _send_v_1_0(self, socket, ack): socket.send(b'', zmq.SNDMORE) socket.send_string('1.0', zmq.SNDMORE) socket.send(six.b(str(ack.msg_type)), zmq.SNDMORE) socket.send(ack.reply_id, zmq.SNDMORE) socket.send_string(ack.message_id) class ReplySenderProxy(ReplySenderBase): def send(self, socket, reply): assert reply.msg_type == zmq_names.REPLY_TYPE, "Reply expected!" send_version = self._get_send_version(reply.message_version) with self._lock: send_version(socket, reply) LOG.debug("->[proxy:%(addr)s] Sending %(msg_type)s for %(msg_id)s " "(v%(msg_version)s)", {"addr": list(socket.connections), "msg_type": zmq_names.message_type_str(reply.msg_type), "msg_id": reply.message_id, "msg_version": reply.message_version}) def _send_v_1_0(self, socket, reply): socket.send(b'', zmq.SNDMORE) socket.send_string('1.0', zmq.SNDMORE) socket.send(six.b(str(reply.msg_type)), zmq.SNDMORE) socket.send(reply.reply_id, zmq.SNDMORE) socket.send_string(reply.message_id, zmq.SNDMORE) socket.send_dumped([reply.reply_body, reply.failure]) class RequestSenderDirect(RequestSenderBase): def send(self, socket, request): assert request.msg_type in zmq_names.REQUEST_TYPES, "Request expected!" send_version = self._get_send_version(request.message_version) with self._lock: send_version(socket, request) LOG.debug("Sending %(msg_type)s message %(msg_id)s to " "target %(target)s (v%(msg_version)s)", {"msg_type": zmq_names.message_type_str(request.msg_type), "msg_id": request.message_id, "target": request.target, "msg_version": request.message_version}) def _send_v_1_0(self, socket, request): flags = zmq.NOBLOCK if self.async else 0 socket.send(b'', zmq.SNDMORE | flags) socket.send_string('1.0', zmq.SNDMORE | flags) socket.send(six.b(str(request.msg_type)), zmq.SNDMORE | flags) socket.send_string(request.message_id, zmq.SNDMORE | flags) socket.send_dumped([request.context, request.message], flags) class AckSenderDirect(AckSenderBase): def send(self, socket, ack): assert ack.msg_type == zmq_names.ACK_TYPE, "Ack expected!" send_version = self._get_send_version(ack.message_version) with self._lock: send_version(socket, ack) LOG.debug("Sending %(msg_type)s for %(msg_id)s (v%(msg_version)s)", {"msg_type": zmq_names.message_type_str(ack.msg_type), "msg_id": ack.message_id, "msg_version": ack.message_version}) def _send_v_1_0(self, socket, ack): raise NotImplementedError() class ReplySenderDirect(ReplySenderBase): def send(self, socket, reply): assert reply.msg_type == zmq_names.REPLY_TYPE, "Reply expected!" 
send_version = self._get_send_version(reply.message_version) with self._lock: send_version(socket, reply) LOG.debug("Sending %(msg_type)s for %(msg_id)s (v%(msg_version)s)", {"msg_type": zmq_names.message_type_str(reply.msg_type), "msg_id": reply.message_id, "msg_version": reply.message_version}) def _send_v_1_0(self, socket, reply): socket.send(reply.reply_id, zmq.SNDMORE) socket.send(b'', zmq.SNDMORE) socket.send_string('1.0', zmq.SNDMORE) socket.send(six.b(str(reply.msg_type)), zmq.SNDMORE) socket.send_string(reply.message_id, zmq.SNDMORE) socket.send_dumped([reply.reply_body, reply.failure]) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/0000775000175100017510000000000013224676256027243 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_publisher_base.py0000666000175100017510000000564213224676046033501 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six import oslo_messaging from oslo_messaging._drivers.zmq_driver import zmq_async zmq = zmq_async.import_zmq() @six.add_metaclass(abc.ABCMeta) class PublisherBase(object): """Abstract publisher class Each publisher from zmq-driver client should implement this interface to serve as a messages publisher. Publisher can send request objects from zmq_request. """ def __init__(self, sockets_manager, sender, receiver): """Construct publisher. Accept sockets manager, sender and receiver objects. :param sockets_manager: sockets manager object :type sockets_manager: zmq_sockets_manager.SocketsManager :param sender: request sender object :type sender: zmq_senders.RequestSenderBase :param receiver: response receiver object :type receiver: zmq_receivers.ReceiverBase """ self.sockets_manager = sockets_manager self.conf = sockets_manager.conf self.matchmaker = sockets_manager.matchmaker self.sender = sender self.receiver = receiver @abc.abstractmethod def acquire_connection(self, request): """Get socket to publish request on it. :param request: request object :type senders: zmq_request.Request """ @abc.abstractmethod def send_request(self, socket, request): """Publish request on a socket. :param socket: socket object to publish request on :type socket: zmq_socket.ZmqSocket :param request: request object :type senders: zmq_request.Request """ @abc.abstractmethod def receive_reply(self, socket, request): """Wait for a reply via the socket used for sending the request. :param socket: socket object to receive reply from :type socket: zmq_socket.ZmqSocket :param request: request object :type senders: zmq_request.Request """ @staticmethod def _raise_timeout(request): raise oslo_messaging.MessagingTimeout( "Timeout %(tout)s seconds was reached for message %(msg_id)s" % {"tout": request.timeout, "msg_id": request.message_id}) def cleanup(self): """Cleanup publisher: stop receiving responses, close allocated connections etc. 
""" self.receiver.stop() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/__init__.py0000666000175100017510000000000013224676046031341 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/0000775000175100017510000000000013224676256030477 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_direct.pyoslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publish0000666000175100017510000001527013224676046034277 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import tenacity from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ import zmq_dealer_publisher_base from oslo_messaging._drivers.zmq_driver.client import zmq_receivers from oslo_messaging._drivers.zmq_driver.client import zmq_routing_table from oslo_messaging._drivers.zmq_driver.client import zmq_senders from oslo_messaging._drivers.zmq_driver.client import zmq_sockets_manager from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class DealerPublisherDirect(zmq_dealer_publisher_base.DealerPublisherBase): """DEALER-publisher using direct dynamic connections. Publishing directly to remote services assumes the following: - All direct connections are dynamic - so they live per message, thus each message send executes the following: * Open a new socket * Connect to some host got from the RoutingTable * Send message(s) * Close connection, destroy socket - RoutingTable/RoutingTableUpdater implements local cache of matchmaker (e.g. Redis) for target resolution to the list of available hosts. Cache updates in a background thread. - Caching of connections is not appropriate for directly connected OS services, because finally it results in a full-mesh of connections between services. - Yes we lose on performance opening and closing connections for each message, but that is done intentionally to implement the dynamic connections concept. The key thought here is to have minimum number of connected services at the moment. 
    - Using the local RoutingTable cache optimizes access to the matchmaker,
      so the matchmaker is not queried for every single message.
    """

    def __init__(self, conf, matchmaker):
        sender = zmq_senders.RequestSenderDirect(conf, async=True)
        receiver = zmq_receivers.ReceiverDirect(conf)
        super(DealerPublisherDirect, self).__init__(conf, matchmaker,
                                                    sender, receiver)
        self.routing_table = zmq_routing_table.RoutingTableAdaptor(
            conf, matchmaker, zmq.ROUTER)

    def _get_round_robin_host_connection(self, target, socket):
        host = self.routing_table.get_round_robin_host(target)
        socket.connect_to_host(host)
        failover_hosts = self.routing_table.get_all_round_robin_hosts(target)
        upper_bound = self.conf.oslo_messaging_zmq.zmq_failover_connections
        for host in failover_hosts[:upper_bound]:
            socket.connect_to_host(host)

    def _get_fanout_connection(self, target, socket):
        for host in self.routing_table.get_fanout_hosts(target):
            socket.connect_to_host(host)

    def acquire_connection(self, request):
        if request.msg_type in zmq_names.DIRECT_TYPES:
            socket = self.sockets_manager.get_socket()
            self._get_round_robin_host_connection(request.target, socket)
            return socket
        elif request.msg_type in zmq_names.MULTISEND_TYPES:
            socket = self.sockets_manager.get_socket(immediate=False)
            self._get_fanout_connection(request.target, socket)
            return socket

    def _finally_unregister(self, socket, request):
        super(DealerPublisherDirect, self)._finally_unregister(socket,
                                                               request)
        self.receiver.unregister_socket(socket)

    def send_request(self, socket, request):
        if hasattr(request, 'timeout'):
            _stop = tenacity.stop_after_delay(request.timeout)
        elif request.retry is not None and request.retry > 0:
            # notifications do not have the rpc_response_timeout option,
            # so fall back to the requested retry count
            _stop = tenacity.stop_after_attempt(request.retry)
        else:
            # neither a timeout nor a retry count is available, so fall
            # back to a fixed retry window
            _stop = tenacity.stop_after_delay(60)

        @tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
                        stop=_stop)
        def send_retrying():
            if request.msg_type in zmq_names.MULTISEND_TYPES:
                for _ in range(socket.connections_count()):
                    self.sender.send(socket, request)
            else:
                self.sender.send(socket, request)
        return send_retrying()

    def cleanup(self):
        self.routing_table.cleanup()
        super(DealerPublisherDirect, self).cleanup()


class DealerPublisherDirectStatic(DealerPublisherDirect):
    """DEALER-publisher using direct static connections.

    Direct static connections can also be useful. Consider a deployment
    where some agents do not talk to the control services over RPC (e.g.
    Ironic, or Cinder with Ceph) and RPC is used only between controllers.
    The number of RPC connections is then very small, so static connections
    can be used safely while keeping all of their performance benefits.
""" def __init__(self, conf, matchmaker): super(DealerPublisherDirectStatic, self).__init__(conf, matchmaker) self.fanout_sockets = zmq_sockets_manager.SocketsManager( conf, matchmaker, zmq.DEALER) def acquire_connection(self, request): target_key = zmq_address.target_to_key( request.target, zmq_names.socket_type_str(zmq.ROUTER)) if request.msg_type in zmq_names.MULTISEND_TYPES: hosts = self.routing_table.get_fanout_hosts(request.target) return self.fanout_sockets.get_cached_socket(target_key, hosts, immediate=False) else: hosts = self.routing_table.get_all_round_robin_hosts( request.target) return self.sockets_manager.get_cached_socket(target_key, hosts) def _finally_unregister(self, socket, request): self.receiver.untrack_request(request) def cleanup(self): self.fanout_sockets.cleanup() super(DealerPublisherDirectStatic, self).cleanup() ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_proxy.pyoslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publish0000666000175100017510000001231413224676046034273 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import uuid import six from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \ import zmq_dealer_publisher_base from oslo_messaging._drivers.zmq_driver.client import zmq_receivers from oslo_messaging._drivers.zmq_driver.client import zmq_routing_table from oslo_messaging._drivers.zmq_driver.client import zmq_senders from oslo_messaging._drivers.zmq_driver.matchmaker import zmq_matchmaker_base from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_updater zmq = zmq_async.import_zmq() class DealerPublisherProxy(zmq_dealer_publisher_base.DealerPublisherBase): """DEALER-publisher via proxy.""" def __init__(self, conf, matchmaker): sender = zmq_senders.RequestSenderProxy(conf) receiver = zmq_receivers.ReceiverProxy(conf) super(DealerPublisherProxy, self).__init__(conf, matchmaker, sender, receiver) self.socket = self.sockets_manager.get_socket_to_publishers( self._generate_identity()) self.routing_table = zmq_routing_table.RoutingTableAdaptor( conf, matchmaker, zmq.DEALER) self.connection_updater = PublisherConnectionUpdater( self.conf, self.matchmaker, self.socket) def _generate_identity(self): return six.b(self.conf.oslo_messaging_zmq.rpc_zmq_host + "/" + str(uuid.uuid4())) def _check_reply(self, reply, request): super(DealerPublisherProxy, self)._check_reply(reply, request) assert reply.reply_id == request.routing_key, \ "Reply from recipient expected!" 
def _get_routing_keys(self, request): if request.msg_type in zmq_names.DIRECT_TYPES: return [self.routing_table.get_round_robin_host(request.target)] else: return \ [zmq_address.target_to_subscribe_filter(request.target)] \ if self.conf.oslo_messaging_zmq.use_pub_sub else \ self.routing_table.get_fanout_hosts(request.target) def acquire_connection(self, request): return self.socket def send_request(self, socket, request): for routing_key in self._get_routing_keys(request): request.routing_key = routing_key self.sender.send(socket, request) def cleanup(self): self.connection_updater.stop() self.routing_table.cleanup() super(DealerPublisherProxy, self).cleanup() class PublisherConnectionUpdater(zmq_updater.ConnectionUpdater): def _update_connection(self): publishers = self.matchmaker.get_publishers() for pub_address, fe_router_address in publishers: self.socket.connect_to_host(fe_router_address) class DealerPublisherProxyDynamic( zmq_dealer_publisher_base.DealerPublisherBase): def __init__(self, conf, matchmaker): sender = zmq_senders.RequestSenderProxy(conf) receiver = zmq_receivers.ReceiverDirect(conf) super(DealerPublisherProxyDynamic, self).__init__(conf, matchmaker, sender, receiver) self.publishers = set() self.updater = DynamicPublishersUpdater(conf, matchmaker, self.publishers) self.updater.update_publishers() def acquire_connection(self, request): if not self.publishers: raise zmq_matchmaker_base.MatchmakerUnavailable() socket = self.sockets_manager.get_socket() publishers = list(self.publishers) random.shuffle(publishers) for publisher in publishers: socket.connect_to_host(publisher) return socket def send_request(self, socket, request): request.routing_key = \ zmq_address.target_to_subscribe_filter(request.target) self.sender.send(socket, request) def cleanup(self): self.updater.cleanup() super(DealerPublisherProxyDynamic, self).cleanup() class DynamicPublishersUpdater(zmq_updater.UpdaterBase): def __init__(self, conf, matchmaker, publishers): super(DynamicPublishersUpdater, self).__init__( conf, matchmaker, self.update_publishers, sleep_for=conf.oslo_messaging_zmq.zmq_target_update ) self.publishers = publishers def update_publishers(self): publishers = self.matchmaker.get_publishers() for pub_address, fe_router_address in publishers: self.publishers.add(fe_router_address) ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_base.pyoslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publish0000666000175100017510000000503613224676046034276 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
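# NOTE: illustrative sketch, not part of the original driver code.
# DynamicPublishersUpdater above folds every (pub_address, fe_router_address)
# pair reported by the matchmaker into a shared set, and the dynamic
# publisher shuffles that set and connects its per-message socket to each
# front-end ROUTER address.  With a hypothetical matchmaker answer of
#
#     [('tcp://10.0.0.1:5561', 'tcp://10.0.0.1:5560'),
#      ('tcp://10.0.0.2:5561', 'tcp://10.0.0.2:5560')]
#
# the publishers set becomes
#
#     {'tcp://10.0.0.1:5560', 'tcp://10.0.0.2:5560'}
#
# i.e. clients only ever dial the front-end ROUTER side of each proxy.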
from concurrent import futures import logging from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers.zmq_driver.client.publishers \ import zmq_publisher_base from oslo_messaging._drivers.zmq_driver.client import zmq_response from oslo_messaging._drivers.zmq_driver.client import zmq_sockets_manager from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._i18n import _LE LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class DealerPublisherBase(zmq_publisher_base.PublisherBase): """Abstract DEALER-publisher.""" def __init__(self, conf, matchmaker, sender, receiver): sockets_manager = zmq_sockets_manager.SocketsManager( conf, matchmaker, zmq.DEALER) super(DealerPublisherBase, self).__init__( sockets_manager, sender, receiver) def _check_reply(self, reply, request): assert isinstance(reply, zmq_response.Reply), "Reply expected!" def _finally_unregister(self, socket, request): self.receiver.untrack_request(request) def receive_reply(self, socket, request): self.receiver.register_socket(socket) _, reply_future = self.receiver.track_request(request) try: reply = reply_future.result(timeout=request.timeout) self._check_reply(reply, request) except AssertionError: LOG.error(_LE("Message format error in reply for %s"), request.message_id) return None except futures.TimeoutError: self._raise_timeout(request) finally: self._finally_unregister(socket, request) if reply.failure: raise rpc_common.deserialize_remote_exception( reply.failure, request.allowed_remote_exmods) else: return reply.reply_body def cleanup(self): super(DealerPublisherBase, self).cleanup() self.sockets_manager.cleanup() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/__init__.py0000666000175100017510000000000013224676046032575 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/__init__.py0000666000175100017510000000000013224676046027161 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_response.py0000666000175100017510000000453513224676046030170 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
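# NOTE: illustrative sketch, not part of the original driver code.
# DealerPublisherBase.receive_reply above resolves a reply through futures:
# the receiver's polling thread fulfils the reply future, while the calling
# thread blocks on it for at most request.timeout seconds.  A condensed,
# assumption-laden rendering of that handshake (arguments are duck-typed):

def _sketch_receive_reply(receiver, socket, request):
    """Wait for the reply to a CALL request (sketch only)."""
    receiver.register_socket(socket)
    _ack_future, reply_future = receiver.track_request(request)
    try:
        # Blocks until ReceiverBase._run_loop() calls set_result() on the
        # future, or raises futures.TimeoutError on expiry.
        return reply_future.result(timeout=request.timeout)
    finally:
        receiver.untrack_request(request)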
import abc import six from oslo_messaging._drivers.zmq_driver import zmq_names @six.add_metaclass(abc.ABCMeta) class Response(object): def __init__(self, message_id=None, reply_id=None, message_version=None): if self.msg_type not in zmq_names.RESPONSE_TYPES: raise RuntimeError("Unknown response type!") self._message_id = message_id self._reply_id = reply_id self._message_version = message_version @abc.abstractproperty def msg_type(self): """ZMQ response type""" @property def message_id(self): return self._message_id @property def reply_id(self): return self._reply_id @property def message_version(self): return self._message_version def to_dict(self): return {zmq_names.FIELD_MSG_ID: self._message_id, zmq_names.FIELD_REPLY_ID: self._reply_id, zmq_names.FIELD_MSG_VERSION: self._message_version} def __str__(self): return str(self.to_dict()) class Ack(Response): msg_type = zmq_names.ACK_TYPE class Reply(Response): msg_type = zmq_names.REPLY_TYPE def __init__(self, message_id=None, reply_id=None, message_version=None, reply_body=None, failure=None): super(Reply, self).__init__(message_id, reply_id, message_version) self._reply_body = reply_body self._failure = failure @property def reply_body(self): return self._reply_body @property def failure(self): return self._failure def to_dict(self): dict_ = super(Reply, self).to_dict() dict_.update({zmq_names.FIELD_REPLY_BODY: self._reply_body, zmq_names.FIELD_FAILURE: self._failure}) return dict_ oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_ack_manager.py0000666000175100017510000001041113224676046030550 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from concurrent import futures import logging from oslo_messaging._drivers.zmq_driver.client import zmq_publisher_manager from oslo_messaging._drivers.zmq_driver.client import zmq_response from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._i18n import _LE, _LW LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class AckManager(zmq_publisher_manager.PublisherManagerBase): def __init__(self, publisher): super(AckManager, self).__init__(publisher, with_pool=True) @staticmethod def _check_ack(ack, request): if ack is not None: assert isinstance(ack, zmq_response.Ack), "Ack expected!" assert ack.reply_id == request.routing_key, \ "Ack from recipient expected!" 
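    # NOTE: illustrative sketch, not part of the original driver code.
    # zmq_response.Reply above is a plain value object; the field values
    # below are made up for the example:
    #
    #     reply = zmq_response.Reply(message_id='abc-123',
    #                                reply_id=b'worker-1',
    #                                message_version='1.0',
    #                                reply_body={'result': 42},
    #                                failure=None)
    #     reply.to_dict()
    #     # -> {'message_id': 'abc-123', 'reply_id': b'worker-1',
    #     #     'message_version': '1.0', 'reply_body': {'result': 42},
    #     #     'failure': None}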
def _wait_for_ack(self, request, ack_future=None): if ack_future is None: ack_future = self._schedule_request_for_ack(request) retries = \ request.retry or self.conf.oslo_messaging_zmq.rpc_retry_attempts if retries is None: retries = -1 timeout = self.conf.oslo_messaging_zmq.rpc_ack_timeout_base done = ack_future is None while not done: try: ack = ack_future.result(timeout=timeout) done = True self._check_ack(ack, request) except AssertionError: LOG.error(_LE("Message format error in ack for %s"), request.message_id) except futures.TimeoutError: LOG.warning(_LW("No ack received within %(tout)s seconds " "for %(msg_id)s"), {"tout": timeout, "msg_id": request.message_id}) if retries != 0: if retries > 0: retries -= 1 self.sender.send(ack_future.socket, request) timeout *= \ self.conf.oslo_messaging_zmq.rpc_ack_timeout_multiplier else: LOG.warning(_LW("Exhausted number of retries for %s"), request.message_id) done = True if request.msg_type != zmq_names.CALL_TYPE: self.receiver.untrack_request(request) @zmq_publisher_manager.target_not_found_warn def _send_request(self, request): socket = self.publisher.acquire_connection(request) self.publisher.send_request(socket, request) return socket def _schedule_request_for_ack(self, request): socket = self._send_request(request) if socket is None: return None self.receiver.register_socket(socket) ack_future, _ = self.receiver.track_request(request) ack_future.socket = socket return ack_future def send_call(self, request): ack_future = self._schedule_request_for_ack(request) if ack_future is None: self.publisher._raise_timeout(request) self.pool.submit(self._wait_for_ack, request, ack_future) try: return self.publisher.receive_reply(ack_future.socket, request) finally: if not ack_future.done(): ack_future.set_result(None) def send_cast(self, request): self.pool.submit(self._wait_for_ack, request) send_fanout = _send_request send_notify = _send_request class AckManagerAsyncMultisend(AckManager): def _send_request_async(self, request): self.pool.submit(self._send_request, request) send_fanout = _send_request_async send_notify = _send_request_async oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_sockets_manager.py0000666000175100017510000000661013224676046031473 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
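# NOTE: illustrative sketch, not part of the original driver code.
# AckManager._wait_for_ack above retries with an exponentially growing
# timeout: every missed ack multiplies the wait by
# rpc_ack_timeout_multiplier and consumes one retry (a negative retry
# budget means "retry forever").  The schedule in isolation, with assumed
# option values:

def _sketch_ack_timeouts(base=15, multiplier=2, retries=3):
    """Yield the successive ack waits: the first attempt plus one wait per
    retry, e.g. 15, 30, 60, 120 for retries=3; retries < 0 never stops.
    """
    yield base
    timeout = base
    while retries != 0:
        if retries > 0:
            retries -= 1
        timeout *= multiplier
        yield timeout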
import logging from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_socket zmq = zmq_async.import_zmq() LOG = logging.getLogger(__name__) class SocketsManager(object): def __init__(self, conf, matchmaker, socket_type): self.conf = conf self.matchmaker = matchmaker self.socket_type = socket_type self.zmq_context = zmq.Context() self.socket_to_publishers = None self.socket_to_routers = None self.sockets = {} def get_socket(self, immediate=True): return zmq_socket.ZmqSocket(self.conf, self.zmq_context, self.socket_type, immediate=immediate) def get_cached_socket(self, target_key, hosts=None, immediate=True): hosts = [] if hosts is None else hosts socket = self.sockets.get(target_key, None) if socket is None: LOG.debug("CREATING NEW socket for target_key %s " % target_key) socket = zmq_socket.ZmqSocket(self.conf, self.zmq_context, self.socket_type, immediate=immediate) self.sockets[target_key] = socket for host in hosts: socket.connect_to_host(host) LOG.debug("Target key: %s socket:%s" % (target_key, socket.handle.identity)) return socket def get_socket_to_publishers(self, identity=None): if self.socket_to_publishers is not None: return self.socket_to_publishers self.socket_to_publishers = zmq_socket.ZmqSocket( self.conf, self.zmq_context, self.socket_type, immediate=self.conf.oslo_messaging_zmq.zmq_immediate, identity=identity) publishers = self.matchmaker.get_publishers() for pub_address, fe_router_address in publishers: self.socket_to_publishers.connect_to_host(fe_router_address) return self.socket_to_publishers def get_socket_to_routers(self, identity=None): if self.socket_to_routers is not None: return self.socket_to_routers self.socket_to_routers = zmq_socket.ZmqSocket( self.conf, self.zmq_context, self.socket_type, immediate=self.conf.oslo_messaging_zmq.zmq_immediate, identity=identity) routers = self.matchmaker.get_routers() for be_router_address in routers: self.socket_to_routers.connect_to_host(be_router_address) return self.socket_to_routers def cleanup(self): if self.socket_to_publishers: self.socket_to_publishers.close() if self.socket_to_routers: self.socket_to_routers.close() for socket in self.sockets.values(): socket.close() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_publisher_manager.py0000666000175100017510000001270613224676046032020 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import contextlib import logging import six import tenacity from oslo_messaging._drivers.zmq_driver.matchmaker import zmq_matchmaker_base from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._i18n import _LW LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() def _drop_message_warn(request): LOG.warning(_LW("Matchmaker contains no records for specified " "target %(target)s. 
Dropping message %(msg_id)s."), {"target": request.target, "msg_id": request.message_id}) def target_not_found_warn(func): def _target_not_found_warn(self, request, *args, **kwargs): try: return func(self, request, *args, **kwargs) except (zmq_matchmaker_base.MatchmakerUnavailable, tenacity.RetryError): _drop_message_warn(request) return _target_not_found_warn def target_not_found_timeout(func): def _target_not_found_timeout(self, request, *args, **kwargs): try: return func(self, request, *args, **kwargs) except (zmq_matchmaker_base.MatchmakerUnavailable, tenacity.RetryError): _drop_message_warn(request) self.publisher._raise_timeout(request) return _target_not_found_timeout @six.add_metaclass(abc.ABCMeta) class PublisherManagerBase(object): """Abstract publisher manager class Publisher knows how to establish connection, how to send message, and how to receive reply. PublisherManager coordinates all these steps regarding retrying logic in AckManager implementations. May also have an additional thread pool for scheduling background tasks. """ def __init__(self, publisher, with_pool=False): self.publisher = publisher self.conf = publisher.conf self.sender = publisher.sender self.receiver = publisher.receiver if with_pool: self.pool = zmq_async.get_pool( size=self.conf.oslo_messaging_zmq.rpc_thread_pool_size ) else: self.pool = None @abc.abstractmethod def send_call(self, request): """Send call request :param request: request object :type request: zmq_request.CallRequest """ @abc.abstractmethod def send_cast(self, request): """Send cast request :param request: request object :type request: zmq_request.CastRequest """ @abc.abstractmethod def send_fanout(self, request): """Send fanout request :param request: request object :type request: zmq_request.FanoutRequest """ @abc.abstractmethod def send_notify(self, request): """Send notification request :param request: request object :type request: zmq_request.NotificationRequest """ def cleanup(self): if self.pool: self.pool.shutdown(wait=True) self.publisher.cleanup() class PublisherManagerDynamic(PublisherManagerBase): @target_not_found_timeout def send_call(self, request): with contextlib.closing(self.publisher.acquire_connection(request)) \ as socket: self.publisher.send_request(socket, request) reply = self.publisher.receive_reply(socket, request) return reply @target_not_found_warn def _send(self, request): with contextlib.closing(self.publisher.acquire_connection(request)) \ as socket: self.publisher.send_request(socket, request) send_cast = _send send_fanout = _send send_notify = _send class PublisherManagerDynamicAsyncMultisend(PublisherManagerDynamic): def __init__(self, publisher): super(PublisherManagerDynamicAsyncMultisend, self).__init__( publisher, with_pool=True ) def _send_async(self, request): self.pool.submit(self._send, request) send_fanout = _send_async send_notify = _send_async class PublisherManagerStatic(PublisherManagerBase): @target_not_found_timeout def send_call(self, request): socket = self.publisher.acquire_connection(request) self.publisher.send_request(socket, request) reply = self.publisher.receive_reply(socket, request) return reply @target_not_found_warn def _send(self, request): socket = self.publisher.acquire_connection(request) self.publisher.send_request(socket, request) send_cast = _send send_fanout = _send send_notify = _send class PublisherManagerStaticAsyncMultisend(PublisherManagerStatic): def __init__(self, publisher): super(PublisherManagerStaticAsyncMultisend, self).__init__( publisher, with_pool=True ) 
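    # NOTE: illustrative aside, not part of the original driver code.
    # The module-level decorators above encode the delivery contract of
    # each manager: send_call is wrapped with target_not_found_timeout, so
    # a matchmaker miss surfaces to the caller as a timeout, while the
    # cast/fanout/notify paths use target_not_found_warn, so the message
    # is dropped with only a warning.  Roughly:
    #
    #     @target_not_found_timeout      # call: absence must surface
    #     def send_call(self, request): ...
    #
    #     @target_not_found_warn         # cast: fire-and-forget may drop
    #     def _send(self, request): ...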
def _send_async(self, request): self.pool.submit(self._send, request) send_fanout = _send_async send_notify = _send_async oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/client/zmq_receivers.py0000666000175100017510000001610013224676046030310 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import logging import threading import futurist import six from oslo_messaging._drivers.zmq_driver.client import zmq_response from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging._i18n import _LE LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() def suppress_errors(func): @six.wraps(func) def silent_func(self, socket): try: return func(self, socket) except Exception as e: LOG.error(_LE("Receiving message failed: %r"), e) # NOTE(gdavoian): drop the left parts of a broken message, since # they most likely will lead to additional exceptions if socket.getsockopt(zmq.RCVMORE): socket.recv_multipart() return silent_func @six.add_metaclass(abc.ABCMeta) class ReceiverBase(object): """Base response receiving interface.""" def __init__(self, conf): self.conf = conf self._lock = threading.Lock() self._requests = {} self._poller = zmq_async.get_poller() self._receive_response_versions = \ zmq_version.get_method_versions(self, 'receive_response') self._executor = zmq_async.get_executor(self._run_loop) self._executor.execute() def register_socket(self, socket): """Register a socket for receiving data.""" self._poller.register(socket, self.receive_response) def unregister_socket(self, socket): """Unregister a socket from receiving data.""" self._poller.unregister(socket) @abc.abstractmethod def receive_response(self, socket): """Receive a response (ack or reply) and return it.""" def track_request(self, request): """Track a request via already registered sockets and return a pair of ack and reply futures for monitoring all possible types of responses for the given request. 
""" message_id = request.message_id futures = self._get_futures(message_id) if futures is None: ack_future = reply_future = None if self.conf.oslo_messaging_zmq.rpc_use_acks: ack_future = futurist.Future() if request.msg_type == zmq_names.CALL_TYPE: reply_future = futurist.Future() futures = (ack_future, reply_future) self._set_futures(message_id, futures) return futures def untrack_request(self, request): """Untrack a request and stop monitoring any responses.""" self._pop_futures(request.message_id) def stop(self): self._poller.close() self._executor.stop() def _get_futures(self, message_id): with self._lock: return self._requests.get(message_id) def _set_futures(self, message_id, futures): with self._lock: self._requests[message_id] = futures def _pop_futures(self, message_id): with self._lock: return self._requests.pop(message_id, None) def _run_loop(self): response, socket = \ self._poller.poll(self.conf.oslo_messaging_zmq.rpc_poll_timeout) if response is None: return message_type, message_id = response.msg_type, response.message_id futures = self._get_futures(message_id) if futures is not None: ack_future, reply_future = futures if message_type == zmq_names.REPLY_TYPE: reply_future.set_result(response) else: ack_future.set_result(response) LOG.debug("Received %(msg_type)s for %(msg_id)s", {"msg_type": zmq_names.message_type_str(message_type), "msg_id": message_id}) def _get_receive_response_version(self, version): receive_response_version = self._receive_response_versions.get(version) if receive_response_version is None: raise zmq_version.UnsupportedMessageVersionError(version) return receive_response_version class ReceiverProxy(ReceiverBase): @suppress_errors def receive_response(self, socket): empty = socket.recv() assert empty == b'', "Empty delimiter expected!" message_version = socket.recv_string() assert message_version != b'', "Valid message version expected!" receive_response_version = \ self._get_receive_response_version(message_version) return receive_response_version(socket) def _receive_response_v_1_0(self, socket): reply_id = socket.recv() assert reply_id != b'', "Valid reply id expected!" message_type = int(socket.recv()) assert message_type in zmq_names.RESPONSE_TYPES, "Response expected!" message_id = socket.recv_string() assert message_id != '', "Valid message id expected!" if message_type == zmq_names.REPLY_TYPE: reply_body, failure = socket.recv_loaded() reply = zmq_response.Reply(message_id=message_id, reply_id=reply_id, reply_body=reply_body, failure=failure) return reply else: ack = zmq_response.Ack(message_id=message_id, reply_id=reply_id) return ack class ReceiverDirect(ReceiverBase): @suppress_errors def receive_response(self, socket): empty = socket.recv() assert empty == b'', "Empty delimiter expected!" message_version = socket.recv_string() assert message_version != b'', "Valid message version expected!" receive_response_version = \ self._get_receive_response_version(message_version) return receive_response_version(socket) def _receive_response_v_1_0(self, socket): message_type = int(socket.recv()) assert message_type in zmq_names.RESPONSE_TYPES, "Response expected!" message_id = socket.recv_string() assert message_id != '', "Valid message id expected!" 
if message_type == zmq_names.REPLY_TYPE: reply_body, failure = socket.recv_loaded() reply = zmq_response.Reply(message_id=message_id, reply_body=reply_body, failure=failure) return reply else: ack = zmq_response.Ack(message_id=message_id) return ack oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_socket.py0000666000175100017510000002304613224676046026342 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import uuid import six from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._i18n import _LE from oslo_messaging import exceptions from oslo_serialization.serializer import json_serializer from oslo_serialization.serializer import msgpack_serializer LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class ZmqSocket(object): SERIALIZERS = { 'json': json_serializer.JSONSerializer(), 'msgpack': msgpack_serializer.MessagePackSerializer() } def __init__(self, conf, context, socket_type, immediate, high_watermark=0, identity=None): self.conf = conf self.context = context self.socket_type = socket_type self.handle = context.socket(socket_type) self.handle.set_hwm(high_watermark) # Set linger period linger = -1 if self.conf.oslo_messaging_zmq.zmq_linger >= 0: # Convert seconds to milliseconds linger = self.conf.oslo_messaging_zmq.zmq_linger * 1000 self.handle.setsockopt(zmq.LINGER, linger) # Put messages to only connected queues self.handle.setsockopt(zmq.IMMEDIATE, 1 if immediate else 0) # Setup timeout on socket sending if hasattr(self.conf, 'rpc_response_timeout'): self.handle.setsockopt(zmq.SNDTIMEO, self.conf.rpc_response_timeout * 1000) # Configure TCP keep alive keepalive = self.conf.oslo_messaging_zmq.zmq_tcp_keepalive if keepalive < 0: keepalive = -1 elif keepalive > 0: keepalive = 1 self.handle.setsockopt(zmq.TCP_KEEPALIVE, keepalive) keepalive_idle = self.conf.oslo_messaging_zmq.zmq_tcp_keepalive_idle if keepalive_idle <= 0: keepalive_idle = -1 self.handle.setsockopt(zmq.TCP_KEEPALIVE_IDLE, keepalive_idle) keepalive_cnt = self.conf.oslo_messaging_zmq.zmq_tcp_keepalive_cnt if keepalive_cnt <= 0: keepalive_cnt = -1 self.handle.setsockopt(zmq.TCP_KEEPALIVE_CNT, keepalive_cnt) keepalive_intvl = self.conf.oslo_messaging_zmq.zmq_tcp_keepalive_intvl if keepalive_intvl <= 0: keepalive_intvl = -1 self.handle.setsockopt(zmq.TCP_KEEPALIVE_INTVL, keepalive_intvl) self.handle.identity = \ six.b(str(uuid.uuid4())) if identity is None else identity self.connections = set() def _get_serializer(self, serialization): serializer = self.SERIALIZERS.get(serialization, None) if serializer is None: raise NotImplementedError( "Serialization '{}' is not supported".format(serialization) ) return serializer def type_name(self): return zmq_names.socket_type_str(self.socket_type) def connections_count(self): return len(self.connections) def 
connect(self, address): if address not in self.connections: self.handle.connect(address) self.connections.add(address) def setsockopt(self, *args, **kwargs): self.handle.setsockopt(*args, **kwargs) def setsockopt_string(self, *args, **kwargs): self.handle.setsockopt_string(*args, **kwargs) def getsockopt(self, *args, **kwargs): return self.handle.getsockopt(*args, **kwargs) def getsockopt_string(self, *args, **kwargs): return self.handle.getsockopt_string(*args, **kwargs) def send(self, *args, **kwargs): self.handle.send(*args, **kwargs) def send_string(self, u, *args, **kwargs): # NOTE(ozamiatin): Not using send_string until # eventlet zmq support this convenience method # in thread-safe manner encoding = kwargs.pop('encoding', 'utf-8') s = u.encode(encoding) if isinstance(u, six.text_type) else u self.handle.send(s, *args, **kwargs) def send_json(self, *args, **kwargs): self.handle.send_json(*args, **kwargs) def send_pyobj(self, *args, **kwargs): self.handle.send_pyobj(*args, **kwargs) def send_multipart(self, *args, **kwargs): self.handle.send_multipart(*args, **kwargs) def send_dumped(self, obj, *args, **kwargs): serialization = kwargs.pop( 'serialization', self.conf.oslo_messaging_zmq.rpc_zmq_serialization) serializer = self._get_serializer(serialization) s = serializer.dump_as_bytes(obj) self.handle.send(s, *args, **kwargs) def recv(self, *args, **kwargs): return self.handle.recv(*args, **kwargs) def recv_string(self, *args, **kwargs): # NOTE(ozamiatin): Not using recv_string until # eventlet zmq support this convenience method # in thread-safe manner encoding = kwargs.pop('encoding', 'utf-8') s = self.handle.recv(*args, **kwargs) u = s.decode(encoding) if isinstance(s, six.binary_type) else s return u def recv_json(self, *args, **kwargs): return self.handle.recv_json(*args, **kwargs) def recv_pyobj(self, *args, **kwargs): return self.handle.recv_pyobj(*args, **kwargs) def recv_multipart(self, *args, **kwargs): return self.handle.recv_multipart(*args, **kwargs) def recv_loaded(self, *args, **kwargs): serialization = kwargs.pop( 'serialization', self.conf.oslo_messaging_zmq.rpc_zmq_serialization) serializer = self._get_serializer(serialization) s = self.handle.recv(*args, **kwargs) obj = serializer.load_from_bytes(s) return obj def close(self, *args, **kwargs): identity = self.handle.identity self.handle.close(*args, **kwargs) LOG.debug("Socket %s closed" % identity) def connect_to_address(self, address): if address in self.connections: return stype = zmq_names.socket_type_str(self.socket_type) sid = self.handle.identity try: LOG.debug("Connecting %(stype)s socket %(sid)s to %(address)s", {"stype": stype, "sid": sid, "address": address}) self.connect(address) except zmq.ZMQError as e: LOG.error(_LE("Failed connecting %(stype)s-%(sid)s to " "%(address)s: %(e)s"), {"stype": stype, "sid": sid, "address": address, "e": e}) raise rpc_common.RPCException( "Failed connecting %(stype)s-%(sid)s to %(address)s: %(e)s" % {"stype": stype, "sid": sid, "address": address, "e": e}) def connect_to_host(self, host): address = zmq_address.get_tcp_direct_address( host.decode('utf-8') if six.PY3 and isinstance(host, six.binary_type) else host ) self.connect_to_address(address) class ZmqPortBusy(exceptions.MessagingException): """Raised when binding to a port failure""" def __init__(self, port_number): super(ZmqPortBusy, self).__init__() self.port_number = port_number class ZmqRandomPortSocket(ZmqSocket): def __init__(self, conf, context, socket_type, host=None, high_watermark=0, identity=None): 
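        # NOTE: illustrative aside, not part of the original driver code.
        # bind_to_random_port() below walks the configured
        # [rpc_zmq_min_port, rpc_zmq_max_port) range and gives up after
        # rpc_zmq_bind_port_retries attempts; that failure surfaces here
        # as ZmqPortBusy via the ZMQBindError handler.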
        super(ZmqRandomPortSocket, self).__init__(
            conf, context, socket_type, immediate=False,
            high_watermark=high_watermark, identity=identity)
        self.bind_address = zmq_address.get_tcp_random_address(self.conf)

        if host is None:
            host = conf.oslo_messaging_zmq.rpc_zmq_host
        try:
            self.port = self.handle.bind_to_random_port(
                self.bind_address,
                min_port=conf.oslo_messaging_zmq.rpc_zmq_min_port,
                max_port=conf.oslo_messaging_zmq.rpc_zmq_max_port,
                max_tries=conf.oslo_messaging_zmq.rpc_zmq_bind_port_retries)
            self.connect_address = zmq_address.combine_address(host,
                                                               self.port)
        except zmq.ZMQBindError:
            LOG.error(_LE("Random ports range exceeded!"))
            raise ZmqPortBusy(port_number=0)


class ZmqFixedPortSocket(ZmqSocket):

    def __init__(self, conf, context, socket_type, host, port,
                 high_watermark=0, identity=None):
        super(ZmqFixedPortSocket, self).__init__(
            conf, context, socket_type, immediate=False,
            high_watermark=high_watermark, identity=identity)
        self.connect_address = zmq_address.combine_address(host, port)
        self.bind_address = zmq_address.get_tcp_direct_address(
            zmq_address.combine_address(
                conf.oslo_messaging_zmq.rpc_zmq_bind_address, port))
        self.host = host
        self.port = port

        try:
            self.handle.bind(self.bind_address)
        except zmq.ZMQError as e:
            LOG.exception(e)
            LOG.error(_LE("Chosen port %d is already in use."), self.port)
            raise ZmqPortBusy(port_number=port)
oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_poller.py0000666000175100017510000000576413224676046026346 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class ZmqPoller(object):
    """Base poller interface

    Polls zmq sockets in both green and native async environments.

    The native poller implementation wraps the zmq.Poller helper class.
    The wrapping is needed to provide a unified poller interface in the
    zmq driver (for both the green and the native poller). It differs from
    the poller helper of the zmq library in that the library helper does
    not actually receive messages.

    A poller object should be obtained via:

        poller = zmq_async.get_poller()

    Then sockets have to be registered for polling. A specific receive
    method may be provided; by default the poller calls
    socket.recv_multipart.

        def receive_message(socket):
            id = socket.recv_string()
            ctxt = socket.recv_json()
            msg = socket.recv_json()
            return (id, ctxt, msg)

        poller.register(socket, recv_method=receive_message)

    To receive a message we then call:

        message, socket = poller.poll()

    The 'message' here contains the (id, ctxt, msg) tuple.
""" @abc.abstractmethod def register(self, socket, recv_method=None): """Register socket to poll :param socket: Socket to subscribe for polling :type socket: ZmqSocket :param recv_method: Optional specific receiver procedure Should return received message object :type recv_method: callable """ @abc.abstractmethod def unregister(self, socket): """Unregister socket from poll :param socket: Socket to unsubscribe from polling :type socket: ZmqSocket """ @abc.abstractmethod def poll(self, timeout=None): """Poll for messages :param timeout: Optional polling timeout None or -1 means poll forever any positive value means timeout in seconds :type timeout: int :returns: (message, socket) tuple """ @abc.abstractmethod def close(self): """Terminate polling""" @six.add_metaclass(abc.ABCMeta) class Executor(object): """Base executor interface for threading/green async executors""" def __init__(self, thread): self.thread = thread @abc.abstractmethod def execute(self): """Run execution""" @abc.abstractmethod def stop(self): """Stop execution""" oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/__init__.py0000666000175100017510000000000013224676046025703 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_version.py0000666000175100017510000000422213224676046026532 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_messaging._drivers import common as rpc_common from oslo_messaging._i18n import _ # current driver's version for representing internal message format MESSAGE_VERSION = '1.0' class UnsupportedMessageVersionError(rpc_common.RPCException): msg_fmt = _("Message version %(version)s is not supported.") def __init__(self, version): super(UnsupportedMessageVersionError, self).__init__(version=version) def get_method_versions(obj, method_name): """Useful function for initializing versioned senders/receivers. Returns a dictionary of different internal versions of the given method. Assumes that the object has the particular versioned method and this method is public. Thus versions are private implementations of the method. For example, for a method 'func' methods '_func_v_1_0', '_func_v_1_5', '_func_v_2_0', etc. are assumed as its respective 1.0, 1.5, 2.0 versions. """ assert callable(getattr(obj, method_name, None)), \ "Object must have specified method!" assert not method_name.startswith('_'), "Method must be public!" method_versions = {} for attr_name in dir(obj): if attr_name == method_name: continue attr = getattr(obj, attr_name, None) if not callable(attr): continue match_obj = re.match(r'^_%s_v_(\d)_(\d)$' % method_name, attr_name) if match_obj is not None: version = '.'.join([match_obj.group(1), match_obj.group(2)]) method_versions[version] = attr return method_versions oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_names.py0000666000175100017510000000432413224676046026153 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging._drivers.zmq_driver import zmq_async zmq = zmq_async.import_zmq() FIELD_MSG_ID = 'message_id' FIELD_REPLY_ID = 'reply_id' FIELD_MSG_VERSION = 'message_version' FIELD_REPLY_BODY = 'reply_body' FIELD_FAILURE = 'failure' REPLY_ID_IDX = 0 EMPTY_IDX = 1 MESSAGE_VERSION_IDX = 2 MESSAGE_TYPE_IDX = 3 ROUTING_KEY_IDX = 4 MESSAGE_ID_IDX = 5 DEFAULT_TYPE = 0 CALL_TYPE = 1 CAST_TYPE = 2 CAST_FANOUT_TYPE = 3 NOTIFY_TYPE = 4 REPLY_TYPE = 5 ACK_TYPE = 6 REQUEST_TYPES = (CALL_TYPE, CAST_TYPE, CAST_FANOUT_TYPE, NOTIFY_TYPE) RESPONSE_TYPES = (REPLY_TYPE, ACK_TYPE) MESSAGE_TYPES = REQUEST_TYPES + RESPONSE_TYPES MULTISEND_TYPES = (CAST_FANOUT_TYPE, NOTIFY_TYPE) DIRECT_TYPES = (CALL_TYPE, CAST_TYPE) + RESPONSE_TYPES CAST_TYPES = (CAST_TYPE, CAST_FANOUT_TYPE) NOTIFY_TYPES = (NOTIFY_TYPE,) NON_BLOCKING_TYPES = CAST_TYPES + NOTIFY_TYPES def socket_type_str(socket_type): zmq_socket_str = {zmq.DEALER: "DEALER", zmq.ROUTER: "ROUTER", zmq.PUSH: "PUSH", zmq.PULL: "PULL", zmq.REQ: "REQ", zmq.REP: "REP", zmq.PUB: "PUB", zmq.SUB: "SUB"} return zmq_socket_str[socket_type] def message_type_str(message_type): msg_type_str = {CALL_TYPE: "CALL", CAST_TYPE: "CAST", CAST_FANOUT_TYPE: "CAST_FANOUT", NOTIFY_TYPE: "NOTIFY", REPLY_TYPE: "REPLY", ACK_TYPE: "ACK"} return msg_type_str.get(message_type, "UNKNOWN") oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_address.py0000666000175100017510000000251213224676046026472 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six def combine_address(host, port): return "%s:%s" % (host, port) def get_tcp_direct_address(host): return "tcp://%s" % str(host) def get_tcp_random_address(conf): return "tcp://%s" % conf.oslo_messaging_zmq.rpc_zmq_bind_address def prefix_str(key, listener_type): return listener_type + "/" + key def target_to_key(target, listener_type=None): key = target.topic if target.server and not target.fanout: # FIXME(ozamiatin): Workaround for Cinder. # Remove split when Bug #1630975 is being fixed. 
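        # NOTE: illustrative aside, not part of the original driver code.
        # Examples of the key derivation, assuming a hypothetical
        # Target(topic='compute', server='host-1@lvm'):
        #
        #     target_to_key(target)            -> 'compute/host-1'
        #     target_to_key(target, 'ROUTER')  -> 'ROUTER/compute/host-1'
        #
        # (the '@...' suffix is stripped by the workaround below)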
key += "/" + target.server.split('@')[0] return prefix_str(key, listener_type) if listener_type else key def target_to_subscribe_filter(target): return six.b(target.topic) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/0000775000175100017510000000000013224676256025113 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/zmq_server.py0000666000175100017510000001063213224676046027663 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import logging from oslo_messaging._drivers import base from oslo_messaging._drivers.zmq_driver.server.consumers\ import zmq_dealer_consumer from oslo_messaging._drivers.zmq_driver.server.consumers\ import zmq_router_consumer from oslo_messaging._drivers.zmq_driver.server.consumers\ import zmq_sub_consumer from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._i18n import _LI LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class ZmqServer(base.PollStyleListener): def __init__(self, driver, conf, matchmaker, target, poller=None): super(ZmqServer, self).__init__() self.driver = driver self.conf = conf self.matchmaker = matchmaker self.target = target self.poller = poller or zmq_async.get_poller() LOG.info(_LI('[%(host)s] Run server %(target)s'), {'host': self.conf.oslo_messaging_zmq.rpc_zmq_host, 'target': self.target}) if conf.oslo_messaging_zmq.use_router_proxy: self.router_consumer = None dealer_consumer_cls = \ zmq_dealer_consumer.DealerConsumerWithAcks \ if conf.oslo_messaging_zmq.rpc_use_acks else \ zmq_dealer_consumer.DealerConsumer self.dealer_consumer = dealer_consumer_cls(conf, self.poller, self) else: self.router_consumer = \ zmq_router_consumer.RouterConsumer(conf, self.poller, self) self.dealer_consumer = None self.sub_consumer = \ zmq_sub_consumer.SubConsumer(conf, self.poller, self) \ if conf.oslo_messaging_zmq.use_pub_sub else None self.consumers = [] if self.router_consumer is not None: self.consumers.append(self.router_consumer) if self.dealer_consumer is not None: self.consumers.append(self.dealer_consumer) if self.sub_consumer is not None: self.consumers.append(self.sub_consumer) @base.batch_poll_helper def poll(self, timeout=None): message, socket = self.poller.poll( timeout or self.conf.oslo_messaging_zmq.rpc_poll_timeout) return message def stop(self): self.poller.close() for consumer in self.consumers: consumer.stop() LOG.info(_LI('[%(host)s] Stop server %(target)s'), {'host': self.conf.oslo_messaging_zmq.rpc_zmq_host, 'target': self.target}) def cleanup(self): self.poller.close() for consumer in self.consumers: consumer.cleanup() LOG.info(_LI('[%(host)s] Destroy server %(target)s'), {'host': self.conf.oslo_messaging_zmq.rpc_zmq_host, 'target': self.target}) class ZmqNotificationServer(base.PollStyleListener): def __init__(self, driver, conf, matchmaker, targets_and_priorities): super(ZmqNotificationServer, self).__init__() self.driver = driver self.conf = conf self.matchmaker = matchmaker 
self.servers = [] self.poller = zmq_async.get_poller() self._listen(targets_and_priorities) def _listen(self, targets_and_priorities): for target, priority in targets_and_priorities: t = copy.deepcopy(target) t.topic = target.topic + '.' + priority self.servers.append(ZmqServer( self.driver, self.conf, self.matchmaker, t, self.poller)) @base.batch_poll_helper def poll(self, timeout=None): message, socket = self.poller.poll( timeout or self.conf.oslo_messaging_zmq.rpc_poll_timeout) return message def stop(self): for server in self.servers: server.stop() def cleanup(self): for server in self.servers: server.cleanup() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/zmq_incoming_message.py0000666000175100017510000000265213224676046031667 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging._drivers import base class ZmqIncomingMessage(base.RpcIncomingMessage): """Base class for RPC-messages via ZMQ-driver. Behaviour of messages is fully defined by consumers which produced them from obtained raw data. """ def __init__(self, context, message, **kwargs): super(ZmqIncomingMessage, self).__init__(context, message) self._reply_method = kwargs.pop('reply_method', lambda self, reply, failure: None) for key, value in kwargs.items(): setattr(self, key, value) def acknowledge(self): """Acknowledge is not supported.""" def reply(self, reply=None, failure=None): self._reply_method(self, reply=reply, failure=failure) def requeue(self): """Requeue is not supported.""" oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/__init__.py0000666000175100017510000000000013224676046027211 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/zmq_ttl_cache.py0000666000175100017510000000546013224676046030306 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import threading import time from oslo_messaging._drivers.zmq_driver import zmq_async LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class TTLCache(object): _UNDEFINED = object() def __init__(self, ttl=None): self._lock = threading.Lock() self._cache = {} self._executor = None if not (ttl is None or isinstance(ttl, (int, float))): raise ValueError('ttl must be None or a number') # no (i.e. 
infinite) ttl if ttl is None or ttl <= 0: ttl = float('inf') else: self._executor = zmq_async.get_executor(self._update_cache) self._ttl = ttl if self._executor: self._executor.execute() @staticmethod def _is_expired(expiration_time, current_time): return expiration_time <= current_time def add(self, key, value=None): with self._lock: expiration_time = time.time() + self._ttl self._cache[key] = (value, expiration_time) def get(self, key, default=None): with self._lock: data = self._cache.get(key) if data is None: return default value, expiration_time = data if self._is_expired(expiration_time, time.time()): del self._cache[key] return default return value def __contains__(self, key): return self.get(key, self._UNDEFINED) is not self._UNDEFINED def _update_cache(self): with self._lock: current_time = time.time() old_size = len(self._cache) self._cache = \ {key: (value, expiration_time) for key, (value, expiration_time) in self._cache.items() if not self._is_expired(expiration_time, current_time)} new_size = len(self._cache) LOG.debug('Updated cache: current size %(new_size)s ' '(%(size_difference)s records removed)', {'new_size': new_size, 'size_difference': old_size - new_size}) time.sleep(self._ttl) def cleanup(self): if self._executor: self._executor.stop() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/consumers/0000775000175100017510000000000013224676256027131 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_consumer_base.py0000666000175100017510000001242113224676046033216 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
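# Illustrative usage sketch (not part of the original tree): how a
# time-based cache like the TTLCache defined in zmq_ttl_cache.py above can
# serve for duplicate-message detection. The message id is hypothetical.

def _ttl_cache_demo():
    import time

    from oslo_messaging._drivers.zmq_driver.server import zmq_ttl_cache

    # Entries live for ``ttl`` seconds after insertion; a background
    # executor additionally purges expired records.
    cache = zmq_ttl_cache.TTLCache(ttl=2)
    cache.add('msg-id-1')

    assert 'msg-id-1' in cache      # seen recently, so it is a duplicate
    time.sleep(3)
    assert 'msg-id-1' not in cache  # expired, so treat it as a new message

    cache.cleanup()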
import abc
import logging

import six

from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers.zmq_driver.matchmaker import zmq_matchmaker_base
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._drivers.zmq_driver import zmq_socket
from oslo_messaging._drivers.zmq_driver import zmq_updater
from oslo_messaging._i18n import _LE, _LI, _LW

LOG = logging.getLogger(__name__)

zmq = zmq_async.import_zmq()


@six.add_metaclass(abc.ABCMeta)
class ConsumerBase(object):

    def __init__(self, conf, poller, server):
        self.conf = conf
        self.poller = poller
        self.server = server
        self.sockets = []
        self.context = zmq.Context()

    def stop(self):
        """Stop consumer polling/updating."""

    @abc.abstractmethod
    def receive_request(self, socket):
        """Receive a request via a socket."""

    def cleanup(self):
        for socket in self.sockets:
            if not socket.handle.closed:
                socket.close()
        self.sockets = []


class SingleSocketConsumer(ConsumerBase):

    def __init__(self, conf, poller, server, socket_type):
        super(SingleSocketConsumer, self).__init__(conf, poller, server)
        self.matchmaker = server.matchmaker
        self.target = server.target
        self.socket_type = socket_type
        self.host = None
        self.socket = self.subscribe_socket(socket_type)
        self.target_updater = TargetUpdater(
            conf, self.matchmaker, self.target, self.host, socket_type)

    def stop(self):
        self.target_updater.stop()

    def subscribe_socket(self, socket_type):
        try:
            socket = zmq_socket.ZmqRandomPortSocket(
                self.conf, self.context, socket_type)
            self.sockets.append(socket)
            LOG.debug("Run %(stype)s consumer on %(addr)s:%(port)d",
                      {"stype": zmq_names.socket_type_str(socket_type),
                       "addr": socket.bind_address,
                       "port": socket.port})
            self.host = zmq_address.combine_address(
                self.conf.oslo_messaging_zmq.rpc_zmq_host, socket.port)
            self.poller.register(socket, self.receive_request)
            return socket
        except zmq.ZMQError as e:
            errmsg = _LE("Failed binding to port %(port)d: %(e)s") \
                % {'port': self.port, 'e': e}
            LOG.error(errmsg)
            raise rpc_common.RPCException(errmsg)

    @property
    def address(self):
        return self.socket.bind_address

    @property
    def port(self):
        return self.socket.port

    def cleanup(self):
        self.target_updater.cleanup()
        super(SingleSocketConsumer, self).cleanup()


class TargetUpdater(zmq_updater.UpdaterBase):
    """This entity performs periodic async updates to the matchmaker."""

    def __init__(self, conf, matchmaker, target, host, socket_type):
        self.target = target
        self.host = host
        self.socket_type = socket_type
        self.conf = conf
        self.matchmaker = matchmaker
        self._sleep_for = conf.oslo_messaging_zmq.zmq_target_update
        # NOTE(ozamiatin): Update target immediately not waiting
        # for background executor to initialize.
self._update_target() super(TargetUpdater, self).__init__( conf, matchmaker, self._update_target, conf.oslo_messaging_zmq.zmq_target_update) def _update_target(self): try: self.matchmaker.register( self.target, self.host, zmq_names.socket_type_str(self.socket_type), expire=self.conf.oslo_messaging_zmq.zmq_target_expire) if self._sleep_for != \ self.conf.oslo_messaging_zmq.zmq_target_update: self._sleep_for = \ self.conf.oslo_messaging_zmq.zmq_target_update LOG.info(_LI("Falling back to the normal update %d sec") % self._sleep_for) except zmq_matchmaker_base.MatchmakerUnavailable: # Update target frequently until first successful update # After matchmaker is back update normally as of config self._sleep_for = 10 LOG.warning(_LW("Failed connecting to the Matchmaker, " "update each %d sec") % self._sleep_for) def stop(self): super(TargetUpdater, self).stop() self.matchmaker.unregister( self.target, self.host, zmq_names.socket_type_str(self.socket_type)) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/consumers/__init__.py0000666000175100017510000000000013224676046031227 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_sub_consumer.py0000666000175100017510000001255113224676046033101 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
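# Illustrative sketch (not part of the original tree): the heartbeat
# pattern that TargetUpdater above implements against the matchmaker. A
# record registered with a finite ``expire`` must be refreshed more often
# than it expires, or peers stop discovering the server. The ``register``
# callable and the record are hypothetical.

import time


def heartbeat(register, record, update_period, expire):
    """Refresh ``record`` every ``update_period`` seconds forever."""
    assert update_period < expire, "records would vanish between updates"
    while True:
        register(record, expire=expire)
        time.sleep(update_period)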
import logging import uuid import six from oslo_messaging._drivers.zmq_driver.server.consumers \ import zmq_consumer_base from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_socket from oslo_messaging._drivers.zmq_driver import zmq_updater from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging._i18n import _LE, _LI LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class SubConsumer(zmq_consumer_base.ConsumerBase): def __init__(self, conf, poller, server): super(SubConsumer, self).__init__(conf, poller, server) self.matchmaker = SubscriptionMatchmakerWrapper(conf, server.matchmaker) self.target = server.target self.socket = zmq_socket.ZmqSocket(self.conf, self.context, zmq.SUB, immediate=False, identity=self._generate_identity()) self.sockets.append(self.socket) self.host = self.socket.handle.identity self._subscribe_to_topic() self._receive_request_versions = \ zmq_version.get_method_versions(self, 'receive_request') self.connection_updater = SubscriberConnectionUpdater( conf, self.matchmaker, self.socket) self.poller.register(self.socket, self.receive_request) LOG.info(_LI("[%s] Run SUB consumer"), self.host) def _generate_identity(self): return six.b(self.conf.oslo_messaging_zmq.rpc_zmq_host + '/') + \ zmq_address.target_to_subscribe_filter(self.target) + \ six.b('/' + str(uuid.uuid4())) def _subscribe_to_topic(self): topic_filter = zmq_address.target_to_subscribe_filter(self.target) self.socket.setsockopt(zmq.SUBSCRIBE, topic_filter) LOG.debug("[%(host)s] Subscribing to topic %(filter)s", {"host": self.host, "filter": topic_filter}) def _get_receive_request_version(self, version): receive_request_version = self._receive_request_versions.get(version) if receive_request_version is None: raise zmq_version.UnsupportedMessageVersionError(version) return receive_request_version def _receive_request_v_1_0(self, topic_filter, socket): message_type = int(socket.recv()) assert message_type in zmq_names.MULTISEND_TYPES, "Fanout expected!" 
message_id = socket.recv() context, message = socket.recv_loaded() LOG.debug("[%(host)s] Received on topic %(filter)s message %(msg_id)s " "(v%(msg_version)s)", {'host': self.host, 'filter': topic_filter, 'msg_id': message_id, 'msg_version': '1.0'}) return context, message def receive_request(self, socket): try: topic_filter = socket.recv() message_version = socket.recv_string() receive_request_version = \ self._get_receive_request_version(message_version) context, message = receive_request_version(topic_filter, socket) return zmq_incoming_message.ZmqIncomingMessage(context, message) except (zmq.ZMQError, AssertionError, ValueError, zmq_version.UnsupportedMessageVersionError) as e: LOG.error(_LE("Receiving message failed: %s"), str(e)) # NOTE(gdavoian): drop the left parts of a broken message if socket.getsockopt(zmq.RCVMORE): socket.recv_multipart() def cleanup(self): LOG.info(_LI("[%s] Destroy SUB consumer"), self.host) self.connection_updater.cleanup() super(SubConsumer, self).cleanup() class SubscriptionMatchmakerWrapper(object): def __init__(self, conf, matchmaker): self.conf = conf self.matchmaker = matchmaker def get_publishers(self): conf_publishers = self.conf.oslo_messaging_zmq.subscribe_on LOG.debug("Publishers taken from configuration %s", conf_publishers) if conf_publishers: return [(publisher, None) for publisher in conf_publishers] return self.matchmaker.get_publishers() class SubscriberConnectionUpdater(zmq_updater.ConnectionUpdater): def _update_connection(self): publishers = self.matchmaker.get_publishers() for publisher_address, router_address in publishers: self.socket.connect_to_host(publisher_address) LOG.debug("[%s] SUB consumer connected to publishers %s", self.socket.handle.identity, publishers) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_dealer_consumer.py0000666000175100017510000002173513224676046033550 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
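# Illustrative sketch (not part of the original tree): ZeroMQ SUB sockets
# filter incoming messages by byte *prefix*, which is why SubConsumer above
# subscribes with a filter derived from the target and why publishers send
# that filter as the first message part. A minimal PUB/SUB pair, assuming
# pyzmq is installed; the endpoint and topic are arbitrary.

def _pub_sub_demo():
    import time

    import zmq

    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.bind('tcp://127.0.0.1:5560')
    sub = ctx.socket(zmq.SUB)
    sub.connect('tcp://127.0.0.1:5560')
    # Receive only messages whose first part starts with this prefix.
    sub.setsockopt(zmq.SUBSCRIBE, b'topic.server1')
    time.sleep(0.2)  # PUB/SUB joins are asynchronous (slow-joiner effect)

    pub.send_multipart([b'topic.server1', b'payload'])
    print(sub.recv_multipart())

    sub.close()
    pub.close()
    ctx.term()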
import logging import uuid import six from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers.zmq_driver.client import zmq_response from oslo_messaging._drivers.zmq_driver.client import zmq_senders from oslo_messaging._drivers.zmq_driver.client import zmq_sockets_manager from oslo_messaging._drivers.zmq_driver.server.consumers \ import zmq_consumer_base from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message from oslo_messaging._drivers.zmq_driver.server import zmq_ttl_cache from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_updater from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging._i18n import _LE, _LI, _LW LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class DealerConsumer(zmq_consumer_base.SingleSocketConsumer): def __init__(self, conf, poller, server): self.reply_sender = zmq_senders.ReplySenderProxy(conf) self.sockets_manager = zmq_sockets_manager.SocketsManager( conf, server.matchmaker, zmq.DEALER) self.host = None super(DealerConsumer, self).__init__(conf, poller, server, zmq.DEALER) self._receive_request_versions = \ zmq_version.get_method_versions(self, 'receive_request') self.connection_updater = ConsumerConnectionUpdater( conf, self.matchmaker, self.socket) LOG.info(_LI("[%s] Run DEALER consumer"), self.host) def _generate_identity(self): return six.b(self.conf.oslo_messaging_zmq.rpc_zmq_host + "/" + zmq_address.target_to_key(self.target) + "/" + str(uuid.uuid4())) def subscribe_socket(self, socket_type): try: socket = self.sockets_manager.get_socket_to_routers( self._generate_identity()) self.host = socket.handle.identity self.poller.register(socket, self.receive_request) return socket except zmq.ZMQError as e: LOG.error(_LE("Failed connecting to ROUTER socket %(e)s") % e) raise rpc_common.RPCException(str(e)) def _reply(self, rpc_message, reply, failure): if failure is not None: failure = rpc_common.serialize_remote_exception(failure) reply = zmq_response.Reply(message_id=rpc_message.message_id, reply_id=rpc_message.reply_id, message_version=rpc_message.message_version, reply_body=reply, failure=failure) self.reply_sender.send(rpc_message.socket, reply) return reply def _create_message(self, context, message, message_version, reply_id, message_id, socket, message_type): if message_type == zmq_names.CALL_TYPE: message = zmq_incoming_message.ZmqIncomingMessage( context, message, message_version=message_version, reply_id=reply_id, message_id=message_id, socket=socket, reply_method=self._reply ) else: message = zmq_incoming_message.ZmqIncomingMessage(context, message) LOG.debug("[%(host)s] Received %(msg_type)s message %(msg_id)s " "(v%(msg_version)s)", {"host": self.host, "msg_type": zmq_names.message_type_str(message_type), "msg_id": message_id, "msg_version": message_version}) return message def _get_receive_request_version(self, version): receive_request_version = self._receive_request_versions.get(version) if receive_request_version is None: raise zmq_version.UnsupportedMessageVersionError(version) return receive_request_version def receive_request(self, socket): try: empty = socket.recv() assert empty == b'', "Empty delimiter expected!" message_version = socket.recv_string() assert message_version != b'', "Valid message version expected!" 
receive_request_version = \ self._get_receive_request_version(message_version) return receive_request_version(socket) except (zmq.ZMQError, AssertionError, ValueError, zmq_version.UnsupportedMessageVersionError) as e: LOG.error(_LE("Receiving message failure: %s"), str(e)) # NOTE(gdavoian): drop the left parts of a broken message if socket.getsockopt(zmq.RCVMORE): socket.recv_multipart() def _receive_request_v_1_0(self, socket): reply_id = socket.recv() assert reply_id != b'', "Valid reply id expected!" message_type = int(socket.recv()) assert message_type in zmq_names.REQUEST_TYPES, "Request expected!" message_id = socket.recv_string() assert message_id != '', "Valid message id expected!" context, message = socket.recv_loaded() return self._create_message(context, message, '1.0', reply_id, message_id, socket, message_type) def cleanup(self): LOG.info(_LI("[%s] Destroy DEALER consumer"), self.host) self.connection_updater.cleanup() super(DealerConsumer, self).cleanup() class DealerConsumerWithAcks(DealerConsumer): def __init__(self, conf, poller, server): super(DealerConsumerWithAcks, self).__init__(conf, poller, server) self.ack_sender = zmq_senders.AckSenderProxy(conf) self.messages_cache = zmq_ttl_cache.TTLCache( ttl=conf.oslo_messaging_zmq.rpc_message_ttl ) def _acknowledge(self, message_version, reply_id, message_id, socket): ack = zmq_response.Ack(message_id=message_id, reply_id=reply_id, message_version=message_version) self.ack_sender.send(socket, ack) def _reply(self, rpc_message, reply, failure): reply = super(DealerConsumerWithAcks, self)._reply(rpc_message, reply, failure) self.messages_cache.add(rpc_message.message_id, reply) return reply def _reply_from_cache(self, message_id, socket): reply = self.messages_cache.get(message_id) if reply is not None: self.reply_sender.send(socket, reply) def _create_message(self, context, message, message_version, reply_id, message_id, socket, message_type): # drop a duplicate message if message_id in self.messages_cache: LOG.warning( _LW("[%(host)s] Dropping duplicate %(msg_type)s " "message %(msg_id)s"), {"host": self.host, "msg_type": zmq_names.message_type_str(message_type), "msg_id": message_id} ) # NOTE(gdavoian): send yet another ack for the direct # message, since the old one might be lost; # for the CALL message also try to resend its reply # (of course, if it was already obtained and cached). if message_type in zmq_names.DIRECT_TYPES: self._acknowledge(message_version, reply_id, message_id, socket) if message_type == zmq_names.CALL_TYPE: self._reply_from_cache(message_id, socket) return None self.messages_cache.add(message_id) # NOTE(gdavoian): send an immediate ack, since it may # be too late to wait until the message will be # dispatched and processed by a RPC server if message_type in zmq_names.DIRECT_TYPES: self._acknowledge(message_version, reply_id, message_id, socket) return super(DealerConsumerWithAcks, self)._create_message( context, message, message_version, reply_id, message_id, socket, message_type ) def cleanup(self): self.messages_cache.cleanup() super(DealerConsumerWithAcks, self).cleanup() class ConsumerConnectionUpdater(zmq_updater.ConnectionUpdater): def _update_connection(self): routers = self.matchmaker.get_routers() for router_address in routers: self.socket.connect_to_host(router_address) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py0000666000175100017510000001144613224676046033632 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers.zmq_driver.client import zmq_response from oslo_messaging._drivers.zmq_driver.client import zmq_senders from oslo_messaging._drivers.zmq_driver.server.consumers \ import zmq_consumer_base from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging._i18n import _LE, _LI LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class RouterConsumer(zmq_consumer_base.SingleSocketConsumer): def __init__(self, conf, poller, server): self.reply_sender = zmq_senders.ReplySenderDirect(conf) super(RouterConsumer, self).__init__(conf, poller, server, zmq.ROUTER) self._receive_request_versions = \ zmq_version.get_method_versions(self, 'receive_request') LOG.info(_LI("[%s] Run ROUTER consumer"), self.host) def _reply(self, rpc_message, reply, failure): if failure is not None: failure = rpc_common.serialize_remote_exception(failure) reply = zmq_response.Reply(message_id=rpc_message.message_id, reply_id=rpc_message.reply_id, message_version=rpc_message.message_version, reply_body=reply, failure=failure) self.reply_sender.send(rpc_message.socket, reply) return reply def _create_message(self, context, message, message_version, reply_id, message_id, socket, message_type): if message_type == zmq_names.CALL_TYPE: message = zmq_incoming_message.ZmqIncomingMessage( context, message, message_version=message_version, reply_id=reply_id, message_id=message_id, socket=socket, reply_method=self._reply ) else: message = zmq_incoming_message.ZmqIncomingMessage(context, message) LOG.debug("[%(host)s] Received %(msg_type)s message %(msg_id)s " "(v%(msg_version)s)", {"host": self.host, "msg_type": zmq_names.message_type_str(message_type), "msg_id": message_id, "msg_version": message_version}) return message def _get_receive_request_version(self, version): receive_request_version = self._receive_request_versions.get(version) if receive_request_version is None: raise zmq_version.UnsupportedMessageVersionError(version) return receive_request_version def receive_request(self, socket): try: reply_id = socket.recv() assert reply_id != b'', "Valid reply id expected!" empty = socket.recv() assert empty == b'', "Empty delimiter expected!" message_version = socket.recv_string() assert message_version != b'', "Valid message version expected!" 
receive_request_version = \ self._get_receive_request_version(message_version) return receive_request_version(reply_id, socket) except (zmq.ZMQError, AssertionError, ValueError, zmq_version.UnsupportedMessageVersionError) as e: LOG.error(_LE("Receiving message failed: %s"), str(e)) # NOTE(gdavoian): drop the left parts of a broken message if socket.getsockopt(zmq.RCVMORE): socket.recv_multipart() def _receive_request_v_1_0(self, reply_id, socket): message_type = int(socket.recv()) assert message_type in zmq_names.REQUEST_TYPES, "Request expected!" message_id = socket.recv_string() assert message_id != '', "Valid message id expected!" context, message = socket.recv_loaded() return self._create_message(context, message, '1.0', reply_id, message_id, socket, message_type) def cleanup(self): LOG.info(_LI("[%s] Destroy ROUTER consumer"), self.host) super(RouterConsumer, self).cleanup() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/0000775000175100017510000000000013224676256024766 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/zmq_sender.py0000666000175100017510000001263513224676046027515 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import logging import six from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_version from oslo_messaging._i18n import _LW LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() @six.add_metaclass(abc.ABCMeta) class Sender(object): @abc.abstractmethod def send_message(self, socket, multipart_message): """Send message to a socket from a multipart list.""" class CentralSender(Sender): def __init__(self): self._send_message_versions = \ zmq_version.get_method_versions(self, 'send_message') def send_message(self, socket, multipart_message): message_version = multipart_message[zmq_names.MESSAGE_VERSION_IDX] if six.PY3: message_version = message_version.decode('utf-8') send_message_version = self._send_message_versions.get(message_version) if send_message_version is None: LOG.warning(_LW("Dropping message with unsupported version %s"), message_version) return send_message_version(socket, multipart_message) class LocalSender(Sender): pass class CentralRouterSender(CentralSender): def _send_message_v_1_0(self, socket, multipart_message): message_type = int(multipart_message[zmq_names.MESSAGE_TYPE_IDX]) routing_key = multipart_message[zmq_names.ROUTING_KEY_IDX] reply_id = multipart_message[zmq_names.REPLY_ID_IDX] message_id = multipart_message[zmq_names.MESSAGE_ID_IDX] message_version = multipart_message[zmq_names.MESSAGE_VERSION_IDX] socket.send(routing_key, zmq.SNDMORE) socket.send(b'', zmq.SNDMORE) socket.send(message_version, zmq.SNDMORE) socket.send(reply_id, zmq.SNDMORE) socket.send(multipart_message[zmq_names.MESSAGE_TYPE_IDX], zmq.SNDMORE) socket.send_multipart(multipart_message[zmq_names.MESSAGE_ID_IDX:]) LOG.debug("Dispatching 
%(msg_type)s message %(msg_id)s - from %(rid)s " "-> to %(rkey)s (v%(msg_version)s)", {"msg_type": zmq_names.message_type_str(message_type), "msg_id": message_id, "rkey": routing_key, "rid": reply_id, "msg_version": message_version}) class CentralAckSender(CentralSender): def _send_message_v_1_0(self, socket, multipart_message): message_type = zmq_names.ACK_TYPE message_id = multipart_message[zmq_names.MESSAGE_ID_IDX] routing_key = socket.handle.identity reply_id = multipart_message[zmq_names.REPLY_ID_IDX] message_version = multipart_message[zmq_names.MESSAGE_VERSION_IDX] socket.send(reply_id, zmq.SNDMORE) socket.send(b'', zmq.SNDMORE) socket.send(message_version, zmq.SNDMORE) socket.send(routing_key, zmq.SNDMORE) socket.send(six.b(str(message_type)), zmq.SNDMORE) socket.send_string(message_id) LOG.debug("Sending %(msg_type)s for %(msg_id)s to %(rid)s " "[from %(rkey)s] (v%(msg_version)s)", {"msg_type": zmq_names.message_type_str(message_type), "msg_id": message_id, "rid": reply_id, "rkey": routing_key, "msg_version": message_version}) class CentralPublisherSender(CentralSender): def _send_message_v_1_0(self, socket, multipart_message): message_type = int(multipart_message[zmq_names.MESSAGE_TYPE_IDX]) assert message_type in zmq_names.MULTISEND_TYPES, "Fanout expected!" topic_filter = multipart_message[zmq_names.ROUTING_KEY_IDX] message_id = multipart_message[zmq_names.MESSAGE_ID_IDX] message_version = multipart_message[zmq_names.MESSAGE_VERSION_IDX] socket.send(topic_filter, zmq.SNDMORE) socket.send(message_version, zmq.SNDMORE) socket.send(six.b(str(message_type)), zmq.SNDMORE) socket.send_multipart(multipart_message[zmq_names.MESSAGE_ID_IDX:]) LOG.debug("Publishing message %(msg_id)s on [%(topic)s] " "(v%(msg_version)s)", {"topic": topic_filter, "msg_id": message_id, "msg_version": message_version}) class LocalPublisherSender(LocalSender): TOPIC_IDX = 0 MSG_VERSION_IDX = 1 MSG_TYPE_IDX = 2 MSG_ID_IDX = 3 def send_message(self, socket, multipart_message): socket.send_multipart(multipart_message) LOG.debug("Publishing message %(msg_id)s on [%(topic)s] " "(v%(msg_version)s)", {"topic": multipart_message[self.TOPIC_IDX], "msg_id": multipart_message[self.MSG_ID_IDX], "msg_version": multipart_message[self.MSG_VERSION_IDX]}) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/__init__.py0000666000175100017510000000000013224676046027064 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/central/0000775000175100017510000000000013224676256026416 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/central/__init__.py0000666000175100017510000000000013224676046030514 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/central/zmq_publisher_proxy.py0000666000175100017510000000421313224676046033114 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
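# Illustrative sketch (not part of the original tree): the outgoing frame
# order that CentralRouterSender above emits. A ROUTER socket consumes the
# first part (the routing key) to choose the destination peer; the empty
# delimiter separates the envelope from the payload. Field values are
# hypothetical.

def build_router_frame(routing_key, message_version, reply_id,
                       message_type, tail_parts):
    """Assemble the multipart frame in the order the sender emits it."""
    return ([routing_key,        # consumed by the destination ROUTER
             b'',                # empty delimiter
             message_version,    # e.g. b'1.0'
             reply_id,           # identity of the original sender
             message_type]       # numeric message type as bytes
            + list(tail_parts))  # message id, context and payload


frame = build_router_frame(b'server-id', b'1.0', b'client-id', b'1',
                           [b'msg-id', b'{}', b'{}'])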
from oslo_messaging._drivers.zmq_driver.proxy import zmq_sender from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_socket zmq = zmq_async.import_zmq() class PublisherProxy(object): """PUB/SUB based request publisher The publisher intended to be used for Fanout and Notify multi-sending patterns. It differs from direct publishers like DEALER or PUSH based in a way it treats matchmaker. Here all publishers register in the matchmaker. Subscribers (server-side) take the list of publishers and connect to all of them but subscribe only to a specific topic-filtering tag generated from the Target object. """ def __init__(self, conf, matchmaker, sender=None): super(PublisherProxy, self).__init__() self.conf = conf self.zmq_context = zmq.Context() self.matchmaker = matchmaker port = conf.zmq_proxy_opts.publisher_port self.socket = zmq_socket.ZmqFixedPortSocket( self.conf, self.zmq_context, zmq.PUB, conf.zmq_proxy_opts.host, port) if port != 0 else \ zmq_socket.ZmqRandomPortSocket( self.conf, self.zmq_context, zmq.PUB, conf.zmq_proxy_opts.host) self.host = self.socket.connect_address self.sender = sender or zmq_sender.CentralPublisherSender() def send_request(self, multipart_message): self.sender.send_message(self.socket, multipart_message) def cleanup(self): self.socket.close() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/central/zmq_central_proxy.py0000666000175100017510000001244113224676046032551 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
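# Illustrative sketch (not part of the original tree): the topology from
# the PublisherProxy docstring above. A single SUB socket connects to every
# registered publisher yet receives only the topics it subscribed to,
# because filtering is per-socket, not per-connection. Endpoints are
# hypothetical; assumes pyzmq.

def connect_to_all_publishers(publisher_endpoints, topic_filter):
    import zmq

    ctx = zmq.Context.instance()
    sub = ctx.socket(zmq.SUB)
    for endpoint in publisher_endpoints:
        sub.connect(endpoint)    # one SUB socket, many PUB endpoints
    sub.setsockopt(zmq.SUBSCRIBE, topic_filter)
    return sub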
import logging from oslo_messaging._drivers.zmq_driver.proxy.central \ import zmq_publisher_proxy from oslo_messaging._drivers.zmq_driver.proxy \ import zmq_base_proxy from oslo_messaging._drivers.zmq_driver.proxy import zmq_sender from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_updater from oslo_messaging._i18n import _LI LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() class SingleRouterProxy(zmq_base_proxy.ProxyBase): PROXY_TYPE = "ROUTER" def __init__(self, conf, context, matchmaker): super(SingleRouterProxy, self).__init__(conf, context, matchmaker) port = conf.zmq_proxy_opts.frontend_port self.fe_router_socket = zmq_base_proxy.create_socket( conf, context, port, zmq.ROUTER) self.poller.register(self.fe_router_socket, self._receive_message) self.publisher = zmq_publisher_proxy.PublisherProxy(conf, matchmaker) self.router_sender = zmq_sender.CentralRouterSender() self.ack_sender = zmq_sender.CentralAckSender() self._router_updater = self._create_router_updater() def run(self): message, socket = self.poller.poll() if message is None: return message_type = int(message[zmq_names.MESSAGE_TYPE_IDX]) if self.conf.oslo_messaging_zmq.use_pub_sub and \ message_type in zmq_names.MULTISEND_TYPES: self.publisher.send_request(message) if socket is self.fe_router_socket and \ self.conf.zmq_proxy_opts.ack_pub_sub: self.ack_sender.send_message(socket, message) else: self.router_sender.send_message( self._get_socket_to_dispatch_on(socket), message) def _create_router_updater(self): return RouterUpdater( self.conf, self.matchmaker, self.publisher.host, self.fe_router_socket.connect_address, self.fe_router_socket.connect_address) def _get_socket_to_dispatch_on(self, socket): return self.fe_router_socket def cleanup(self): super(SingleRouterProxy, self).cleanup() self._router_updater.cleanup() self.fe_router_socket.close() self.publisher.cleanup() class DoubleRouterProxy(SingleRouterProxy): PROXY_TYPE = "ROUTER-ROUTER" def __init__(self, conf, context, matchmaker): port = conf.zmq_proxy_opts.backend_port self.be_router_socket = zmq_base_proxy.create_socket( conf, context, port, zmq.ROUTER) super(DoubleRouterProxy, self).__init__(conf, context, matchmaker) self.poller.register(self.be_router_socket, self._receive_message) def _create_router_updater(self): return RouterUpdater( self.conf, self.matchmaker, self.publisher.host, self.fe_router_socket.connect_address, self.be_router_socket.connect_address) def _get_socket_to_dispatch_on(self, socket): return self.be_router_socket \ if socket is self.fe_router_socket \ else self.fe_router_socket def cleanup(self): super(DoubleRouterProxy, self).cleanup() self.be_router_socket.close() class RouterUpdater(zmq_updater.UpdaterBase): """This entity performs periodic async updates from router proxy to the matchmaker. 
""" def __init__(self, conf, matchmaker, publisher_address, fe_router_address, be_router_address): self.publisher_address = publisher_address self.fe_router_address = fe_router_address self.be_router_address = be_router_address super(RouterUpdater, self).__init__( conf, matchmaker, self._update_records, conf.oslo_messaging_zmq.zmq_target_update) def _update_records(self): self.matchmaker.register_publisher( (self.publisher_address, self.fe_router_address), expire=self.conf.oslo_messaging_zmq.zmq_target_expire) LOG.info(_LI("[PUB:%(pub)s, ROUTER:%(router)s] Update PUB publisher"), {"pub": self.publisher_address, "router": self.fe_router_address}) self.matchmaker.register_router( self.be_router_address, expire=self.conf.oslo_messaging_zmq.zmq_target_expire) LOG.info(_LI("[Backend ROUTER:%(router)s] Update ROUTER"), {"router": self.be_router_address}) def cleanup(self): super(RouterUpdater, self).cleanup() self.matchmaker.unregister_publisher( (self.publisher_address, self.fe_router_address)) self.matchmaker.unregister_router( self.be_router_address) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/zmq_base_proxy.py0000666000175100017510000000543513224676046030410 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import uuid import six from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_names from oslo_messaging._drivers.zmq_driver import zmq_socket from oslo_messaging._i18n import _LI, _LE LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() def check_message_format(func): def _check_message_format(socket): try: return func(socket) except Exception as e: LOG.error(_LE("Received message with wrong format: %r. " "Dropping invalid message"), e) # NOTE(gdavoian): drop the left parts of a broken message, since # they most likely will break the order of next messages' parts if socket.getsockopt(zmq.RCVMORE): socket.recv_multipart() return _check_message_format def create_socket(conf, context, port, socket_type): host = conf.zmq_proxy_opts.host identity = six.b(host) + b"/zmq-proxy/" + six.b(str(uuid.uuid4())) if port != 0: return zmq_socket.ZmqFixedPortSocket(conf, context, socket_type, host, port, identity=identity) else: return zmq_socket.ZmqRandomPortSocket(conf, context, socket_type, host, identity=identity) class ProxyBase(object): PROXY_TYPE = "UNDEFINED" def __init__(self, conf, context, matchmaker): self.conf = conf self.context = context self.matchmaker = matchmaker LOG.info(_LI("Running %s proxy") % self.PROXY_TYPE) self.poller = zmq_async.get_poller() @staticmethod @check_message_format def _receive_message(socket): message = socket.recv_multipart() assert message[zmq_names.EMPTY_IDX] == b'', "Empty delimiter expected!" message_type = int(message[zmq_names.MESSAGE_TYPE_IDX]) assert message_type in zmq_names.MESSAGE_TYPES, \ "Known message type expected!" assert len(message) > zmq_names.MESSAGE_ID_IDX, \ "At least %d parts expected!" 
% (zmq_names.MESSAGE_ID_IDX + 1) return message def cleanup(self): self.poller.close() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/local/0000775000175100017510000000000013224676256026060 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/local/zmq_local_proxy.py0000666000175100017510000000440113224676046031652 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging._drivers.zmq_driver.proxy.central \ import zmq_publisher_proxy from oslo_messaging._drivers.zmq_driver.proxy \ import zmq_base_proxy from oslo_messaging._drivers.zmq_driver.proxy import zmq_sender from oslo_messaging._drivers.zmq_driver.server.consumers \ import zmq_sub_consumer from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_socket zmq = zmq_async.import_zmq() class LocalPublisherProxy(zmq_base_proxy.ProxyBase): PROXY_TYPE = "L-PUBLISHER" def __init__(self, conf, context, matchmaker): wrapper = zmq_sub_consumer.SubscriptionMatchmakerWrapper(conf, matchmaker) super(LocalPublisherProxy, self).__init__(conf, context, wrapper) self.fe_sub = zmq_socket.ZmqSocket(conf, context, zmq.SUB, False) self.fe_sub.setsockopt(zmq.SUBSCRIBE, b'') self.connection_updater = zmq_sub_consumer.SubscriberConnectionUpdater( conf, self.matchmaker, self.fe_sub) self.poller.register(self.fe_sub, self.receive_message) self.publisher = zmq_publisher_proxy.PublisherProxy( conf, matchmaker, sender=zmq_sender.LocalPublisherSender()) def run(self): message, socket = self.poller.poll() if message is None: return self.publisher.send_request(message) @staticmethod def receive_message(socket): return socket.recv_multipart() def cleanup(self): super(LocalPublisherProxy, self).cleanup() self.fe_sub.close() self.connection_updater.cleanup() self.publisher.cleanup() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/local/__init__.py0000666000175100017510000000000013224676046030156 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/proxy/zmq_proxy.py0000666000175100017510000002075613224676046027421 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
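# Illustrative sketch (not part of the original tree): the dispatch rule
# implemented by SingleRouterProxy and DoubleRouterProxy above, reduced to
# a pure function. With a single ROUTER socket everything bounces back
# through the front-end; with two sockets, traffic crosses to the opposite
# side.

def socket_to_dispatch_on(received_on, fe_socket, be_socket=None):
    """Return the socket a proxied message should be re-sent on."""
    if be_socket is None:        # single-router proxy
        return fe_socket
    return be_socket if received_on is fe_socket else fe_socket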
import argparse import logging import socket from oslo_config import cfg from stevedore import driver from oslo_messaging._drivers import impl_zmq from oslo_messaging._drivers.zmq_driver.proxy.central import zmq_central_proxy from oslo_messaging._drivers.zmq_driver.proxy.local import zmq_local_proxy from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._i18n import _LI from oslo_messaging import transport LOG = logging.getLogger(__name__) zmq = zmq_async.import_zmq() USAGE = """ Usage: ./zmq-proxy.py [-h] [] ... Usage example: python oslo_messaging/_cmd/zmq-proxy.py""" zmq_proxy_opts = [ cfg.StrOpt('host', default=socket.gethostname(), help='Hostname (FQDN) of current proxy' ' an ethernet interface, or IP address.'), cfg.IntOpt('frontend_port', default=0, help='Front-end ROUTER port number. Zero means random.'), cfg.IntOpt('backend_port', default=0, help='Back-end ROUTER port number. Zero means random.'), cfg.IntOpt('publisher_port', default=0, help='Publisher port number. Zero means random.'), cfg.BoolOpt('local_publisher', default=False, help='Specify publisher/subscriber local proxy.'), cfg.BoolOpt('ack_pub_sub', default=False, help='Use acknowledgements for notifying senders about ' 'receiving their fanout messages. ' 'The option is ignored if PUB/SUB is disabled.'), cfg.StrOpt('url', default='zmq://127.0.0.1:6379/', help='ZMQ-driver transport URL with additional configurations') ] def parse_command_line_args(conf): parser = argparse.ArgumentParser( description='ZeroMQ proxy service', usage=USAGE ) parser.add_argument('-c', '--config-file', dest='config_file', type=str, help='Path to configuration file') parser.add_argument('-l', '--log-file', dest='log_file', type=str, help='Path to log file') parser.add_argument('-H', '--host', dest='host', type=str, help='Host FQDN for current proxy') parser.add_argument('-f', '--frontend-port', dest='frontend_port', type=int, help='Front-end ROUTER port number') parser.add_argument('-b', '--backend-port', dest='backend_port', type=int, help='Back-end ROUTER port number') parser.add_argument('-p', '--publisher-port', dest='publisher_port', type=int, help='Back-end PUBLISHER port number') parser.add_argument('-lp', '--local-publisher', dest='local_publisher', action='store_true', help='Specify publisher/subscriber local proxy.') parser.add_argument('-a', '--ack-pub-sub', dest='ack_pub_sub', action='store_true', help='Acknowledge PUB/SUB messages') parser.add_argument('-u', '--url', dest='url', type=str, help='Transport URL with configurations') parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='Turn on DEBUG logging level instead of INFO') args = parser.parse_args() if args.config_file: conf(['--config-file', args.config_file]) log_kwargs = {'level': logging.DEBUG if args.debug else logging.INFO, 'format': '%(asctime)s %(name)s %(levelname)-8s %(message)s'} if args.log_file: log_kwargs.update({'filename': args.log_file}) logging.basicConfig(**log_kwargs) if args.host: conf.set_override('host', args.host, group='zmq_proxy_opts') if args.frontend_port: conf.set_override('frontend_port', args.frontend_port, group='zmq_proxy_opts') if args.backend_port: conf.set_override('backend_port', args.backend_port, group='zmq_proxy_opts') if args.publisher_port: conf.set_override('publisher_port', args.publisher_port, group='zmq_proxy_opts') if args.local_publisher: conf.set_override('local_publisher', args.local_publisher, group='zmq_proxy_opts') if args.ack_pub_sub: conf.set_override('ack_pub_sub', 
args.ack_pub_sub, group='zmq_proxy_opts') if args.url: conf.set_override('url', args.url, group='zmq_proxy_opts') class ZmqProxy(object): """Wrapper class for Publishers and Routers proxies. The main reason to have a proxy is high complexity of TCP sockets number growth with direct connections (when services connect directly to each other). The general complexity for ZeroMQ+Openstack deployment with direct connections may be square(N) (where N is a number of nodes in deployment). With proxy the complexity is reduced to k*N where k is a number of services. Currently there are 2 types of proxy, they are Publishers and Routers. Publisher proxy serves for PUB-SUB pattern implementation where Publisher is a server which performs broadcast to subscribers. Router is used for direct message types in case of number of TCP socket connections is critical for specific deployment. Generally 3 publishers is enough for deployment. Router is used for direct messages in order to reduce the number of allocated TCP sockets in controller. The list of requirements to Router: 1. There may be any number of routers in the deployment. Routers are registered in a name-server and client connects dynamically to all of them performing load balancing. 2. Routers should be transparent for clients and servers. Which means it doesn't change the way of messaging between client and the final target by hiding the target from a client. 3. Router may be restarted or shut down at any time losing all messages in its queue. Smart retrying (based on acknowledgements from server side) and load balancing between other Router instances from the client side should handle the situation. 4. Router takes all the routing information from message envelope and doesn't perform Target-resolution in any way. 5. Routers don't talk to each other and no synchronization is needed. 6. Load balancing is performed by the client in a round-robin fashion. Those requirements should limit the performance impact caused by using of proxies making proxies as lightweight as possible. """ def __init__(self, conf): super(ZmqProxy, self).__init__() self.conf = conf url = transport.TransportURL.parse( self.conf, url=self.conf.zmq_proxy_opts.url ) self.matchmaker = driver.DriverManager( 'oslo.messaging.zmq.matchmaker', impl_zmq.ZmqDriver.get_matchmaker_backend(self.conf, url) ).driver(self.conf, url=url) self.context = zmq.Context() self.proxy = self._choose_proxy_implementation() def _choose_proxy_implementation(self): if self.conf.zmq_proxy_opts.local_publisher: return zmq_local_proxy.LocalPublisherProxy(self.conf, self.context, self.matchmaker) elif self.conf.zmq_proxy_opts.frontend_port != 0 and \ self.conf.zmq_proxy_opts.backend_port == 0: return zmq_central_proxy.SingleRouterProxy(self.conf, self.context, self.matchmaker) else: return zmq_central_proxy.DoubleRouterProxy(self.conf, self.context, self.matchmaker) def run(self): self.proxy.run() def close(self): LOG.info(_LI("Proxy shutting down ...")) self.proxy.cleanup() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/poller/0000775000175100017510000000000013224676256025102 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/poller/threading_poller.py0000666000175100017510000000553713224676046031007 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import threading from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._drivers.zmq_driver import zmq_poller zmq = zmq_async.import_zmq() LOG = logging.getLogger(__name__) class ThreadingPoller(zmq_poller.ZmqPoller): def __init__(self): self.poller = zmq.Poller() self.sockets_and_recv_methods = {} def register(self, socket, recv_method=None): socket_handle = socket.handle if socket_handle in self.sockets_and_recv_methods: return LOG.debug("Registering socket %s", socket_handle.identity) self.sockets_and_recv_methods[socket_handle] = (socket, recv_method) self.poller.register(socket_handle, zmq.POLLIN) def unregister(self, socket): socket_handle = socket.handle socket_and_recv_method = \ self.sockets_and_recv_methods.pop(socket_handle, None) if socket_and_recv_method: LOG.debug("Unregistering socket %s", socket_handle.identity) self.poller.unregister(socket_handle) def poll(self, timeout=None): if timeout is not None and timeout > 0: timeout *= 1000 # convert seconds to milliseconds socket_handles = {} try: socket_handles = dict(self.poller.poll(timeout=timeout)) except zmq.ZMQError as e: LOG.debug("Polling terminated with error: %s", e) if not socket_handles: return None, None for socket_handle in socket_handles: socket, recv_method = self.sockets_and_recv_methods[socket_handle] if recv_method: return recv_method(socket), socket else: return socket.recv_multipart(), socket def close(self): pass # Nothing to do for threading poller class ThreadingExecutor(zmq_poller.Executor): def __init__(self, method): self._method = method thread = threading.Thread(target=self._loop) thread.daemon = True super(ThreadingExecutor, self).__init__(thread) self._stop = threading.Event() def _loop(self): while not self._stop.is_set(): self._method() def execute(self): self.thread.start() def stop(self): self._stop.set() oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/poller/__init__.py0000666000175100017510000000000013224676046027200 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/poller/green_poller.py0000666000175100017510000000471713224676046030141 0ustar zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
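# Illustrative sketch (not part of the original tree): the polling contract
# shared by ThreadingPoller above and GreenPoller below. ``poll`` returns a
# ``(message, socket)`` pair, or ``(None, None)`` on timeout or shutdown.
# The ``handle`` callback is hypothetical.

def serve(poller, handle, timeout=1.0):
    """Drain one message per iteration until told to stop."""
    while True:
        message, socket = poller.poll(timeout=timeout)
        if message is None:
            continue  # timed out; a real loop would check a stop flag here
        handle(message, socket)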
import logging import eventlet from oslo_messaging._drivers.zmq_driver import zmq_poller LOG = logging.getLogger(__name__) class GreenPoller(zmq_poller.ZmqPoller): def __init__(self): self.incoming_queue = eventlet.queue.LightQueue() self.thread_by_socket = {} def register(self, socket, recv_method=None): if socket not in self.thread_by_socket: LOG.debug("Registering socket %s", socket.handle.identity) self.thread_by_socket[socket] = eventlet.spawn( self._socket_receive, socket, recv_method ) def unregister(self, socket): thread = self.thread_by_socket.pop(socket, None) if thread: LOG.debug("Unregistering socket %s", socket.handle.identity) thread.kill() def _socket_receive(self, socket, recv_method=None): while True: if recv_method: incoming = recv_method(socket) else: incoming = socket.recv_multipart() self.incoming_queue.put((incoming, socket)) eventlet.sleep() def poll(self, timeout=None): try: return self.incoming_queue.get(timeout=timeout) except eventlet.queue.Empty: return None, None def close(self): for thread in self.thread_by_socket.values(): thread.kill() self.thread_by_socket = {} class GreenExecutor(zmq_poller.Executor): def __init__(self, method): self._method = method super(GreenExecutor, self).__init__(None) def _loop(self): while True: self._method() eventlet.sleep() def execute(self): if self.thread is None: self.thread = eventlet.spawn(self._loop) def stop(self): if self.thread is not None: self.thread.kill() self.thread = None oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/matchmaker/0000775000175100017510000000000013224676256025721 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/matchmaker/zmq_matchmaker_redis.py0000666000175100017510000004041013224676046032462 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
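# Illustrative sketch (not part of the original tree): the fan-in pattern
# behind GreenPoller above -- one green thread per source feeding a shared
# queue so that a single consumer can wait on many sources at once.
# Assumes eventlet; the blocking ``source`` callables are hypothetical.

import eventlet


def fan_in(sources):
    """Spawn one green thread per source; funnel results into one queue."""
    queue = eventlet.queue.LightQueue()

    def _pump(source):
        while True:
            queue.put(source())  # blocking read from one source
            eventlet.sleep()     # yield to the other green threads

    for source in sources:
        eventlet.spawn(_pump, source)
    return queue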
import abc import functools import logging import random import time from oslo_config import cfg from oslo_utils import importutils import six import tenacity from oslo_messaging._drivers.zmq_driver.matchmaker import zmq_matchmaker_base from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_updater from oslo_messaging._i18n import _LE, _LI, _LW redis = importutils.try_import('redis') redis_sentinel = importutils.try_import('redis.sentinel') LOG = logging.getLogger(__name__) matchmaker_redis_opts = [ cfg.StrOpt('host', default='127.0.0.1', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='Host to locate redis.'), cfg.PortOpt('port', default=6379, deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='Use this port to connect to redis host.'), cfg.StrOpt('password', default='', secret=True, deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='Password for Redis server (optional).'), cfg.ListOpt('sentinel_hosts', default=[], deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='List of Redis Sentinel hosts (fault tolerance mode), ' 'e.g., [host:port, host1:port ... ]'), cfg.StrOpt('sentinel_group_name', default='oslo-messaging-zeromq', help='Redis replica set name.'), cfg.IntOpt('wait_timeout', default=2000, help='Time in ms to wait between connection attempts.'), cfg.IntOpt('check_timeout', default=20000, help='Time in ms to wait before the transaction is killed.'), cfg.IntOpt('socket_timeout', default=10000, help='Timeout in ms on blocking socket operations.'), ] _PUBLISHERS_KEY = "PUBLISHERS" _ROUTERS_KEY = "ROUTERS" def write_to_redis_connection_warn(func): @functools.wraps(func) def func_wrapper(self, *args, **kwargs): # try to perform a write operation to all available hosts success = False for redis_instance in self._redis_instances: if not redis_instance._is_available: continue try: func(self, redis_instance, *args, **kwargs) success = True except redis.ConnectionError: LOG.warning(_LW("Redis host %s is not available now."), redis_instance._address) redis_instance._is_available = False redis_instance._ready_from = float("inf") if not success: raise zmq_matchmaker_base.MatchmakerUnavailable() return func_wrapper def read_from_redis_connection_warn(func): @functools.wraps(func) def func_wrapper(self, *args, **kwargs): # try to perform a read operation from any available and ready host for redis_instance in self._redis_instances: if not redis_instance._is_available \ or redis_instance._ready_from > time.time(): continue try: return func(self, redis_instance, *args, **kwargs) except redis.ConnectionError: LOG.warning(_LW("Redis host %s is not available now."), redis_instance._address) redis_instance._is_available = False redis_instance._ready_from = float("inf") raise zmq_matchmaker_base.MatchmakerUnavailable() return func_wrapper def no_reraise(func): def func_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except zmq_matchmaker_base.MatchmakerUnavailable: pass return func_wrapper def empty_list_on_error(func): def func_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except zmq_matchmaker_base.MatchmakerUnavailable: return [] return func_wrapper def is_empty(hosts): return not hosts @six.add_metaclass(abc.ABCMeta) class MatchmakerRedisBase(zmq_matchmaker_base.MatchmakerBase): def __init__(self, conf, *args, **kwargs): if redis is None: raise 
ImportError(_LE("Redis package is not available!")) super(MatchmakerRedisBase, self).__init__(conf, *args, **kwargs) self.conf.register_opts(matchmaker_redis_opts, "matchmaker_redis") @abc.abstractmethod def _sadd(self, key, value, expire): pass @abc.abstractmethod def _srem(self, key, value): pass @abc.abstractmethod def _smembers(self, key): pass @abc.abstractmethod def _ttl(self, key): pass @no_reraise def register_publisher(self, hostname, expire=-1): hostname = ','.join(hostname) self._sadd(_PUBLISHERS_KEY, hostname, expire) self._sadd(hostname, ' ', expire) @no_reraise def unregister_publisher(self, hostname): hostname = ','.join(hostname) self._srem(_PUBLISHERS_KEY, hostname) self._srem(hostname, ' ') @empty_list_on_error def get_publishers(self): return [tuple(hostname.split(',')) for hostname in self._smembers(_PUBLISHERS_KEY)] @no_reraise def register_router(self, hostname, expire=-1): self._sadd(_ROUTERS_KEY, hostname, expire) self._sadd(hostname, ' ', expire) @no_reraise def unregister_router(self, hostname): self._srem(_ROUTERS_KEY, hostname) self._srem(hostname, ' ') @empty_list_on_error def get_routers(self): return self._smembers(_ROUTERS_KEY) def get_hosts_by_key(self, key): return self._smembers(key) def register(self, target, hostname, listener_type, expire=-1): if target.server: key = zmq_address.target_to_key(target, listener_type) self._sadd(key, hostname, expire) self._sadd(hostname, ' ', expire) key = zmq_address.prefix_str(target.topic, listener_type) self._sadd(key, hostname, expire) self._sadd(hostname, ' ', expire) @no_reraise def unregister(self, target, hostname, listener_type): if target.server: key = zmq_address.target_to_key(target, listener_type) self._srem(key, hostname) self._srem(hostname, ' ') key = zmq_address.prefix_str(target.topic, listener_type) self._srem(key, hostname) self._srem(hostname, ' ') def get_hosts(self, target, listener_type): hosts = [] if target.server: key = zmq_address.target_to_key(target, listener_type) hosts.extend(self._smembers(key)) else: key = zmq_address.prefix_str(target.topic, listener_type) hosts.extend(self._smembers(key)) LOG.debug("[Redis] get_hosts for target %(target)s: %(hosts)s", {"target": target, "hosts": hosts}) return hosts def get_hosts_retry(self, target, listener_type): return self._retry_method(target, listener_type, self.get_hosts) def get_hosts_fanout(self, target, listener_type): key = zmq_address.target_to_key(target, listener_type) hosts = list(self._smembers(key)) LOG.debug("[Redis] get_hosts_fanout for target %(target)s: %(hosts)s", {"target": target, "hosts": hosts}) return hosts def get_hosts_fanout_retry(self, target, listener_type): return self._retry_method(target, listener_type, self.get_hosts_fanout) def _retry_method(self, target, listener_type, method): wait_timeout = self.conf.matchmaker_redis.wait_timeout / 1000. check_timeout = self.conf.matchmaker_redis.check_timeout / 1000. 
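        # Retry policy for the decorated helper below: keep calling
        # ``method`` while it returns an empty host list, wait
        # ``wait_timeout`` seconds between attempts, and give up after
        # ``check_timeout`` seconds in total.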
@tenacity.retry(retry=tenacity.retry_if_result(is_empty), wait=tenacity.wait_fixed(wait_timeout), stop=tenacity.stop_after_delay(check_timeout)) def _get_hosts_retry(target, listener_type): return method(target, listener_type) return _get_hosts_retry(target, listener_type) class MatchmakerRedis(MatchmakerRedisBase): def __init__(self, conf, *args, **kwargs): super(MatchmakerRedis, self).__init__(conf, *args, **kwargs) self._redis_hosts = self._extract_redis_hosts() self._redis_instances = [ redis.StrictRedis(host=redis_host["host"], port=redis_host["port"], password=redis_host["password"]) for redis_host in self._redis_hosts ] for redis_host, redis_instance \ in six.moves.zip(self._redis_hosts, self._redis_instances): address = "{host}:{port}".format(host=redis_host["host"], port=redis_host["port"]) redis_instance._address = address is_available = self._check_availability(redis_instance) if is_available: redis_instance._is_available = True redis_instance._ready_from = time.time() else: LOG.warning(_LW("Redis host %s is not available now."), address) redis_instance._is_available = False redis_instance._ready_from = float("inf") # NOTE(gdavoian): store instances in a random order # (for the sake of load balancing) random.shuffle(self._redis_instances) self._availability_updater = \ MatchmakerRedisAvailabilityUpdater(self.conf, self) def _extract_redis_hosts(self): if self.url and self.url.hosts: return [{"host": redis_host.hostname, "port": redis_host.port, "password": redis_host.password} for redis_host in self.url.hosts] else: # FIXME(gdavoian): remove the code below along with the # corresponding deprecated options in the next release return [{"host": self.conf.matchmaker_redis.host, "port": self.conf.matchmaker_redis.port, "password": self.conf.matchmaker_redis.password}] @staticmethod def _check_availability(redis_instance): try: redis_instance.ping() return True except redis.ConnectionError: return False @write_to_redis_connection_warn def _sadd(self, redis_instance, key, value, expire): redis_instance.sadd(key, value) if expire > 0: redis_instance.expire(key, expire) @write_to_redis_connection_warn def _srem(self, redis_instance, key, value): redis_instance.srem(key, value) @read_from_redis_connection_warn def _ttl(self, redis_instance, key): # NOTE(ozamiatin): If the specified key doesn't exist, # the TTL function would return -2. If the key exists, # but doesn't have an expiration associated, # the TTL function would return -1.
For more information, # please visit http://redis.io/commands/ttl return redis_instance.ttl(key) @read_from_redis_connection_warn def _smembers(self, redis_instance, key): hosts = redis_instance.smembers(key) return [host for host in hosts if redis_instance.ttl(host) >= -1] class MatchmakerRedisAvailabilityUpdater(zmq_updater.UpdaterBase): _MIN_SLEEP_FOR = 10 def __init__(self, conf, matchmaker): super(MatchmakerRedisAvailabilityUpdater, self).__init__( conf, matchmaker, self._update_availability, sleep_for=conf.oslo_messaging_zmq.zmq_target_update ) def _update_availability(self): fraction_of_available_instances = 0 for redis_instance in self.matchmaker._redis_instances: if not redis_instance._is_available: is_available = \ self.matchmaker._check_availability(redis_instance) if is_available: LOG.info(_LI("Redis host %s is available again."), redis_instance._address) fraction_of_available_instances += 1 # NOTE(gdavoian): mark an instance as available for # writing to, but wait until all services register # themselves in it for making the instance ready for # reading from redis_instance._is_available = True redis_instance._ready_from = time.time() + \ self.conf.oslo_messaging_zmq.zmq_target_expire else: fraction_of_available_instances += 1 fraction_of_available_instances /= \ float(len(self.matchmaker._redis_instances)) # NOTE(gdavoian): make the sleep time proportional to the number of # currently available instances self._sleep_for = max(self.conf.oslo_messaging_zmq.zmq_target_update * fraction_of_available_instances, self._MIN_SLEEP_FOR) class MatchmakerSentinel(MatchmakerRedisBase): def __init__(self, conf, *args, **kwargs): super(MatchmakerSentinel, self).__init__(conf, *args, **kwargs) socket_timeout = self.conf.matchmaker_redis.socket_timeout / 1000. 
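# NOTE (editorial sketch): redis.sentinel.Sentinel discovers the current
# master and replicas of a named replica set, which is exactly what the
# __init__ below relies on. A minimal illustration of that redis-py API
# (sentinel address and group name are hypothetical):
#
#     from redis.sentinel import Sentinel
#
#     sentinel = Sentinel([('127.0.0.1', 26379)], socket_timeout=0.1)
#     master = sentinel.master_for('oslo-messaging-zeromq')   # read-write
#     replica = sentinel.slave_for('oslo-messaging-zeromq')   # read-only
#     master.sadd('PUBLISHERS', 'host:1234')
#     replica.smembers('PUBLISHERS')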
self._sentinel_hosts, self._password, self._master_group = \ self._extract_sentinel_hosts() self._sentinel = redis_sentinel.Sentinel( sentinels=self._sentinel_hosts, socket_timeout=socket_timeout, password=self._password) self._slave = self._master = None @property def _redis_master(self): try: if not self._master: self._master = self._sentinel.master_for(self._master_group) return self._master except redis_sentinel.MasterNotFoundError: raise zmq_matchmaker_base.MatchmakerUnavailable() @property def _redis_slave(self): try: if not self._slave: self._slave = self._sentinel.slave_for(self._master_group) except redis_sentinel.SlaveNotFoundError: # use the master as slave (temporary) return self._redis_master return self._slave def _extract_sentinel_hosts(self): sentinels = [] master_group = self.conf.matchmaker_redis.sentinel_group_name master_password = None if self.url and self.url.hosts: for host in self.url.hosts: target = host.hostname, host.port if host.password: master_password = host.password sentinels.append(target) if self.url.virtual_host: # url://:pass@sentinel_a,:pass@sentinel_b/master_group_name master_group = self.url.virtual_host elif self.conf.matchmaker_redis.sentinel_hosts: s = self.conf.matchmaker_redis.sentinel_hosts sentinels.extend([tuple(target.split(":")) for target in s]) master_password = self.conf.matchmaker_redis.password return sentinels, master_password, master_group def _sadd(self, key, value, expire): self._redis_master.sadd(key, value) if expire > 0: self._redis_master.expire(key, expire) def _srem(self, key, value): self._redis_master.srem(key, value) def _smembers(self, key): hosts = self._redis_slave.smembers(key) return [host for host in hosts if self._ttl(host) >= -1] def _ttl(self, key): return self._redis_slave.ttl(key) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/matchmaker/__init__.py0000666000175100017510000000000013224676046030017 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/matchmaker/zmq_matchmaker_base.py0000777000175100017510000002304713224676046032300 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import logging import six import time from oslo_messaging._drivers import common as rpc_common from oslo_messaging._drivers.zmq_driver import zmq_address from oslo_messaging._drivers.zmq_driver import zmq_async from oslo_messaging._i18n import _LE LOG = logging.getLogger(__name__) class MatchmakerUnavailable(rpc_common.RPCException): """Exception is raised on connection error to matchmaker service""" def __init__(self): super(MatchmakerUnavailable, self).__init__( message=_LE("Matchmaker is not currently available.")) @six.add_metaclass(abc.ABCMeta) class MatchmakerBase(object): def __init__(self, conf, *args, **kwargs): super(MatchmakerBase, self).__init__() self.conf = conf self.url = kwargs.get('url') @abc.abstractmethod def register_publisher(self, hostname, expire=-1): """Register publisher on nameserver. 
This works for PUB-SUB only :param hostname: host for the topic in "host:port" format host for back-chatter in "host:port" format :type hostname: tuple :param expire: record expiration timeout :type expire: int """ @abc.abstractmethod def unregister_publisher(self, hostname): """Unregister publisher on nameserver. This works for PUB-SUB only :param hostname: host for the topic in "host:port" format host for back-chatter in "host:port" format :type hostname: tuple """ @abc.abstractmethod def get_publishers(self): """Get all publisher-hosts from nameserver. :returns: a list of tuples of strings "hostname:port" hosts """ @abc.abstractmethod def register_router(self, hostname, expire=-1): """Register router on the nameserver. This works for ROUTER proxy only :param hostname: host for the topic in "host:port" format :type hostname: str :param expire: record expiration timeout :type expire: int """ @abc.abstractmethod def unregister_router(self, hostname): """Unregister router on the nameserver. This works for ROUTER proxy only :param hostname: host for the topic in "host:port" format :type hostname: str """ @abc.abstractmethod def get_routers(self): """Get all router-hosts from nameserver. :returns: a list of strings "hostname:port" hosts """ @abc.abstractmethod def register(self, target, hostname, listener_type, expire=-1): """Register target on nameserver. If record already exists and has expiration timeout it will be updated. Existing records without timeout will stay untouched :param target: the target for host :type target: Target :param hostname: host for the topic in "host:port" format :type hostname: str :param listener_type: listener socket type ROUTER, SUB etc. :type listener_type: str :param expire: record expiration timeout :type expire: int """ @abc.abstractmethod def unregister(self, target, hostname, listener_type): """Unregister target from nameserver. :param target: the target for host :type target: Target :param hostname: host for the topic in "host:port" format :type hostname: str :param listener_type: listener socket type ROUTER, SUB etc. :type listener_type: str """ @abc.abstractmethod def get_hosts(self, target, listener_type): """Get all hosts from nameserver by target. :param target: the default target for invocations :type target: Target :param listener_type: listener socket type ROUTER, SUB etc. :type listener_type: str :returns: a list of "hostname:port" hosts """ @abc.abstractmethod def get_hosts_retry(self, target, listener_type): """Retry if not hosts - used on client first time connection. :param target: the default target for invocations :type target: Target :param listener_type: listener socket type ROUTER, SUB etc. :type listener_type: str :returns: a list of "hostname:port" hosts """ @abc.abstractmethod def get_hosts_fanout(self, target, listener_type): """Get all hosts for fanout from nameserver by target. :param target: the default target for invocations :type target: Target :param listener_type: listener socket type ROUTER, SUB etc. :type listener_type: str :returns: a list of "hostname:port" hosts """ @abc.abstractmethod def get_hosts_fanout_retry(self, target, listener_type): """Retry if not host for fanout - used on client first time connection. :param target: the default target for invocations :type target: Target :param listener_type: listener socket type ROUTER, SUB etc. 
:type listener_type: str :returns: a list of "hostname:port" hosts """ class MatchmakerDummy(MatchmakerBase): def __init__(self, conf, *args, **kwargs): super(MatchmakerDummy, self).__init__(conf, *args, **kwargs) self._cache = collections.defaultdict(list) self._publishers = set() self._routers = set() self._address = {} self.executor = zmq_async.get_executor(method=self._loop) self.executor.execute() def register_publisher(self, hostname, expire=-1): if hostname not in self._publishers: self._publishers.add(hostname) self._address[hostname] = expire def unregister_publisher(self, hostname): if hostname in self._publishers: self._publishers.remove(hostname) if hostname in self._address: self._address.pop(hostname) def get_publishers(self): hosts = [host for host in self._publishers if self._address[host] > 0] return hosts def register_router(self, hostname, expire=-1): if hostname not in self._routers: self._routers.add(hostname) self._address[hostname] = expire def unregister_router(self, hostname): if hostname in self._routers: self._routers.remove(hostname) if hostname in self._address: self._address.pop(hostname) def get_routers(self): hosts = [host for host in self._routers if self._address[host] > 0] return hosts def _loop(self): for hostname in self._address: expire = self._address[hostname] if expire > 0: self._address[hostname] = expire - 1 time.sleep(1) def register(self, target, hostname, listener_type, expire=-1): if target.server: key = zmq_address.target_to_key(target, listener_type) if hostname not in self._cache[key]: self._cache[key].append(hostname) key = zmq_address.prefix_str(target.topic, listener_type) if hostname not in self._cache[key]: self._cache[key].append(hostname) self._address[hostname] = expire def unregister(self, target, hostname, listener_type): if target.server: key = zmq_address.target_to_key(target, listener_type) if hostname in self._cache[key]: self._cache[key].remove(hostname) key = zmq_address.prefix_str(target.topic, listener_type) if hostname in self._cache[key]: self._cache[key].remove(hostname) if hostname in self._address: self._address.pop(hostname) def get_hosts(self, target, listener_type): hosts = [] if target.server: key = zmq_address.target_to_key(target, listener_type) hosts.extend([host for host in self._cache[key] if self._address[host] > 0]) if not hosts: key = zmq_address.prefix_str(target.topic, listener_type) hosts.extend([host for host in self._cache[key] if self._address[host] > 0]) LOG.debug("[Dummy] get_hosts for target %(target)s: %(hosts)s", {"target": target, "hosts": hosts}) return hosts def get_hosts_retry(self, target, listener_type): # Do not complicate dummy matchmaker # This method will act smarter in real world matchmakers return self.get_hosts(target, listener_type) def get_hosts_fanout(self, target, listener_type): hosts = [] key = zmq_address.target_to_key(target, listener_type) hosts.extend([host for host in self._cache[key] if self._address[host] > 0]) LOG.debug("[Dummy] get_hosts_fanout for target %(target)s: %(hosts)s", {"target": target, "hosts": hosts}) return hosts def get_hosts_fanout_retry(self, target, listener_type): # Do not complicate dummy matchmaker # This method will act smarter in real world matchmakers return self.get_hosts_fanout(target, listener_type) oslo.messaging-5.35.0/oslo_messaging/_drivers/zmq_driver/zmq_updater.py0000666000175100017510000000322213224676046026510 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import time import six from oslo_messaging._drivers.zmq_driver import zmq_async zmq = zmq_async.import_zmq() class UpdaterBase(object): def __init__(self, conf, matchmaker, update_method, sleep_for): self.conf = conf self.matchmaker = matchmaker self.update_method = update_method self._sleep_for = sleep_for self.executor = zmq_async.get_executor(method=self._update_loop) self.executor.execute() def stop(self): self.executor.stop() def _update_loop(self): self.update_method() time.sleep(self._sleep_for) def cleanup(self): self.executor.stop() @six.add_metaclass(abc.ABCMeta) class ConnectionUpdater(UpdaterBase): def __init__(self, conf, matchmaker, socket): self.socket = socket super(ConnectionUpdater, self).__init__( conf, matchmaker, self._update_connection, conf.oslo_messaging_zmq.zmq_target_update) @abc.abstractmethod def _update_connection(self): """Update connection info""" oslo.messaging-5.35.0/oslo_messaging/_drivers/kafka_driver/0000775000175100017510000000000013224676256024053 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/kafka_driver/__init__.py0000666000175100017510000000000013224676046026151 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_drivers/kafka_driver/kafka_options.py0000666000175100017510000000541113224676077027261 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_messaging._drivers import common KAFKA_OPTS = [ cfg.StrOpt('kafka_default_host', default='localhost', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='Default Kafka broker Host'), cfg.PortOpt('kafka_default_port', default=9092, deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", help='Default Kafka broker Port'), cfg.IntOpt('kafka_max_fetch_bytes', default=1024 * 1024, help='Max fetch bytes of Kafka consumer'), cfg.FloatOpt('kafka_consumer_timeout', default=1.0, help='Default timeout(s) for Kafka consumers'), cfg.IntOpt('pool_size', default=10, deprecated_for_removal=True, deprecated_reason='Driver no longer uses connection pool. ', help='Pool Size for Kafka Consumers'), cfg.IntOpt('conn_pool_min_size', default=2, deprecated_for_removal=True, deprecated_reason='Driver no longer uses connection pool. ', help='The pool size limit for connections expiration policy'), cfg.IntOpt('conn_pool_ttl', default=1200, deprecated_for_removal=True, deprecated_reason='Driver no longer uses connection pool. 
', help='The time-to-live in sec of idle connections in the pool'), cfg.StrOpt('consumer_group', default="oslo_messaging_consumer", help='Group id for Kafka consumer. Consumers in one group ' 'will coordinate message consumption'), cfg.FloatOpt('producer_batch_timeout', default=0., help="Upper bound on the delay for KafkaProducer batching " "in seconds"), cfg.IntOpt('producer_batch_size', default=16384, help='Size of batch for the producer async send') ] def register_opts(conf, url): opt_group = cfg.OptGroup(name='oslo_messaging_kafka', title='Kafka driver options') conf.register_group(opt_group) conf.register_opts(KAFKA_OPTS, group=opt_group) return common.ConfigOptsProxy(conf, url, opt_group.name) oslo.messaging-5.35.0/oslo_messaging/exceptions.py0000666000175100017510000000242313224676046022341 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['MessagingException', 'MessagingTimeout', 'MessageDeliveryFailure', 'InvalidTarget'] import six class MessagingException(Exception): """Base class for exceptions.""" class MessagingTimeout(MessagingException): """Raised if message sending times out.""" class MessageDeliveryFailure(MessagingException): """Raised if message sending failed after the asked retry.""" class InvalidTarget(MessagingException, ValueError): """Raised if a target does not meet certain pre-conditions.""" def __init__(self, msg, target): msg = msg + ":" + six.text_type(target) super(InvalidTarget, self).__init__(msg) self.target = target oslo.messaging-5.35.0/oslo_messaging/_i18n.py0000666000175100017510000000221113224676046021071 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html """ import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='oslo_messaging') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical oslo.messaging-5.35.0/oslo_messaging/opts.py0000666000175100017510000000717113224676046021152 0ustar zuulzuul00000000000000 # Copyright 2014 Red Hat, Inc. 
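# NOTE (editorial sketch): the _LI/_LW/_LE markers from the _i18n module
# shown above wrap log messages for translation. They are used with deferred
# interpolation so translators see the full template, e.g. (taken from the
# matchmaker code earlier in this package):
#
#     LOG.warning(_LW("Redis host %s is not available now."), address)
#
# Passing `address` as a logging argument, rather than %-formatting inline,
# lets the logging layer interpolate only when the record is actually
# emitted.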
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'list_opts' ] import copy import itertools from oslo_messaging._drivers import amqp from oslo_messaging._drivers.amqp1_driver import opts as amqp_opts from oslo_messaging._drivers import base as drivers_base from oslo_messaging._drivers import impl_pika from oslo_messaging._drivers import impl_rabbit from oslo_messaging._drivers.impl_zmq import zmq_options from oslo_messaging._drivers.kafka_driver import kafka_options from oslo_messaging._drivers.pika_driver import pika_connection_factory from oslo_messaging._drivers.zmq_driver.matchmaker import zmq_matchmaker_redis from oslo_messaging.notify import notifier from oslo_messaging.rpc import client from oslo_messaging import server from oslo_messaging import transport _global_opt_lists = [ drivers_base.base_opts, zmq_options.zmq_opts, server._pool_opts, client._client_opts, transport._transport_opts, ] _opts = [ (None, list(itertools.chain(*_global_opt_lists))), ('matchmaker_redis', zmq_matchmaker_redis.matchmaker_redis_opts), ('oslo_messaging_zmq', zmq_options.zmq_opts), ('oslo_messaging_amqp', amqp_opts.amqp1_opts), ('oslo_messaging_notifications', notifier._notifier_opts), ('oslo_messaging_rabbit', list( itertools.chain(amqp.amqp_opts, impl_rabbit.rabbit_opts, pika_connection_factory.pika_opts, impl_pika.pika_pool_opts, impl_pika.message_opts, impl_pika.notification_opts, impl_pika.rpc_opts))), ('oslo_messaging_kafka', kafka_options.KAFKA_OPTS), ] def list_opts(): """Return a list of oslo.config options available in the library. The returned list includes all oslo.config options which may be registered at runtime by the library. Each element of the list is a tuple. The first element is the name of the group under which the list of elements in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. This function is also discoverable via the 'oslo_messaging' entry point under the 'oslo.config.opts' namespace. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by this library. :returns: a list of (group_name, opts) tuples """ return [(g, copy.deepcopy(o)) for g, o in _opts] def set_defaults(conf, executor_thread_pool_size=None): """Set defaults for configuration variables. Overrides default options values. :param conf: Config instance specified to set default options in it. Using of instances instead of a global config object prevents conflicts between options declaration. :type conf: oslo.config.cfg.ConfigOpts instance. :keyword executor_thread_pool_size: Size of executor thread pool. 
:type executor_thread_pool_size: int :default executor_thread_pool_size: None """ if executor_thread_pool_size is not None: conf.set_default('executor_thread_pool_size', executor_thread_pool_size) oslo.messaging-5.35.0/oslo_messaging/server.py0000666000175100017510000004157313224676046021477 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'ExecutorLoadFailure', 'MessageHandlingServer', 'MessagingServerError', 'ServerListenError', ] import abc import functools import inspect import logging import threading import traceback import debtcollector from oslo_config import cfg from oslo_service import service from oslo_utils import eventletutils from oslo_utils import timeutils import six from stevedore import driver from oslo_messaging._drivers import base as driver_base from oslo_messaging._i18n import _LW from oslo_messaging import exceptions LOG = logging.getLogger(__name__) # The default number of seconds of waiting after which we will emit a log # message DEFAULT_LOG_AFTER = 30 _pool_opts = [ cfg.IntOpt('executor_thread_pool_size', default=64, deprecated_name="rpc_thread_pool_size", help='Size of executor thread pool when' ' executor is threading or eventlet.'), ] class MessagingServerError(exceptions.MessagingException): """Base class for all MessageHandlingServer exceptions.""" class ExecutorLoadFailure(MessagingServerError): """Raised if an executor can't be loaded.""" def __init__(self, executor, ex): msg = 'Failed to load executor "%s": %s' % (executor, ex) super(ExecutorLoadFailure, self).__init__(msg) self.executor = executor self.ex = ex class ServerListenError(MessagingServerError): """Raised if we failed to listen on a target.""" def __init__(self, target, ex): msg = 'Failed to listen on target "%s": %s' % (target, ex) super(ServerListenError, self).__init__(msg) self.target = target self.ex = ex class TaskTimeout(MessagingServerError): """Raised if we timed out waiting for a task to complete.""" class _OrderedTask(object): """A task which must be executed in a particular order. A caller may wait for this task to complete by calling `wait_for_completion`. A caller may run this task with `run_once`, which will ensure that however many times the task is called it only runs once. Simultaneous callers will block until the running task completes, which means that any caller can be sure that the task has completed after run_once returns. """ INIT = 0 # The task has not yet started RUNNING = 1 # The task is running somewhere COMPLETE = 2 # The task has run somewhere def __init__(self, name): """Create a new _OrderedTask. :param name: The name of this task. Used in log messages. 
""" super(_OrderedTask, self).__init__() self._name = name self._cond = threading.Condition() self._state = self.INIT def _wait(self, condition, msg, log_after, timeout_timer): """Wait while condition() is true. Write a log message if condition() has not become false within `log_after` seconds. Raise TaskTimeout if timeout_timer expires while waiting. """ log_timer = None if log_after != 0: log_timer = timeutils.StopWatch(duration=log_after) log_timer.start() while condition(): if log_timer is not None and log_timer.expired(): LOG.warning(_LW('Possible hang: %s'), msg) LOG.debug(''.join(traceback.format_stack())) # Only log once. After than we wait indefinitely without # logging. log_timer = None if timeout_timer is not None and timeout_timer.expired(): raise TaskTimeout(msg) timeouts = [] if log_timer is not None: timeouts.append(log_timer.leftover()) if timeout_timer is not None: timeouts.append(timeout_timer.leftover()) wait = None if timeouts: wait = min(timeouts) self._cond.wait(wait) @property def complete(self): return self._state == self.COMPLETE def wait_for_completion(self, caller, log_after, timeout_timer): """Wait until this task has completed. :param caller: The name of the task which is waiting. :param log_after: Emit a log message if waiting longer than `log_after` seconds. :param timeout_timer: Raise TaskTimeout if StopWatch object `timeout_timer` expires while waiting. """ with self._cond: msg = '%s is waiting for %s to complete' % (caller, self._name) self._wait(lambda: not self.complete, msg, log_after, timeout_timer) def run_once(self, fn, log_after, timeout_timer): """Run a task exactly once. If it is currently running in another thread, wait for it to complete. If it has already run, return immediately without running it again. :param fn: The task to run. It must be a callable taking no arguments. It may optionally return another callable, which also takes no arguments, which will be executed after completion has been signaled to other threads. :param log_after: Emit a log message if waiting longer than `log_after` seconds. :param timeout_timer: Raise TaskTimeout if StopWatch object `timeout_timer` expires while waiting. """ with self._cond: if self._state == self.INIT: self._state = self.RUNNING # Note that nothing waits on RUNNING, so no need to notify # We need to release the condition lock before calling out to # prevent deadlocks. Reacquire it immediately afterwards. self._cond.release() try: post_fn = fn() finally: self._cond.acquire() self._state = self.COMPLETE self._cond.notify_all() if post_fn is not None: # Release the condition lock before calling out to prevent # deadlocks. Reacquire it immediately afterwards. 
self._cond.release() try: post_fn() finally: self._cond.acquire() elif self._state == self.RUNNING: msg = ('%s is waiting for another thread to complete' % self._name) self._wait(lambda: self._state == self.RUNNING, msg, log_after, timeout_timer) class _OrderedTaskRunner(object): """Mixin for a class which executes ordered tasks.""" def __init__(self, *args, **kwargs): super(_OrderedTaskRunner, self).__init__(*args, **kwargs) # Get a list of methods on this object which have the _ordered # attribute self._tasks = [name for (name, member) in inspect.getmembers(self) if inspect.ismethod(member) and getattr(member, '_ordered', False)] self.reset_states() self._reset_lock = threading.Lock() def reset_states(self): # Create new task states for tasks in reset self._states = {task: _OrderedTask(task) for task in self._tasks} @staticmethod def decorate_ordered(fn, state, after, reset_after): @functools.wraps(fn) def wrapper(self, *args, **kwargs): # If the reset_after state has already completed, reset state so # we can run again. # NOTE(mdbooth): This is ugly and requires external locking to be # deterministic when using multiple threads. Consider a thread that # does: server.stop(), server.wait(). If another thread causes a # reset between stop() and wait(), this will not have the intended # behaviour. It is safe without external locking, if the caller # instantiates a new object. with self._reset_lock: if (reset_after is not None and self._states[reset_after].complete): self.reset_states() # Store the states we started with in case the state wraps on us # while we're sleeping. We must wait and run_once in the same # epoch. If the epoch ended while we were sleeping, run_once will # safely do nothing. states = self._states log_after = kwargs.pop('log_after', DEFAULT_LOG_AFTER) timeout = kwargs.pop('timeout', None) timeout_timer = None if timeout is not None: timeout_timer = timeutils.StopWatch(duration=timeout) timeout_timer.start() # Wait for the given preceding state to complete if after is not None: states[after].wait_for_completion(state, log_after, timeout_timer) # Run this state states[state].run_once(lambda: fn(self, *args, **kwargs), log_after, timeout_timer) return wrapper def ordered(after=None, reset_after=None): """A method which will be executed as an ordered task. The method will be called exactly once, however many times it is called. If it is called multiple times simultaneously it will only be called once, but all callers will wait until execution is complete. If `after` is given, this method will not run until `after` has completed. If `reset_after` is given and the target method has completed, allow this task to run again by resetting all task states. :param after: Optionally, the name of another `ordered` method. Wait for the completion of `after` before executing this method. :param reset_after: Optionally, the name of another `ordered` method. Reset all states when calling this method if `reset_after` has completed. """ def _ordered(fn): # Set an attribute on the method so we can find it later setattr(fn, '_ordered', True) state = fn.__name__ return _OrderedTaskRunner.decorate_ordered(fn, state, after, reset_after) return _ordered @six.add_metaclass(abc.ABCMeta) class MessageHandlingServer(service.ServiceBase, _OrderedTaskRunner): """Server for handling messages. Connect a transport to a dispatcher that knows how to process the message using an executor that knows how the app wants to create new tasks. 
""" def __init__(self, transport, dispatcher, executor='blocking'): """Construct a message handling server. The dispatcher parameter is a DispatcherBase instance which is used for routing request to endpoint for processing. The executor parameter controls how incoming messages will be received and dispatched. By default, the most simple executor is used - the blocking executor. It handles only one message at once. It's recommended to use threading or eventlet. :param transport: the messaging transport :type transport: Transport :param dispatcher: has a dispatch() method which is invoked for each incoming request :type dispatcher: DispatcherBase :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str """ self.conf = transport.conf self.conf.register_opts(_pool_opts) self.transport = transport self.dispatcher = dispatcher self.executor_type = executor if self.executor_type == 'blocking': debtcollector.deprecate( 'blocking executor is deprecated. Executor default will be ' 'removed. Use explicitly threading or eventlet instead', version="pike", removal_version="rocky", category=FutureWarning) elif self.executor_type == "eventlet": eventletutils.warn_eventlet_not_patched( expected_patched_modules=['thread'], what="the 'oslo.messaging eventlet executor'") self.listener = None try: mgr = driver.DriverManager('oslo.messaging.executors', self.executor_type) except RuntimeError as ex: raise ExecutorLoadFailure(self.executor_type, ex) self._executor_cls = mgr.driver self._work_executor = None self._started = False super(MessageHandlingServer, self).__init__() def _on_incoming(self, incoming): """Handles on_incoming event :param incoming: incoming request. """ self._work_executor.submit(self._process_incoming, incoming) @abc.abstractmethod def _process_incoming(self, incoming): """Perform processing incoming request :param incoming: incoming request. """ @abc.abstractmethod def _create_listener(self): """Creates listener object for polling requests :return: MessageListenerAdapter """ @ordered(reset_after='stop') def start(self, override_pool_size=None): """Start handling incoming messages. This method causes the server to begin polling the transport for incoming messages and passing them to the dispatcher. Message processing will continue until the stop() method is called. The executor controls how the server integrates with the applications I/O handling strategy - it may choose to poll for messages in a new process, thread or co-operatively scheduled coroutine or simply by registering a callback with an event loop. Similarly, the executor may choose to dispatch messages in a new thread, coroutine or simply the current thread. """ # Warn that restarting will be deprecated if self._started: LOG.warning(_LW('Restarting a MessageHandlingServer is inherently ' 'racy. It is deprecated, and will become a noop ' 'in a future release of oslo.messaging. If you ' 'need to restart MessageHandlingServer you should ' 'instantiate a new object.')) self._started = True executor_opts = {} if self.executor_type in ("threading", "eventlet"): executor_opts["max_workers"] = ( override_pool_size or self.conf.executor_thread_pool_size ) self._work_executor = self._executor_cls(**executor_opts) try: self.listener = self._create_listener() except driver_base.TransportDriverError as ex: raise ServerListenError(self.target, ex) self.listener.start(self._on_incoming) @ordered(after='start') def stop(self): """Stop handling incoming messages. 
Once this method returns, no new incoming messages will be handled by the server. However, the server may still be in the process of handling some messages, and underlying driver resources associated to this server are still in use. See 'wait' for more details. """ self.listener.stop() self._started = False @ordered(after='stop') def wait(self): """Wait for message processing to complete. After calling stop(), there may still be some existing messages which have not been completely processed. The wait() method blocks until all message processing has completed. Once it's finished, the underlying driver resources associated to this server are released (like closing useless network connections). """ self._work_executor.shutdown(wait=True) # Close listener connection after processing all messages self.listener.cleanup() def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. """ # TODO(sergey.vilgelm): implement this method pass oslo.messaging-5.35.0/oslo_messaging/__init__.py0000666000175100017510000000140113224676046021712 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from .exceptions import * from .notify import * from .rpc import * from .serializer import * from .server import * from .target import * from .transport import * oslo.messaging-5.35.0/oslo_messaging/serializer.py0000666000175100017510000000465113224676046022336 0ustar zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['Serializer', 'NoOpSerializer', 'JsonPayloadSerializer'] """Provides the definition of a message serialization handler""" import abc from oslo_serialization import jsonutils import six @six.add_metaclass(abc.ABCMeta) class Serializer(object): """Generic (de-)serialization definition base class.""" @abc.abstractmethod def serialize_entity(self, ctxt, entity): """Serialize something to primitive form. :param ctxt: Request context, in deserialized form :param entity: Entity to be serialized :returns: Serialized form of entity """ @abc.abstractmethod def deserialize_entity(self, ctxt, entity): """Deserialize something from primitive form. :param ctxt: Request context, in deserialized form :param entity: Primitive to be deserialized :returns: Deserialized form of entity """ @abc.abstractmethod def serialize_context(self, ctxt): """Serialize a request context into a dictionary. 
:param ctxt: Request context :returns: Serialized form of context """ @abc.abstractmethod def deserialize_context(self, ctxt): """Deserialize a dictionary into a request context. :param ctxt: Request context dictionary :returns: Deserialized form of entity """ class NoOpSerializer(Serializer): """A serializer that does nothing.""" def serialize_entity(self, ctxt, entity): return entity def deserialize_entity(self, ctxt, entity): return entity def serialize_context(self, ctxt): return ctxt def deserialize_context(self, ctxt): return ctxt class JsonPayloadSerializer(NoOpSerializer): @staticmethod def serialize_entity(context, entity): return jsonutils.to_primitive(entity, convert_instances=True) oslo.messaging-5.35.0/oslo_messaging/target.py0000666000175100017510000001045713224676046021454 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class Target(object): """Identifies the destination of messages. A Target encapsulates all the information to identify where a message should be sent or what messages a server is listening for. Different subsets of the information encapsulated in a Target object is relevant to various aspects of the API: an RPC Server's target: topic and server is required; exchange is optional an RPC endpoint's target: namespace and version is optional an RPC client sending a message: topic is required, all other attributes optional a Notification Server's target: topic is required, exchange is optional; all other attributes ignored a Notifier's target: topic is required, exchange is optional; all other attributes ignored Its attributes are: :param exchange: A scope for topics. Leave unspecified to default to the control_exchange configuration option. :type exchange: str :param topic: A name which identifies the set of interfaces exposed by a server. Multiple servers may listen on a topic and messages will be dispatched to one of the servers selected in a best-effort round-robin fashion (unless fanout is ``True``). :type topic: str :param namespace: Identifies a particular RPC interface (i.e. set of methods) exposed by a server. The default interface has no namespace identifier and is referred to as the null namespace. :type namespace: str :param version: RPC interfaces have a major.minor version number associated with them. A minor number increment indicates a backwards compatible change and an incompatible change is indicated by a major number bump. Servers may implement multiple major versions and clients may require indicate that their message requires a particular minimum minor version. :type version: str :param server: RPC Clients can request that a message be directed to a specific server, rather than just one of a pool of servers listening on the topic. :type server: str :param fanout: Clients may request that a copy of the message be delivered to all servers listening on a topic by setting fanout to ``True``, rather than just one of them. 
:type fanout: bool :param legacy_namespaces: A server always accepts messages specified via the 'namespace' parameter, and may also accept messages defined via this parameter. This option should be used to switch namespaces safely during rolling upgrades. :type legacy_namespaces: list of strings """ def __init__(self, exchange=None, topic=None, namespace=None, version=None, server=None, fanout=None, legacy_namespaces=None): self.exchange = exchange self.topic = topic self.namespace = namespace self.version = version self.server = server self.fanout = fanout self.accepted_namespaces = [namespace] + (legacy_namespaces or []) def __call__(self, **kwargs): for a in ('exchange', 'topic', 'namespace', 'version', 'server', 'fanout'): kwargs.setdefault(a, getattr(self, a)) return Target(**kwargs) def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not self == other def __repr__(self): attrs = [] for a in ['exchange', 'topic', 'namespace', 'version', 'server', 'fanout']: v = getattr(self, a) if v: attrs.append((a, v)) values = ', '.join(['%s=%s' % i for i in attrs]) return '<Target ' + values + '>' def __hash__(self): return id(self) oslo.messaging-5.35.0/oslo_messaging/transport.py0000666000175100017510000005157513224676046022220 0ustar zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # Copyright (c) 2012 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'DriverLoadFailure', 'InvalidTransportURL', 'Transport', 'TransportHost', 'TransportURL', 'get_transport', 'set_transport_defaults', ] import logging from debtcollector import removals from oslo_config import cfg import six from six.moves.urllib import parse from stevedore import driver from oslo_messaging._i18n import _LW from oslo_messaging import exceptions LOG = logging.getLogger(__name__) _transport_opts = [ cfg.StrOpt('transport_url', secret=True, help='The network address and optional user credentials for ' 'connecting to the messaging backend, in URL format. The ' 'expected format is:\n\n' 'driver://[user:pass@]host:port[,[userN:passN@]hostN:' 'portN]/virtual_host?query\n\n' 'Example: rabbit://rabbitmq:password@127.0.0.1:5672//\n\n' 'For full details on the fields in the URL see the ' 'documentation of oslo_messaging.TransportURL at ' 'https://docs.openstack.org/oslo.messaging/latest/' 'reference/transport.html'), cfg.StrOpt('rpc_backend', deprecated_for_removal=True, deprecated_reason="Replaced by [DEFAULT]/transport_url", default='rabbit', help='The messaging driver to use, defaults to rabbit. Other ' 'drivers include amqp and zmq.'), cfg.StrOpt('control_exchange', default='openstack', help='The default exchange under which topics are scoped. May ' 'be overridden by an exchange name specified in the ' 'transport_url option.'), ] def set_transport_defaults(control_exchange): """Set defaults for messaging transport configuration options.
:param control_exchange: the default exchange under which topics are scoped :type control_exchange: str """ cfg.set_defaults(_transport_opts, control_exchange=control_exchange) class Transport(object): """A messaging transport. This is a mostly opaque handle for an underlying messaging transport driver. RPCs and Notifications may use separate messaging systems that utilize different drivers, access permissions, message delivery, etc. To ensure the correct messaging functionality, the corresponding method should be used to construct a Transport object from transport configuration gleaned from the user's configuration and, optionally, a transport URL. The factory method for RPC Transport objects:: def get_rpc_transport(conf, url=None, allowed_remote_exmods=None) If a transport URL is supplied as a parameter, any transport configuration contained in it takes precedence. If no transport URL is supplied, but there is a transport URL supplied in the user's configuration then that URL will take the place of the URL parameter. The factory method for Notification Transport objects:: def get_notification_transport(conf, url=None, allowed_remote_exmods=None) If no transport URL is provided, the URL in the notifications section of the config file will be used. If that URL is also absent, the same transport as specified in the user's default section will be used. The Transport has a single 'conf' property which is the cfg.ConfigOpts instance used to construct the transport object. """ def __init__(self, driver): self.conf = driver.conf self._driver = driver def _require_driver_features(self, requeue=False): self._driver.require_features(requeue=requeue) def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None, retry=None): if not target.topic: raise exceptions.InvalidTarget('A topic is required to send', target) return self._driver.send(target, ctxt, message, wait_for_reply=wait_for_reply, timeout=timeout, retry=retry) def _send_notification(self, target, ctxt, message, version, retry=None): if not target.topic: raise exceptions.InvalidTarget('A topic is required to send', target) self._driver.send_notification(target, ctxt, message, version, retry=retry) def _listen(self, target, batch_size, batch_timeout): if not (target.topic and target.server): raise exceptions.InvalidTarget('A server\'s target must have ' 'topic and server names specified', target) return self._driver.listen(target, batch_size, batch_timeout) def _listen_for_notifications(self, targets_and_priorities, pool, batch_size, batch_timeout): for target, priority in targets_and_priorities: if not target.topic: raise exceptions.InvalidTarget('A target must have ' 'topic specified', target) return self._driver.listen_for_notifications( targets_and_priorities, pool, batch_size, batch_timeout ) def cleanup(self): """Release all resources associated with this transport.""" self._driver.cleanup() class RPCTransport(Transport): """Transport object for RPC.""" def __init__(self, driver): super(RPCTransport, self).__init__(driver) class NotificationTransport(Transport): """Transport object for notifications.""" def __init__(self, driver): super(NotificationTransport, self).__init__(driver) class InvalidTransportURL(exceptions.MessagingException): """Raised if transport URL is invalid.""" def __init__(self, url, msg): super(InvalidTransportURL, self).__init__(msg) self.url = url class DriverLoadFailure(exceptions.MessagingException): """Raised if a transport driver can't be loaded.""" def __init__(self, driver, ex): msg = 'Failed to 
load transport driver "%s": %s' % (driver, ex) super(DriverLoadFailure, self).__init__(msg) self.driver = driver self.ex = ex def _get_transport(conf, url=None, allowed_remote_exmods=None, aliases=None, transport_cls=RPCTransport): allowed_remote_exmods = allowed_remote_exmods or [] conf.register_opts(_transport_opts) if not isinstance(url, TransportURL): url = TransportURL.parse(conf, url, aliases) kwargs = dict(default_exchange=conf.control_exchange, allowed_remote_exmods=allowed_remote_exmods) try: mgr = driver.DriverManager('oslo.messaging.drivers', url.transport.split('+')[0], invoke_on_load=True, invoke_args=[conf, url], invoke_kwds=kwargs) except RuntimeError as ex: raise DriverLoadFailure(url.transport, ex) return transport_cls(mgr.driver) @removals.remove( message='use get_rpc_transport or get_notification_transport' ) @removals.removed_kwarg('aliases', 'Parameter aliases is deprecated for removal.') def get_transport(conf, url=None, allowed_remote_exmods=None, aliases=None): """A factory method for Transport objects. This method will construct a Transport object from transport configuration gleaned from the user's configuration and, optionally, a transport URL. If a transport URL is supplied as a parameter, any transport configuration contained in it takes precedence. If no transport URL is supplied, but there is a transport URL supplied in the user's configuration then that URL will take the place of the URL parameter. In both cases, any configuration not supplied in the transport URL may be taken from individual configuration parameters in the user's configuration. An example transport URL might be:: rabbit://me:passwd@host:5672/virtual_host and can either be passed as a string or a TransportURL object. :param conf: the user configuration :type conf: cfg.ConfigOpts :param url: a transport URL, see :py:class:`transport.TransportURL` :type url: str or TransportURL :param allowed_remote_exmods: a list of modules which a client using this transport will deserialize remote exceptions from :type allowed_remote_exmods: list :param aliases: DEPRECATED: A map of transport alias to transport name :type aliases: dict """ return _get_transport(conf, url, allowed_remote_exmods, aliases, transport_cls=RPCTransport) class TransportHost(object): """A host element of a parsed transport URL.""" def __init__(self, hostname=None, port=None, username=None, password=None): self.hostname = hostname self.port = port self.username = username self.password = password def __hash__(self): return hash((self.hostname, self.port, self.username, self.password)) def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not self == other def __repr__(self): attrs = [] for a in ['hostname', 'port', 'username', 'password']: v = getattr(self, a) if v: attrs.append((a, repr(v))) values = ', '.join(['%s=%s' % i for i in attrs]) return '<TransportHost ' + values + '>' class TransportURL(object): """A parsed transport URL. Transport URLs take the form:: driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query where: driver Specifies the transport driver to use. Typically this is `rabbit` for the RabbitMQ broker. See the documentation for other available transport drivers. [user:pass@]host:port Specifies the network location of the broker. `user` and `pass` are the optional username and password used for authentication with the broker. `user` and `pass` may contain any of the following ASCII characters: * Alphabetic (a-z and A-Z) * Numeric (0-9) * Special characters: & = $ - _ . + !
* ( ) `user` may include at most one `@` character for compatibility with some implementations of SASL. All other characters in `user` and `pass` must be encoded via '%nn'. You may include multiple different network locations separated by commas. The client will connect to any of the available locations and will automatically fail over to another should the connection fail. virtual_host Specifies the "virtual host" within the broker. Support for virtual hosts is specific to the message bus used. query Permits passing driver-specific options which override the corresponding values from the configuration file. :param conf: a ConfigOpts instance :type conf: oslo.config.cfg.ConfigOpts :param transport: a transport name for example 'rabbit' :type transport: str :param virtual_host: a virtual host path for example '/' :type virtual_host: str :param hosts: a list of TransportHost objects :type hosts: list :param aliases: DEPRECATED: a map of transport alias to transport name :type aliases: dict :param query: a dictionary of URL query parameters :type query: dict """ @removals.removed_kwarg('aliases', 'Parameter aliases is deprecated for removal.') def __init__(self, conf, transport=None, virtual_host=None, hosts=None, aliases=None, query=None): self.conf = conf self.conf.register_opts(_transport_opts) self._transport = transport self.virtual_host = virtual_host if hosts is None: self.hosts = [] else: self.hosts = hosts if aliases is None: self.aliases = {} else: self.aliases = aliases if query is None: self.query = {} else: self.query = query self._deprecation_logged = False @property def transport(self): if self._transport is None: transport = self.conf.rpc_backend else: transport = self._transport final_transport = self.aliases.get(transport, transport) if not self._deprecation_logged and final_transport != transport: # NOTE(sileht): The first step is to deprecate this for one cycle,
# to ensure deployers have updated their configuration during Ocata. # Then in P we will deprecate the aliases kwargs of TransportURL() and # get_transport() for consuming applications. LOG.warning('legacy "rpc_backend" is deprecated, ' '"%(legacy_transport)s" must be replaced by ' '"%(final_transport)s"' % { 'legacy_transport': transport, 'final_transport': final_transport}) self._deprecation_logged = True return final_transport @transport.setter def transport(self, value): self._transport = value def __hash__(self): return hash((tuple(self.hosts), self.transport, self.virtual_host)) def __eq__(self, other): return (self.transport == other.transport and self.virtual_host == other.virtual_host and self.hosts == other.hosts) def __ne__(self, other): return not self == other def __repr__(self): attrs = [] for a in ['transport', 'virtual_host', 'hosts']: v = getattr(self, a) if v: attrs.append((a, repr(v))) values = ', '.join(['%s=%s' % i for i in attrs]) return '<TransportURL ' + values + '>' def __str__(self): netlocs = [] for host in self.hosts: username = host.username password = host.password hostname = host.hostname port = host.port # Starting place for the network location netloc = '' # Build the username and password portion of the transport URL if username is not None or password is not None: if username is not None: netloc += parse.quote(username, '') if password is not None: netloc += ':%s' % parse.quote(password, '') netloc += '@' # Build the network location portion of the transport URL if hostname: if ':' in hostname: netloc += '[%s]' % hostname else: netloc += hostname if port is not None: netloc += ':%d' % port netlocs.append(netloc) # Assemble the transport URL url = '%s://%s/' % (self.transport, ','.join(netlocs)) if self.virtual_host: url += parse.quote(self.virtual_host) if self.query: url += '?' + parse.urlencode(self.query, doseq=True) return url @removals.removed_kwarg('aliases', 'Parameter aliases is deprecated for removal.') @classmethod def parse(cls, conf, url=None, aliases=None): """Parse a URL as defined by :py:class:`TransportURL` and return a TransportURL object. Assuming a URL takes the form of:: transport://user:pass@host:port[,userN:passN@hostN:portN]/virtual_host?query then parse the URL and return a TransportURL object. Netloc is parsed following the sequence below: * It is first split by ',' in order to support multiple hosts * Hosts should either all be specified with username/password or all without; hosts specified without credentials have username and password omitted:: user:pass@host1:port1,host2:port2 [ {"username": "user", "password": "pass", "host": "host1:port1"}, {"host": "host2:port2"} ] If the url is not provided, conf.transport_url is parsed instead.
:param conf: a ConfigOpts instance :type conf: oslo.config.cfg.ConfigOpts :param url: The URL to parse :type url: str :param aliases: A map of transport alias to transport name :type aliases: dict :returns: A TransportURL """ if not url: conf.register_opts(_transport_opts) url = url or conf.transport_url if not url: return cls(conf) if aliases is None else cls(conf, aliases=aliases) if not isinstance(url, six.string_types): raise InvalidTransportURL(url, 'Wrong URL type') url = parse.urlparse(url) if not url.scheme: raise InvalidTransportURL(url.geturl(), 'No scheme specified') transport = url.scheme query = {} if url.query: for key, values in parse.parse_qs(url.query).items(): query[key] = ','.join(values) virtual_host = None if url.path.startswith('/'): virtual_host = parse.unquote(url.path[1:]) hosts_with_credentials = [] hosts_without_credentials = [] hosts = [] for host in url.netloc.split(','): if not host: continue hostname = host username = password = port = None if '@' in host: username, hostname = host.rsplit('@', 1) if ':' in username: username, password = username.split(':', 1) password = parse.unquote(password) username = parse.unquote(username) if not hostname: hostname = None elif hostname.startswith('['): # Find the closing ']' and extract the hostname host_end = hostname.find(']') if host_end < 0: # NOTE(Vek): Identical to what Python 2.7's # urlparse.urlparse() raises in this case raise ValueError('Invalid IPv6 URL') port_text = hostname[host_end:] hostname = hostname[1:host_end] # Now we need the port; this is compliant with how urlparse # parses the port data port = None if ':' in port_text: port = port_text.split(':', 1)[1] elif ':' in hostname: hostname, port = hostname.split(':', 1) if port == "": port = None if port is not None: port = int(port) if username is None or password is None: hosts_without_credentials.append(hostname) else: hosts_with_credentials.append(hostname) hosts.append(TransportHost(hostname=hostname, port=port, username=username, password=password)) if (len(hosts_with_credentials) > 0 and len(hosts_without_credentials) > 0): LOG.warning(_LW("All hosts must be set with username/password or " "not at the same time. Hosts with credentials " "are: %(hosts_with_credentials)s. Hosts without " "credentials are %(hosts_without_credentials)s."), {'hosts_with_credentials': hosts_with_credentials, 'hosts_without_credentials': hosts_without_credentials}) if aliases is None: return cls(conf, transport, virtual_host, hosts, query=query) else: return cls(conf, transport, virtual_host, hosts, aliases, query) oslo.messaging-5.35.0/oslo_messaging/hacking/0000775000175100017510000000000013224676256021212 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/hacking/checks.py0000666000175100017510000003363413224676046023034 0ustar zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
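For illustration, here is a minimal sketch, not part of the library source, of how the TransportURL.parse() behaviour documented above plays out for a multi-host URL. The URL and host names are made up; it assumes oslo.config and oslo.messaging are importable:

# A hedged sketch exercising TransportURL.parse(); hypothetical values.
from oslo_config import cfg

import oslo_messaging

conf = cfg.ConfigOpts()
url = oslo_messaging.TransportURL.parse(
    conf, 'rabbit://user:pass@host1:5672,host2:5672/my_vhost')

print(url.transport)     # 'rabbit'
print(url.virtual_host)  # 'my_vhost'
# Only the first netloc carries credentials, so parse() logs the
# mixed-credentials warning and the second TransportHost gets
# username=None and password=None.
for host in url.hosts:
    print('%s:%s %s' % (host.hostname, host.port, host.username))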
import ast import re import six oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+") oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]") oslo_namespace_imports_from_root = re.compile(r"from[\s]+oslo[\s]+import[\s]+") mock_imports_directly = re.compile(r"import[\s]+mock") mock_imports_directly_from = re.compile(r"from[\s]+mock[\s]+import[\s]+") def check_oslo_namespace_imports(logical_line): if re.match(oslo_namespace_imports_from_dot, logical_line): msg = ("O323: '%s' must be used instead of '%s'.") % ( logical_line.replace('oslo.', 'oslo_'), logical_line) yield(0, msg) elif re.match(oslo_namespace_imports_from_root, logical_line): msg = ("O323: '%s' must be used instead of '%s'.") % ( logical_line.replace('from oslo import ', 'import oslo_'), logical_line) yield(0, msg) elif re.match(oslo_namespace_imports_dot, logical_line): msg = ("O323: '%s' must be used instead of '%s'.") % ( logical_line.replace('import', 'from').replace('.', ' import '), logical_line) yield(0, msg) def check_mock_imports(logical_line): if re.match(mock_imports_directly, logical_line): msg = ("O324: '%s' must be used instead of '%s'.") % ( logical_line.replace('import mock', 'from six.moves import mock'), logical_line) yield(0, msg) elif re.match(mock_imports_directly_from, logical_line): msg = "O324: Use mock from six.moves." yield(0, msg) class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pep8.
:param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) class CheckForLoggingIssues(BaseASTChecker): DEBUG_CHECK_DESC = 'O324 Using translated string in debug logging' NONDEBUG_CHECK_DESC = 'O325 Not using translating helper for logging' EXCESS_HELPER_CHECK_DESC = 'O326 Using hints when _ is necessary' USING_DEPRECATED_WARN = 'O327 Using deprecated method warn' LOG_MODULES = ('logging',) I18N_MODULES = ( 'oslo_messaging._i18n._', 'oslo_messaging._i18n._LI', 'oslo_messaging._i18n._LW', 'oslo_messaging._i18n._LE', 'oslo_messaging._i18n._LC', ) TRANS_HELPER_MAP = { 'debug': None, 'info': '_LI', 'warn': '_LW', 'warning': '_LW', 'error': '_LE', 'exception': '_LE', 'critical': '_LC', } def __init__(self, tree, filename): super(CheckForLoggingIssues, self).__init__(tree, filename) self.logger_names = [] self.logger_module_names = [] self.i18n_names = {} # NOTE(dstanek): this kinda accounts for scopes when talking # about only leaf node in the graph self.assignments = {} def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for field, value in ast.iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, ast.AST): item._parent = node self.visit(item) elif isinstance(value, ast.AST): value._parent = node self.visit(value) def _filter_imports(self, module_name, alias): """Keeps lists of logging and i18n imports.""" if module_name in self.LOG_MODULES: self.logger_module_names.append(alias.asname or alias.name) elif module_name in self.I18N_MODULES: self.i18n_names[alias.asname or alias.name] = alias.name def visit_Import(self, node): for alias in node.names: self._filter_imports(alias.name, alias) return super(CheckForLoggingIssues, self).generic_visit(node) def visit_ImportFrom(self, node): for alias in node.names: full_name = '%s.%s' % (node.module, alias.name) self._filter_imports(full_name, alias) return super(CheckForLoggingIssues, self).generic_visit(node) def _find_name(self, node): """Return the fully qualified name of a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' + method_name elif isinstance(node, six.string_types): return node else: # could be Subscript, Call or many more return None def visit_Assign(self, node): """Look for 'LOG = logging.getLogger' This handles the simple case: name = [logging_module].getLogger(...) - or - name = [i18n_name](...) And some much more complex ones: name = [i18n_name](...) % X - or - self.name = [i18n_name](...) % X """ attr_node_types = (ast.Name, ast.Attribute) if (len(node.targets) != 1 or not isinstance(node.targets[0], attr_node_types)): # say no to: "x, y = ..."
return super(CheckForLoggingIssues, self).generic_visit(node) target_name = self._find_name(node.targets[0]) if (isinstance(node.value, ast.BinOp) and isinstance(node.value.op, ast.Mod)): if (isinstance(node.value.left, ast.Call) and isinstance(node.value.left.func, ast.Name) and node.value.left.func.id in self.i18n_names): # NOTE(dstanek): this is done to match cases like: # `msg = _('something %s') % x` node = ast.Assign(value=node.value.left) if not isinstance(node.value, ast.Call): # node.value must be a call to getLogger self.assignments.pop(target_name, None) return super(CheckForLoggingIssues, self).generic_visit(node) # is this a call to an i18n function? if (isinstance(node.value.func, ast.Name) and node.value.func.id in self.i18n_names): self.assignments[target_name] = node.value.func.id return super(CheckForLoggingIssues, self).generic_visit(node) if (not isinstance(node.value.func, ast.Attribute) or not isinstance(node.value.func.value, attr_node_types)): # function must be an attribute on an object like # logging.getLogger return super(CheckForLoggingIssues, self).generic_visit(node) object_name = self._find_name(node.value.func.value) func_name = node.value.func.attr if (object_name in self.logger_module_names and func_name == 'getLogger'): self.logger_names.append(target_name) return super(CheckForLoggingIssues, self).generic_visit(node) def visit_Call(self, node): """Look for the 'LOG.*' calls.""" # obj.method if isinstance(node.func, ast.Attribute): obj_name = self._find_name(node.func.value) if isinstance(node.func.value, ast.Name): method_name = node.func.attr elif isinstance(node.func.value, ast.Attribute): obj_name = self._find_name(node.func.value) method_name = node.func.attr else: # could be Subscript, Call or many more return super(CheckForLoggingIssues, self).generic_visit(node) # if dealing with a logger the method can't be "warn" if obj_name in self.logger_names and method_name == 'warn': msg = node.args[0] # first arg to a logging method is the msg self.add_error(msg, message=self.USING_DEPRECATED_WARN) # must be a logger instance and one of the supported logging methods if (obj_name not in self.logger_names or method_name not in self.TRANS_HELPER_MAP): return super(CheckForLoggingIssues, self).generic_visit(node) # the call must have arguments if not node.args: return super(CheckForLoggingIssues, self).generic_visit(node) if method_name == 'debug': self._process_debug(node) elif method_name in self.TRANS_HELPER_MAP: self._process_non_debug(node, method_name) return super(CheckForLoggingIssues, self).generic_visit(node) def _process_debug(self, node): msg = node.args[0] # first arg to a logging method is the msg # if first arg is a call to an i18n name if (isinstance(msg, ast.Call) and isinstance(msg.func, ast.Name) and msg.func.id in self.i18n_names): self.add_error(msg, message=self.DEBUG_CHECK_DESC) # if the first arg is a reference to an i18n call elif (isinstance(msg, ast.Name) and msg.id in self.assignments and not self._is_raised_later(node, msg.id)): self.add_error(msg, message=self.DEBUG_CHECK_DESC) def _process_non_debug(self, node, method_name): msg = node.args[0] # first arg to a logging method is the msg # if first arg is a call to an i18n name if isinstance(msg, ast.Call): try: func_name = msg.func.id except AttributeError: # in the case of logging only an exception, the msg function # will not have an id associated with it, for instance: # LOG.warning(six.text_type(e)) return # the function name is the correct translation helper # for the logging
method if func_name == self.TRANS_HELPER_MAP[method_name]: return # the function name is an alias for the correct translation # helper for the logging method if (self.i18n_names[func_name] == self.TRANS_HELPER_MAP[method_name]): return self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) # if the first arg is not a reference to the correct i18n hint elif isinstance(msg, ast.Name): # FIXME(dstanek): to make this more robust we should be checking # all names passed into a logging method. we can't right now # because: # 1. We have code like this that we'll fix when dealing with the %: # msg = _('....') % {} # LOG.warning(msg) # 2. We also do LOG.exception(e) in several places. I'm not sure # exactly what we should be doing about that. if msg.id not in self.assignments: return helper_method_name = self.TRANS_HELPER_MAP[method_name] if (self.assignments[msg.id] != helper_method_name and not self._is_raised_later(node, msg.id)): self.add_error(msg, message=self.NONDEBUG_CHECK_DESC) elif (self.assignments[msg.id] == helper_method_name and self._is_raised_later(node, msg.id)): self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC) def _is_raised_later(self, node, name): def find_peers(node): node_for_line = node._parent for _field, value in ast.iter_fields(node._parent._parent): if isinstance(value, list) and node_for_line in value: return value[value.index(node_for_line) + 1:] continue return [] peers = find_peers(node) for peer in peers: if isinstance(peer, ast.Raise): if six.PY3: exc = peer.exc else: exc = peer.type if (isinstance(exc, ast.Call) and len(exc.args) > 0 and isinstance(exc.args[0], ast.Name) and name in (a.id for a in exc.args)): return True else: return False elif isinstance(peer, ast.Assign): if name in (t.id for t in peer.targets if hasattr(t, 'id')): return False def factory(register): register(CheckForLoggingIssues) register(check_oslo_namespace_imports) register(check_mock_imports) oslo.messaging-5.35.0/oslo_messaging/hacking/__init__.py0000666000175100017510000000000013224676046023310 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/notify/0000775000175100017510000000000013224676256021116 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/notify/_impl_log.py0000666000175100017510000000336513224676046023437 0ustar zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import warnings from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_messaging.notify import notifier class LogDriver(notifier.Driver): "Publish notifications via Python logging infrastructure." # NOTE(dhellmann): For backwards-compatibility with configurations # that may have modified the settings for this logger using a # configuration file, we keep the name # 'oslo.messaging.notification' even though the package is now # 'oslo_messaging'.
LOGGER_BASE = 'oslo.messaging.notification' def notify(self, ctxt, message, priority, retry): logger = logging.getLogger('%s.%s' % (self.LOGGER_BASE, message['event_type'])) method = getattr(logger, priority.lower(), None) if method: method(jsonutils.dumps(strutils.mask_dict_password(message))) else: warnings.warn('Unable to log message as notify cannot find a ' 'logger with the priority specified ' '%s' % priority.lower()) oslo.messaging-5.35.0/oslo_messaging/notify/logger.py0000666000175100017510000000533413224676046022753 0ustar zuulzuul00000000000000# Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for the Python logging package that sends log records as a notification. """ import logging from oslo_config import cfg from oslo_messaging.notify import notifier class LoggingNotificationHandler(logging.Handler): """Handler for logging to the messaging notification system. Each time the application logs a message using the :py:mod:`logging` module, it will be sent as a notification. The severity used for the notification will be the same as the one used for the log record. This can be used in a Python logging configuration this way:: [handler_notifier] class=oslo_messaging.LoggingNotificationHandler level=ERROR args=('rabbit:///') """ CONF = cfg.CONF """Default configuration object used, subclass this class if you want to use another one. """ def __init__(self, url, publisher_id=None, driver=None, topic=None, serializer=None): self.notifier = notifier.Notifier( notifier.get_notification_transport(self.CONF, url), publisher_id, driver, serializer() if serializer else None, topics=(topic if isinstance(topic, list) or topic is None else [topic])) logging.Handler.__init__(self) def emit(self, record): """Emit the log record to the messaging notification system. :param record: A log record to emit. """ method = getattr(self.notifier, record.levelname.lower(), None) if not method: return method( {}, 'logrecord', { 'name': record.name, 'levelno': record.levelno, 'levelname': record.levelname, 'exc_info': record.exc_info, 'pathname': record.pathname, 'lineno': record.lineno, 'msg': record.getMessage(), 'funcName': record.funcName, 'thread': record.thread, 'processName': record.processName, 'process': record.process, 'extra': getattr(record, 'extra', None), } ) oslo.messaging-5.35.0/oslo_messaging/notify/listener.py0000666000175100017510000002665113224676046023326 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """A notification listener is used to process notification messages sent by a notifier that uses the ``messaging`` driver. A notification listener subscribes to the topic - and optionally exchange - in the supplied target. Notification messages sent by notifier clients to the target's topic/exchange are received by the listener. If multiple listeners subscribe to the same target, the notification will be received by only one of the listeners. The receiving listener is selected from the group using a best-effort round-robin algorithm. This delivery pattern can be altered somewhat by specifying a pool name for the listener. Listeners with the same pool name behave like a subgroup within the group of listeners subscribed to the same topic/exchange. Each subgroup of listeners will receive a copy of the notification to be consumed by one member of the subgroup. Therefore, multiple copies of the notification will be delivered - one to the group of listeners that have no pool name (if they exist), and one to each subgroup of listeners that share the same pool name. Note that not all transport drivers have implemented support for listener pools. Those drivers that do not support pools will raise a NotImplementedError if a pool name is specified to get_notification_listener(). A notification listener exposes a number of endpoints, each of which contains a set of methods. Each method's name corresponds to a notification's priority. When a notification is received it is dispatched to the method named like the notification's priority - e.g. ``info`` notifications are dispatched to the info() method, etc. Optionally a notification endpoint can define a NotificationFilter. Notification messages that do not match the filter's rules will *not* be passed to the endpoint's methods. Parameters to endpoint methods are: the request context supplied by the client, the publisher_id of the notification message, the event_type, the payload and metadata. The metadata parameter is a mapping containing a unique message_id and a timestamp. An endpoint method can explicitly return oslo_messaging.NotificationResult.HANDLED to acknowledge a message or oslo_messaging.NotificationResult.REQUEUE to requeue the message. Note that not all transport drivers implement support for requeueing. In order to use this feature, applications should assert that the feature is available by passing allow_requeue=True to get_notification_listener(). If the driver does not support requeueing, it will raise NotImplementedError at this point. The message is acknowledged only if all endpoints either return oslo_messaging.NotificationResult.HANDLED or None. Each notification listener is associated with an executor which controls how incoming notification messages will be received and dispatched. Refer to the Executor documentation for descriptions of the other types of executors. *Note:* If the "eventlet" executor is used, the threading and time libraries need to be monkeypatched. Notification listeners have start(), stop() and wait() methods to begin handling requests, stop handling requests, and wait for all in-process requests to complete after the listener has been stopped. To create a notification listener, you supply a transport, list of targets and a list of endpoints.
A transport can be obtained simply by calling the get_notification_transport() method:: transport = messaging.get_notification_transport(conf) which will load the appropriate transport driver according to the user's messaging configuration. See get_notification_transport() for more details. A simple example of a notification listener with multiple endpoints might be:: from oslo_config import cfg import oslo_messaging class NotificationEndpoint(object): filter_rule = oslo_messaging.NotificationFilter( publisher_id='^compute.*') def warn(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) class ErrorEndpoint(object): filter_rule = oslo_messaging.NotificationFilter( event_type='^instance\..*\.start$', context={'ctxt_key': 'regexp'}) def error(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) transport = oslo_messaging.get_notification_transport(cfg.CONF) targets = [ oslo_messaging.Target(topic='notifications'), oslo_messaging.Target(topic='notifications_bis') ] endpoints = [ NotificationEndpoint(), ErrorEndpoint(), ] pool = "listener-workers" server = oslo_messaging.get_notification_listener(transport, targets, endpoints, pool=pool) server.start() server.wait() By supplying a serializer object, a listener can deserialize a request context and arguments from primitive types. """ import itertools import logging from oslo_messaging._i18n import _LE from oslo_messaging.notify import dispatcher as notify_dispatcher from oslo_messaging import server as msg_server from oslo_messaging import transport as msg_transport LOG = logging.getLogger(__name__) class NotificationServerBase(msg_server.MessageHandlingServer): def __init__(self, transport, targets, dispatcher, executor='blocking', allow_requeue=True, pool=None, batch_size=1, batch_timeout=None): super(NotificationServerBase, self).__init__(transport, dispatcher, executor) self._allow_requeue = allow_requeue self._pool = pool self.targets = targets self._targets_priorities = set( itertools.product(self.targets, self.dispatcher.supported_priorities) ) self._batch_size = batch_size self._batch_timeout = batch_timeout def _create_listener(self): return self.transport._listen_for_notifications( self._targets_priorities, self._pool, self._batch_size, self._batch_timeout ) class NotificationServer(NotificationServerBase): def __init__(self, transport, targets, dispatcher, executor='blocking', allow_requeue=True, pool=None): if not isinstance(transport, msg_transport.NotificationTransport): LOG.warning("Using RPC transport for notifications. 
Please use " "get_notification_transport to obtain a " "notification transport instance.") super(NotificationServer, self).__init__( transport, targets, dispatcher, executor, allow_requeue, pool, 1, None ) def _process_incoming(self, incoming): message = incoming[0] try: res = self.dispatcher.dispatch(message) except Exception: LOG.exception(_LE('Exception during message handling.')) res = notify_dispatcher.NotificationResult.REQUEUE try: if (res == notify_dispatcher.NotificationResult.REQUEUE and self._allow_requeue): message.requeue() else: message.acknowledge() except Exception: LOG.exception(_LE("Failed to ack/requeue message.")) class BatchNotificationServer(NotificationServerBase): def _process_incoming(self, incoming): try: not_processed_messages = self.dispatcher.dispatch(incoming) except Exception: not_processed_messages = set(incoming) LOG.exception(_LE('Exception during batch message handling.')) for m in incoming: try: if m in not_processed_messages and self._allow_requeue: m.requeue() else: m.acknowledge() except Exception: LOG.exception(_LE("Failed to ack/requeue message.")) def get_notification_listener(transport, targets, endpoints, executor='blocking', serializer=None, allow_requeue=False, pool=None): """Construct a notification listener The executor parameter controls how incoming messages will be received and dispatched. If the eventlet executor is used, the threading and time libraries need to be monkeypatched. :param transport: the messaging transport :type transport: Transport :param targets: the exchanges and topics to listen on :type targets: list of Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer :param allow_requeue: whether NotificationResult.REQUEUE support is needed :type allow_requeue: bool :param pool: the pool name :type pool: str :raises: NotImplementedError """ dispatcher = notify_dispatcher.NotificationDispatcher(endpoints, serializer) return NotificationServer(transport, targets, dispatcher, executor, allow_requeue, pool) def get_batch_notification_listener(transport, targets, endpoints, executor='blocking', serializer=None, allow_requeue=False, pool=None, batch_size=None, batch_timeout=None): """Construct a batch notification listener The executor parameter controls how incoming messages will be received and dispatched. If the eventlet executor is used, the threading and time libraries need to be monkeypatched.
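As an illustrative sketch only (the BatchEndpoint class and the parameter values are hypothetical; it assumes the public oslo_messaging API), a batch listener could be constructed as follows. Each endpoint method of a batch listener receives a list of message dicts (with ctxt, publisher_id, event_type, payload and metadata keys), delivered once batch_size messages arrive or batch_timeout seconds elapse:

# Hedged sketch of a batch notification listener; values hypothetical.
from oslo_config import cfg

import oslo_messaging

class BatchEndpoint(object):
    def info(self, messages):
        # 'messages' is a list of up to batch_size notification dicts.
        for msg in messages:
            print(msg['event_type'])

transport = oslo_messaging.get_notification_transport(cfg.CONF)
targets = [oslo_messaging.Target(topic='notifications')]
listener = oslo_messaging.get_batch_notification_listener(
    transport, targets, [BatchEndpoint()],
    batch_size=16, batch_timeout=2)
listener.start()
listener.wait()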
:param transport: the messaging transport :type transport: Transport :param targets: the exchanges and topics to listen on :type targets: list of Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer :param allow_requeue: whether NotificationResult.REQUEUE support is needed :type allow_requeue: bool :param pool: the pool name :type pool: str :param batch_size: number of messages to wait for before calling the endpoint callbacks :type batch_size: int :param batch_timeout: number of seconds to wait before calling the endpoint callbacks :type batch_timeout: int :raises: NotImplementedError """ dispatcher = notify_dispatcher.BatchNotificationDispatcher( endpoints, serializer) return BatchNotificationServer( transport, targets, dispatcher, executor, allow_requeue, pool, batch_size, batch_timeout ) oslo.messaging-5.35.0/oslo_messaging/notify/dispatcher.py0000666000175100017510000001415013224676046023616 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import logging import operator import six from oslo_messaging._i18n import _LW from oslo_messaging import dispatcher from oslo_messaging import serializer as msg_serializer LOG = logging.getLogger(__name__) PRIORITIES = ['audit', 'debug', 'info', 'warn', 'error', 'critical', 'sample'] class NotificationResult(object): HANDLED = 'handled' REQUEUE = 'requeue' class NotificationDispatcher(dispatcher.DispatcherBase): def __init__(self, endpoints, serializer): self.endpoints = endpoints self.serializer = serializer or msg_serializer.NoOpSerializer() self._callbacks_by_priority = {} for endpoint, prio in itertools.product(endpoints, PRIORITIES): if hasattr(endpoint, prio): method = getattr(endpoint, prio) screen = getattr(endpoint, 'filter_rule', None) self._callbacks_by_priority.setdefault(prio, []).append( (screen, method)) @property def supported_priorities(self): return self._callbacks_by_priority.keys() def dispatch(self, incoming): """Dispatch notification messages to the appropriate endpoint method.
""" priority, raw_message, message = self._extract_user_message(incoming) if priority not in PRIORITIES: LOG.warning(_LW('Unknown priority "%s"'), priority) return for screen, callback in self._callbacks_by_priority.get(priority, []): if screen and not screen.match(message["ctxt"], message["publisher_id"], message["event_type"], message["metadata"], message["payload"]): continue ret = self._exec_callback(callback, message) if ret == NotificationResult.REQUEUE: return ret return NotificationResult.HANDLED def _exec_callback(self, callback, message): try: return callback(message["ctxt"], message["publisher_id"], message["event_type"], message["payload"], message["metadata"]) except Exception: LOG.exception("Callback raised an exception.") return NotificationResult.REQUEUE def _extract_user_message(self, incoming): ctxt = self.serializer.deserialize_context(incoming.ctxt) message = incoming.message publisher_id = message.get('publisher_id') event_type = message.get('event_type') metadata = { 'message_id': message.get('message_id'), 'timestamp': message.get('timestamp') } priority = message.get('priority', '').lower() payload = self.serializer.deserialize_entity(ctxt, message.get('payload')) return priority, incoming, dict(ctxt=ctxt, publisher_id=publisher_id, event_type=event_type, payload=payload, metadata=metadata) class BatchNotificationDispatcher(NotificationDispatcher): """A message dispatcher which understands Notification messages. A MessageHandlingServer is constructed by passing a callable dispatcher which is invoked with a list of message dictionaries each time 'batch_size' messages are received or 'batch_timeout' seconds is reached. """ def dispatch(self, incoming): """Dispatch notification messages to the appropriate endpoint method. """ messages_grouped = itertools.groupby(sorted( (self._extract_user_message(m) for m in incoming), key=operator.itemgetter(0)), operator.itemgetter(0)) requeues = set() for priority, messages in messages_grouped: __, raw_messages, messages = six.moves.zip(*messages) if priority not in PRIORITIES: LOG.warning(_LW('Unknown priority "%s"'), priority) continue for screen, callback in self._callbacks_by_priority.get(priority, []): if screen: filtered_messages = [message for message in messages if screen.match( message["ctxt"], message["publisher_id"], message["event_type"], message["metadata"], message["payload"])] else: filtered_messages = list(messages) if not filtered_messages: continue ret = self._exec_callback(callback, filtered_messages) if ret == NotificationResult.REQUEUE: requeues.update(raw_messages) break return requeues def _exec_callback(self, callback, messages): try: return callback(messages) except Exception: LOG.exception("Callback raised an exception.") return NotificationResult.REQUEUE oslo.messaging-5.35.0/oslo_messaging/notify/messaging.py0000666000175100017510000000615013224676046023446 0ustar zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Notification drivers for sending notifications via messaging. The messaging drivers publish notification messages to notification listeners. The driver will block the notifier's thread until the notification message has been passed to the messaging transport. There is no guarantee that the notification message will be consumed by a notification listener. Notification messages are sent 'at-most-once' - ensuring that they are not duplicated. If the connection to the messaging service is not active when a notification is sent this driver will block waiting for the connection to complete. If the connection fails to complete, the driver will try to re-establish that connection. By default this will continue indefinitely until the connection completes. However, the retry parameter can be used to have the notification send fail with a MessageDeliveryFailure after the given number of retries. """ import logging import oslo_messaging from oslo_messaging._i18n import _LE from oslo_messaging.notify import notifier LOG = logging.getLogger(__name__) class MessagingDriver(notifier.Driver): """Send notifications using the 1.0 message format. This driver sends notifications over the configured messaging transport, but without any message envelope (also known as message format 1.0). This driver should only be used in cases where there are existing consumers deployed which do not support the 2.0 message format. """ def __init__(self, conf, topics, transport, version=1.0): super(MessagingDriver, self).__init__(conf, topics, transport) self.version = version def notify(self, ctxt, message, priority, retry): priority = priority.lower() for topic in self.topics: target = oslo_messaging.Target(topic='%s.%s' % (topic, priority)) try: self.transport._send_notification(target, ctxt, message, version=self.version, retry=retry) except Exception: LOG.exception(_LE("Could not send notification to %(topic)s. " "Payload=%(message)s"), dict(topic=topic, message=message)) class MessagingV2Driver(MessagingDriver): "Send notifications using the 2.0 message format." def __init__(self, conf, **kwargs): super(MessagingV2Driver, self).__init__(conf, version=2.0, **kwargs) oslo.messaging-5.35.0/oslo_messaging/notify/notifier.py0000666000175100017510000004227513224676046023320 0ustar zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
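A minimal usage sketch of the retry behaviour described above (the values here are hypothetical; it assumes the public oslo_messaging API): with the 'messagingv2' driver and retry=3, a send that keeps failing raises MessageDeliveryFailure after at most 3 retries instead of blocking indefinitely, which is the retry=-1 default.

# Hedged sketch, not library code: bounded notification retries.
from oslo_config import cfg

import oslo_messaging

transport = oslo_messaging.get_notification_transport(cfg.CONF)
notifier = oslo_messaging.Notifier(transport,
                                   publisher_id='compute.host1',
                                   driver='messagingv2',
                                   topics=['notifications'],
                                   retry=3)
try:
    notifier.info({}, 'compute.create_instance', {'instance_id': 12})
except oslo_messaging.MessageDeliveryFailure:
    pass  # delivery gave up after the configured number of retries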
import abc import argparse import logging import uuid from oslo_config import cfg from oslo_utils import timeutils import six from stevedore import extension from stevedore import named from oslo_messaging._i18n import _LE from oslo_messaging import serializer as msg_serializer from oslo_messaging import transport as msg_transport _notifier_opts = [ cfg.MultiStrOpt('driver', default=[], deprecated_name='notification_driver', deprecated_group='DEFAULT', help='The Driver(s) to handle sending notifications. ' 'Possible values are messaging, messagingv2, ' 'routing, log, test, noop'), cfg.StrOpt('transport_url', deprecated_name='notification_transport_url', deprecated_group='DEFAULT', secret=True, help='A URL representing the messaging driver to use for ' 'notifications. If not set, we fall back to the same ' 'configuration used for RPC.'), cfg.ListOpt('topics', default=['notifications', ], deprecated_opts=[ cfg.DeprecatedOpt('topics', group='rpc_notifier2'), cfg.DeprecatedOpt('notification_topics', group='DEFAULT') ], help='AMQP topic used for OpenStack notifications.'), cfg.IntOpt('retry', default=-1, help='The maximum number of attempts to re-send a notification ' 'message which failed to be delivered due to a ' 'recoverable error. 0 - No retry, -1 - indefinite'), ] _LOG = logging.getLogger(__name__) def _send_notification(): """Command line tool to send notifications manually.""" parser = argparse.ArgumentParser( description='Oslo.messaging notification sending', ) parser.add_argument('--config-file', help='Path to configuration file') parser.add_argument('--transport-url', help='Transport URL') parser.add_argument('--publisher-id', help='Publisher ID') parser.add_argument('--event-type', default="test", help="Event type") parser.add_argument('--topic', nargs='*', help="Topic to send to") parser.add_argument('--priority', default="info", choices=("info", "audit", "warn", "error", "critical", "sample"), help='Event priority') parser.add_argument('--driver', default="messagingv2", choices=extension.ExtensionManager( 'oslo.messaging.notify.drivers' ).names(), help='Notification driver') parser.add_argument('payload') args = parser.parse_args() conf = cfg.ConfigOpts() conf([], default_config_files=[args.config_file] if args.config_file else None) transport = get_notification_transport(conf, url=args.transport_url) notifier = Notifier(transport, args.publisher_id, topics=args.topic, driver=args.driver) notifier._notify({}, args.event_type, args.payload, args.priority) @six.add_metaclass(abc.ABCMeta) class Driver(object): """Base driver for Notifications""" def __init__(self, conf, topics, transport): """base driver initialization :param conf: configuration options :param topics: list of topics :param transport: transport driver to use """ self.conf = conf self.topics = topics self.transport = transport @abc.abstractmethod def notify(self, ctxt, msg, priority, retry): """send a single notification with a specific priority :param ctxt: current request context :param msg: message to be sent :type msg: str :param priority: priority of the message :type priority: str :param retry: connection retries configuration (used by the messaging driver): None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int """ pass def get_notification_transport(conf, url=None, allowed_remote_exmods=None, aliases=None): """A factory method for Transport objects for notifications.
This method should be used for notifications, in case notifications are being sent over a different message bus than normal messaging functionality; for example, using a different driver, or with different access permissions. If no transport URL is provided, the URL in the notifications section of the config file will be used. If that URL is also absent, the same transport as specified in the messaging section will be used. If a transport URL is provided, then this function works exactly the same as get_transport. :param conf: the user configuration :type conf: cfg.ConfigOpts :param url: a transport URL, see :py:class:`transport.TransportURL` :type url: str or TransportURL :param allowed_remote_exmods: a list of modules which a client using this transport will deserialize remote exceptions from :type allowed_remote_exmods: list :param aliases: A map of transport alias to transport name :type aliases: dict """ conf.register_opts(_notifier_opts, group='oslo_messaging_notifications') if url is None: url = conf.oslo_messaging_notifications.transport_url return msg_transport._get_transport( conf, url, allowed_remote_exmods, aliases, transport_cls=msg_transport.NotificationTransport) class Notifier(object): """Send notification messages. The Notifier class is used for sending notification messages over a messaging transport or other means. Notification messages follow the following format:: {'message_id': six.text_type(uuid.uuid4()), 'publisher_id': 'compute.host1', 'timestamp': timeutils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} A Notifier object can be instantiated with a transport object and a publisher ID: notifier = messaging.Notifier(get_notification_transport(CONF), 'compute') and notifications are sent via drivers chosen with the driver config option and on the topics chosen with the topics config option in [oslo_messaging_notifications] section. Alternatively, a Notifier object can be instantiated with a specific driver or topic:: transport = notifier.get_notification_transport(CONF) notifier = notifier.Notifier(transport, 'compute.host', driver='messaging', topics=['notifications']) Notifier objects are relatively expensive to instantiate (mostly the cost of loading notification drivers), so it is possible to specialize a given Notifier object with a different publisher id using the prepare() method:: notifier = notifier.prepare(publisher_id='compute') notifier.info(ctxt, event_type, payload) """ def __init__(self, transport, publisher_id=None, driver=None, serializer=None, retry=None, topics=None): """Construct a Notifier object. :param transport: the transport to use for sending messages :type transport: oslo_messaging.Transport :param publisher_id: field in notifications sent, for example 'compute.host1' :type publisher_id: str :param driver: a driver to lookup from oslo_messaging.notify.drivers :type driver: str :param serializer: an optional entity serializer :type serializer: Serializer :param retry: connection retries configuration (used by the messaging driver): None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int :param topics: the topics which to send messages on :type topics: list of strings """ conf = transport.conf conf.register_opts(_notifier_opts, group='oslo_messaging_notifications') if not isinstance(transport, msg_transport.NotificationTransport): _LOG.warning("Using RPC transport for notifications. 
Please use " "get_notification_transport to obtain a " "notification transport instance.") self.transport = transport self.publisher_id = publisher_id if retry is not None: self.retry = retry else: self.retry = conf.oslo_messaging_notifications.retry self._driver_names = ([driver] if driver is not None else conf.oslo_messaging_notifications.driver) if topics is not None: self._topics = topics else: self._topics = conf.oslo_messaging_notifications.topics self._serializer = serializer or msg_serializer.NoOpSerializer() self._driver_mgr = named.NamedExtensionManager( 'oslo.messaging.notify.drivers', names=self._driver_names, invoke_on_load=True, invoke_args=[conf], invoke_kwds={ 'topics': self._topics, 'transport': self.transport, } ) _marker = object() def prepare(self, publisher_id=_marker, retry=_marker): """Return a specialized Notifier instance. Returns a new Notifier instance with the supplied publisher_id. Allows sending notifications from multiple publisher_ids without the overhead of notification driver loading. :param publisher_id: field in notifications sent, for example 'compute.host1' :type publisher_id: str :param retry: connection retries configuration (used by the messaging driver): None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int """ return _SubNotifier._prepare(self, publisher_id, retry=retry) def _notify(self, ctxt, event_type, payload, priority, publisher_id=None, retry=None): payload = self._serializer.serialize_entity(ctxt, payload) ctxt = self._serializer.serialize_context(ctxt) msg = dict(message_id=six.text_type(uuid.uuid4()), publisher_id=publisher_id or self.publisher_id, event_type=event_type, priority=priority, payload=payload, timestamp=six.text_type(timeutils.utcnow())) def do_notify(ext): try: ext.obj.notify(ctxt, msg, priority, retry or self.retry) except Exception as e: _LOG.exception(_LE("Problem '%(e)s' attempting to send to " "notification system. Payload=%(payload)s"), dict(e=e, payload=payload)) if self._driver_mgr.extensions: self._driver_mgr.map(do_notify) def audit(self, ctxt, event_type, payload): """Send a notification at audit level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'AUDIT') def debug(self, ctxt, event_type, payload): """Send a notification at debug level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'DEBUG') def info(self, ctxt, event_type, payload): """Send a notification at info level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'INFO') def warn(self, ctxt, event_type, payload): """Send a notification at warning level. 
:param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'WARN') warning = warn def error(self, ctxt, event_type, payload): """Send a notification at error level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'ERROR') def critical(self, ctxt, event_type, payload): """Send a notification at critical level. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'CRITICAL') def sample(self, ctxt, event_type, payload): """Send a notification at sample level. Sample notifications are for high-frequency events that typically contain small payloads. eg: "CPU = 70%" Not all drivers support the sample level (log, for example) so these could be dropped. :param ctxt: a request context dict :type ctxt: dict :param event_type: describes the event, for example 'compute.create_instance' :type event_type: str :param payload: the notification payload :type payload: dict :raises: MessageDeliveryFailure """ self._notify(ctxt, event_type, payload, 'SAMPLE') def is_enabled(self): """Check if the notifier will emit notifications anywhere. :return: false if the driver of the notifier is set only to noop, true otherwise """ return self._driver_mgr.names() != ['noop'] class _SubNotifier(Notifier): _marker = Notifier._marker def __init__(self, base, publisher_id, retry): self._base = base self.transport = base.transport self.publisher_id = publisher_id self.retry = retry self._serializer = self._base._serializer self._driver_mgr = self._base._driver_mgr def _notify(self, ctxt, event_type, payload, priority): super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority) @classmethod def _prepare(cls, base, publisher_id=_marker, retry=_marker): if publisher_id is cls._marker: publisher_id = base.publisher_id if retry is cls._marker: retry = base.retry return cls(base, publisher_id, retry=retry) oslo.messaging-5.35.0/oslo_messaging/notify/log_handler.py0000666000175100017510000000342313224676046023747 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg class LoggingErrorNotificationHandler(logging.Handler): def __init__(self, *args, **kwargs): # NOTE(dhellmann): Avoid a cyclical import by doing this one # at runtime. 
import oslo_messaging logging.Handler.__init__(self, *args, **kwargs) self._transport = oslo_messaging.get_notification_transport(cfg.CONF) self._notifier = oslo_messaging.Notifier( self._transport, publisher_id='error.publisher') def emit(self, record): conf = self._transport.conf # NOTE(bnemec): Notifier registers this opt with the transport. if ('log' in conf.oslo_messaging_notifications.driver): # NOTE(lbragstad): If we detect that log is one of the # notification drivers, then return. This protects from infinite # recursion where something bad happens, it gets logged, the log # handler sends a notification, and the log_notifier sees the # notification and logs it. return self._notifier.error({}, 'error_notification', dict(error=record.msg)) PublishErrorsHandler = LoggingErrorNotificationHandler oslo.messaging-5.35.0/oslo_messaging/notify/_impl_noop.py0000666000175100017510000000146113224676046023624 0ustar zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging.notify import notifier class NoOpDriver(notifier.Driver): def notify(self, ctxt, message, priority, retry): pass oslo.messaging-5.35.0/oslo_messaging/notify/__init__.py0000666000175100017510000000214513224676046023230 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['Notifier', 'LoggingNotificationHandler', 'get_notification_transport', 'get_notification_listener', 'get_batch_notification_listener', 'NotificationResult', 'NotificationFilter', 'PublishErrorsHandler', 'LoggingErrorNotificationHandler'] from .filter import NotificationFilter from .notifier import * from .listener import * from .log_handler import * from .logger import * from .dispatcher import NotificationResult oslo.messaging-5.35.0/oslo_messaging/notify/middleware.py0000666000175100017510000000765013224676046023614 0ustar zuulzuul00000000000000# Copyright (c) 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Send notifications on request """ import logging import os.path import sys import traceback as tb from oslo_config import cfg from oslo_middleware import base import webob.dec import oslo_messaging from oslo_messaging._i18n import _LE from oslo_messaging import notify LOG = logging.getLogger(__name__) def log_and_ignore_error(fn): def wrapped(*args, **kwargs): try: return fn(*args, **kwargs) except Exception as e: LOG.exception(_LE('An exception occurred processing ' 'the API call: %s'), e) return wrapped class RequestNotifier(base.Middleware): """Send notification on request.""" @classmethod def factory(cls, global_conf, **local_conf): """Factory method for paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def _factory(app): return cls(app, **conf) return _factory def __init__(self, app, **conf): self.notifier = notify.Notifier( oslo_messaging.get_notification_transport(cfg.CONF, conf.get('url')), publisher_id=conf.get('publisher_id', os.path.basename(sys.argv[0]))) self.service_name = conf.get('service_name') self.ignore_req_list = [x.upper().strip() for x in conf.get('ignore_req_list', '').split(',')] super(RequestNotifier, self).__init__(app) @staticmethod def environ_to_dict(environ): """Following PEP 333, CGI-style server variables are upper case and WSGI-specific variables are lower case, so keep only the upper-case variables and drop the auth token. """ return dict((k, v) for k, v in environ.items() if k.isupper() and k != 'HTTP_X_AUTH_TOKEN') @log_and_ignore_error def process_request(self, request): request.environ['HTTP_X_SERVICE_NAME'] = \ self.service_name or request.host payload = { 'request': self.environ_to_dict(request.environ), } self.notifier.info({}, 'http.request', payload) @log_and_ignore_error def process_response(self, request, response, exception=None, traceback=None): payload = { 'request': self.environ_to_dict(request.environ), } if response: payload['response'] = { 'status': response.status, 'headers': response.headers, } if exception: payload['exception'] = { 'value': repr(exception), 'traceback': tb.format_tb(traceback) } self.notifier.info({}, 'http.response', payload) @webob.dec.wsgify def __call__(self, req): if req.method in self.ignore_req_list: return req.get_response(self.application) else: self.process_request(req) try: response = req.get_response(self.application) except Exception: exc_type, value, traceback = sys.exc_info() self.process_response(req, None, value, traceback) raise else: self.process_response(req, response) return response oslo.messaging-5.35.0/oslo_messaging/notify/filter.py0000666000175100017510000000624613224676046022764 0ustar zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import six class NotificationFilter(object): """Filter notification messages The NotificationFilter class is used to filter notifications that an endpoint will receive. The notification can be filtered on different fields: context, publisher_id, event_type, metadata and payload.
The filter is done via a regular expression filter_rule = NotificationFilter( publisher_id='^compute.*', context={'tenant_id': '^5f643cfc-664b-4c69-8000-ce2ed7b08216$', 'roles': 'private'}, event_type='^compute\.instance\..*', metadata={'timestamp': 'Aug'}, payload={'state': '^active$') """ def __init__(self, context=None, publisher_id=None, event_type=None, metadata=None, payload=None): self._regex_publisher_id = None self._regex_event_type = None if publisher_id is not None: self._regex_publisher_id = re.compile(publisher_id) if event_type is not None: self._regex_event_type = re.compile(event_type) self._regexs_context = self._build_regex_dict(context) self._regexs_metadata = self._build_regex_dict(metadata) self._regexs_payload = self._build_regex_dict(payload) @staticmethod def _build_regex_dict(regex_list): if regex_list is None: return {} return dict((k, re.compile(regex_list[k])) for k in regex_list) @staticmethod def _check_for_single_mismatch(data, regex): if regex is None: return False if not isinstance(data, six.string_types): return True if not regex.match(data): return True return False @classmethod def _check_for_mismatch(cls, data, regex): if isinstance(regex, dict): for k in regex: if k not in data: return True if cls._check_for_single_mismatch(data[k], regex[k]): return True return False else: return cls._check_for_single_mismatch(data, regex) def match(self, context, publisher_id, event_type, metadata, payload): if (self._check_for_mismatch(publisher_id, self._regex_publisher_id) or self._check_for_mismatch(event_type, self._regex_event_type) or self._check_for_mismatch(context, self._regexs_context) or self._check_for_mismatch(metadata, self._regexs_metadata) or self._check_for_mismatch(payload, self._regexs_payload)): return False return True oslo.messaging-5.35.0/oslo_messaging/notify/_impl_routing.py0000666000175100017510000001223213224676046024336 0ustar zuulzuul00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg from oslo_utils import fnmatch from stevedore import dispatch import yaml from oslo_messaging._i18n import _LI, _LW from oslo_messaging.notify import notifier LOG = logging.getLogger(__name__) router_config = cfg.StrOpt('routing_config', default='', deprecated_group='DEFAULT', deprecated_name='routing_notifier_config', help='RoutingNotifier configuration file location.') CONF = cfg.CONF CONF.register_opt(router_config, group='oslo_messaging_notifications') class RoutingDriver(notifier.Driver): NOTIFIER_PLUGIN_NAMESPACE = 'oslo.messaging.notify.drivers' plugin_manager = None routing_groups = None # The routing groups from the config file. used_drivers = None # Used driver names, extracted from config file. def _should_load_plugin(self, ext, *args, **kwargs): # Hack to keep stevedore from circular importing since these # endpoints are used for different purposes. 
if ext.name == 'routing': return False return ext.name in self.used_drivers def _get_notifier_config_file(self, filename): """Broken out for testing.""" return open(filename, 'r') def _load_notifiers(self): """One-time load of notifier config file.""" self.routing_groups = {} self.used_drivers = set() filename = CONF.oslo_messaging_notifications.routing_config if not filename: return # Infer which drivers are used from the config file. self.routing_groups = yaml.safe_load( self._get_notifier_config_file(filename)) if not self.routing_groups: self.routing_groups = {} # In case we got None from load() return for group in self.routing_groups.values(): self.used_drivers.update(group.keys()) LOG.debug('loading notifiers from %s', self.NOTIFIER_PLUGIN_NAMESPACE) self.plugin_manager = dispatch.DispatchExtensionManager( namespace=self.NOTIFIER_PLUGIN_NAMESPACE, check_func=self._should_load_plugin, invoke_on_load=True, invoke_args=None) if not list(self.plugin_manager): LOG.warning(_LW("Failed to load any notifiers for %s"), self.NOTIFIER_PLUGIN_NAMESPACE) def _get_drivers_for_message(self, group, event_type, priority): """Which drivers should be called for this event_type or priority. """ accepted_drivers = set() for driver, rules in group.items(): checks = [] for key, patterns in rules.items(): if key == 'accepted_events': c = [fnmatch.fnmatch(event_type, p) for p in patterns] checks.append(any(c)) if key == 'accepted_priorities': c = [fnmatch.fnmatch(priority, p.lower()) for p in patterns] checks.append(any(c)) if all(checks): accepted_drivers.add(driver) return list(accepted_drivers) def _filter_func(self, ext, context, message, priority, retry, accepted_drivers): """True/False if the driver should be called for this message. """ # context is unused here, but passed in by map() return ext.name in accepted_drivers def _call_notify(self, ext, context, message, priority, retry, accepted_drivers): """Emit the notification. """ # accepted_drivers is passed in as a result of the map() function LOG.info(_LI("Routing '%(event)s' notification to '%(driver)s' " "driver"), {'event': message.get('event_type'), 'driver': ext.name}) ext.obj.notify(context, message, priority, retry) def notify(self, context, message, priority, retry): if not self.plugin_manager: self._load_notifiers() # Fail if these aren't present ... event_type = message['event_type'] accepted_drivers = set() for group in self.routing_groups.values(): accepted_drivers.update( self._get_drivers_for_message(group, event_type, priority.lower())) self.plugin_manager.map(self._filter_func, self._call_notify, context, message, priority, retry, list(accepted_drivers)) oslo.messaging-5.35.0/oslo_messaging/notify/_impl_test.py0000666000175100017510000000204613224676046023630 0ustar zuulzuul00000000000000 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_messaging.notify import notifier NOTIFICATIONS = [] def reset(): "Clear out the list of recorded notifications." 
global NOTIFICATIONS NOTIFICATIONS = [] class TestDriver(notifier.Driver): "Store notifications in memory for test verification." def notify(self, ctxt, message, priority, retry): NOTIFICATIONS.append((ctxt, message, priority, retry)) oslo.messaging-5.35.0/oslo_messaging/_cmd/0000775000175100017510000000000013224676256020510 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_cmd/__init__.py0000666000175100017510000000000013224676046022606 0ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/_cmd/zmq_proxy.py0000666000175100017510000000273013224676046023133 0ustar zuulzuul00000000000000# Copyright 2015-2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg from oslo_messaging._drivers.zmq_driver.proxy import zmq_proxy from oslo_messaging._drivers.zmq_driver import zmq_options from oslo_messaging._i18n import _LI from oslo_messaging.transport import TransportURL LOG = logging.getLogger(__name__) def main(): conf = cfg.CONF opt_group = cfg.OptGroup(name='zmq_proxy_opts', title='ZeroMQ proxy options') conf.register_opts(zmq_proxy.zmq_proxy_opts, group=opt_group) zmq_options.register_opts(conf, TransportURL.parse(conf)) zmq_proxy.parse_command_line_args(conf) reactor = zmq_proxy.ZmqProxy(conf) try: while True: reactor.run() except (KeyboardInterrupt, SystemExit): LOG.info(_LI("Exit proxy by interrupt signal.")) finally: reactor.close() if __name__ == "__main__": main() oslo.messaging-5.35.0/oslo_messaging/rpc/0000775000175100017510000000000013224676256020372 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/oslo_messaging/rpc/dispatcher.py0000666000175100017510000001755313224676046023104 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
__all__ = [ 'NoSuchMethod', 'RPCAccessPolicyBase', 'LegacyRPCAccessPolicy', 'DefaultRPCAccessPolicy', 'ExplicitRPCAccessPolicy', 'RPCDispatcher', 'RPCDispatcherError', 'UnsupportedVersion', 'ExpectedException', ] from abc import ABCMeta from abc import abstractmethod import sys import six from oslo_messaging import _utils as utils from oslo_messaging import dispatcher from oslo_messaging import serializer as msg_serializer from oslo_messaging import server as msg_server from oslo_messaging import target as msg_target class ExpectedException(Exception): """Encapsulates an expected exception raised by an RPC endpoint Merely instantiating this exception records the current exception information, which will be passed back to the RPC client without exceptional logging. """ def __init__(self): self.exc_info = sys.exc_info() class RPCDispatcherError(msg_server.MessagingServerError): "A base class for all RPC dispatcher exceptions." class NoSuchMethod(RPCDispatcherError, AttributeError): "Raised if there is no endpoint which exposes the requested method." def __init__(self, method): msg = "Endpoint does not support RPC method %s" % method super(NoSuchMethod, self).__init__(msg) self.method = method class UnsupportedVersion(RPCDispatcherError): "Raised if there is no endpoint which supports the requested version." def __init__(self, version, method=None): msg = "Endpoint does not support RPC version %s" % version if method: msg = "%s. Attempted method: %s" % (msg, method) super(UnsupportedVersion, self).__init__(msg) self.version = version self.method = method @six.add_metaclass(ABCMeta) class RPCAccessPolicyBase(object): """Determines which endpoint methods may be invoked via RPC""" @abstractmethod def is_allowed(self, endpoint, method): """Applies an access policy to the rpc method :param endpoint: the instance of a rpc endpoint :param method: the method of the endpoint :return: True if the method may be invoked via RPC, else False. """ class LegacyRPCAccessPolicy(RPCAccessPolicyBase): """The legacy access policy allows RPC access to all callable endpoint methods including private methods (methods prefixed by '_') """ def is_allowed(self, endpoint, method): return True class DefaultRPCAccessPolicy(RPCAccessPolicyBase): """The default access policy prevents RPC calls to private methods (methods prefixed by '_') .. note:: LegacyRPCAdapterPolicy currently needs to be the default while we have projects that rely on exposing private methods. """ def is_allowed(self, endpoint, method): return not method.startswith('_') class ExplicitRPCAccessPolicy(RPCAccessPolicyBase): """Policy which requires decorated endpoint methods to allow dispatch""" def is_allowed(self, endpoint, method): if hasattr(endpoint, method): return hasattr(getattr(endpoint, method), 'exposed') return False class RPCDispatcher(dispatcher.DispatcherBase): """A message dispatcher which understands RPC messages. A MessageHandlingServer is constructed by passing a callable dispatcher which is invoked with context and message dictionaries each time a message is received. RPCDispatcher is one such dispatcher which understands the format of RPC messages. The dispatcher looks at the namespace, version and method values in the message and matches those against a list of available endpoints. Endpoints may have a target attribute describing the namespace and version of the methods exposed by that object. The RPCDispatcher may have an access_policy attribute which determines which of the endpoint methods are to be dispatched. 
The default access_policy dispatches all public methods on an endpoint object. """ def __init__(self, endpoints, serializer, access_policy=None): """Construct a rpc server dispatcher. :param endpoints: list of endpoint objects for dispatching to :param serializer: optional message serializer """ for ep in endpoints: target = getattr(ep, 'target', None) if target and not isinstance(target, msg_target.Target): errmsg = "'target' is a reserved Endpoint attribute used" + \ " for namespace and version filtering. It must" + \ " be of type oslo_messaging.Target. Do not" + \ " define an Endpoint method named 'target'" raise TypeError("%s: endpoint=%s" % (errmsg, ep)) self.endpoints = endpoints self.serializer = serializer or msg_serializer.NoOpSerializer() self._default_target = msg_target.Target() if access_policy is not None: if issubclass(access_policy, RPCAccessPolicyBase): self.access_policy = access_policy() else: raise TypeError('access_policy must be a subclass of ' 'RPCAccessPolicyBase') else: self.access_policy = DefaultRPCAccessPolicy() @staticmethod def _is_namespace(target, namespace): return namespace in target.accepted_namespaces @staticmethod def _is_compatible(target, version): endpoint_version = target.version or '1.0' return utils.version_is_compatible(endpoint_version, version) def _do_dispatch(self, endpoint, method, ctxt, args): ctxt = self.serializer.deserialize_context(ctxt) new_args = dict() for argname, arg in args.items(): new_args[argname] = self.serializer.deserialize_entity(ctxt, arg) func = getattr(endpoint, method) result = func(ctxt, **new_args) return self.serializer.serialize_entity(ctxt, result) def dispatch(self, incoming): """Dispatch an RPC message to the appropriate endpoint method. :param incoming: incoming message :type incoming: IncomingMessage :raises: NoSuchMethod, UnsupportedVersion """ message = incoming.message ctxt = incoming.ctxt method = message.get('method') args = message.get('args', {}) namespace = message.get('namespace') version = message.get('version', '1.0') found_compatible = False for endpoint in self.endpoints: target = getattr(endpoint, 'target', None) if not target: target = self._default_target if not (self._is_namespace(target, namespace) and self._is_compatible(target, version)): continue if hasattr(endpoint, method): if self.access_policy.is_allowed(endpoint, method): return self._do_dispatch(endpoint, method, ctxt, args) found_compatible = True if found_compatible: raise NoSuchMethod(method) else: raise UnsupportedVersion(version, method=method) oslo.messaging-5.35.0/oslo_messaging/rpc/server.py0000666000175100017510000002230513224676046022253 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ An RPC server exposes a number of endpoints, each of which contain a set of methods which may be invoked remotely by clients over a given transport. To create an RPC server, you supply a transport, target and a list of endpoints. 
A transport can be obtained simply by calling the get_rpc_transport() method:: transport = messaging.get_rpc_transport(conf) which will load the appropriate transport driver according to the user's messaging configuration. See get_rpc_transport() for more details. The target supplied when creating an RPC server expresses the topic, server name and - optionally - the exchange to listen on. See Target for more details on these attributes. Multiple RPC Servers may listen to the same topic (and exchange) simultaneously. See RPCClient for details regarding how RPC requests are distributed to the Servers in this case. Each endpoint object may have a target attribute which may have namespace and version fields set. By default, we use the 'null namespace' and version 1.0. Incoming method calls will be dispatched to the first endpoint with the requested method, a matching namespace and a compatible version number. The first parameter to method invocations is always the request context supplied by the client. The remaining parameters are the arguments supplied to the method by the client. Endpoint methods may return a value. If so the RPC Server will send the returned value back to the requesting client via the transport. The executor parameter controls how incoming messages will be received and dispatched. Refer to the Executor documentation for descriptions of the types of executors. *Note:* If the "eventlet" executor is used, the threading and time library need to be monkeypatched. The RPC reply operation is best-effort: the server will consider the message containing the reply successfully sent once it is accepted by the messaging transport. The server does not guarantee that the reply is processed by the RPC client. If the send fails an error will be logged and the server will continue to processing incoming RPC requests. Parameters to the method invocation and values returned from the method are python primitive types. However the actual encoding of the data in the message may not be in primitive form (e.g. the message payload may be a dictionary encoded as an ASCII string using JSON). A serializer object is used to convert incoming encoded message data to primitive types. The serializer is also used to convert the return value from primitive types to an encoding suitable for the message payload. RPC servers have start(), stop() and wait() methods to begin handling requests, stop handling requests, and wait for all in-process requests to complete after the Server has been stopped. A simple example of an RPC server with multiple endpoints might be:: # NOTE(changzhi): We are using eventlet executor and # time.sleep(1), therefore, the server code needs to be # monkey-patched. 
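# Patching must happen before any other module imports threading or
# time, which is why it is the first thing this example does.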
import eventlet eventlet.monkey_patch() from oslo_config import cfg import oslo_messaging import time class ServerControlEndpoint(object): target = oslo_messaging.Target(namespace='control', version='2.0') def __init__(self, server): self.server = server def stop(self, ctx): if self.server: self.server.stop() class TestEndpoint(object): def test(self, ctx, arg): return arg transport = oslo_messaging.get_rpc_transport(cfg.CONF) target = oslo_messaging.Target(topic='test', server='server1') endpoints = [ ServerControlEndpoint(None), TestEndpoint(), ] server = oslo_messaging.get_rpc_server(transport, target, endpoints, executor='eventlet') try: server.start() while True: time.sleep(1) except KeyboardInterrupt: print("Stopping server") server.stop() server.wait() """ __all__ = [ 'get_rpc_server', 'expected_exceptions', 'expose' ] import logging import sys from oslo_messaging._i18n import _LE from oslo_messaging.rpc import dispatcher as rpc_dispatcher from oslo_messaging import server as msg_server from oslo_messaging import transport as msg_transport LOG = logging.getLogger(__name__) class RPCServer(msg_server.MessageHandlingServer): def __init__(self, transport, target, dispatcher, executor='blocking'): super(RPCServer, self).__init__(transport, dispatcher, executor) if not isinstance(transport, msg_transport.RPCTransport): LOG.warning("Using notification transport for RPC. Please use " "get_rpc_transport to obtain an RPC transport " "instance.") self._target = target def _create_listener(self): return self.transport._listen(self._target, 1, None) def _process_incoming(self, incoming): message = incoming[0] try: message.acknowledge() except Exception: LOG.exception(_LE("Can not acknowledge message. Skip processing")) return failure = None try: res = self.dispatcher.dispatch(message) except rpc_dispatcher.ExpectedException as e: failure = e.exc_info LOG.debug(u'Expected exception during message handling (%s)', e) except Exception: # current sys.exc_info() content can be overridden # by another exception raised by a log handler during # LOG.exception(). So keep a copy and delete it later. failure = sys.exc_info() LOG.exception(_LE('Exception during message handling')) try: if failure is None: message.reply(res) else: message.reply(failure=failure) except Exception: LOG.exception(_LE("Can not send reply for message")) finally: # NOTE(dhellmann): Remove circular object reference # between the current stack frame and the traceback in # exc_info. del failure def get_rpc_server(transport, target, endpoints, executor='blocking', serializer=None, access_policy=None): """Construct an RPC server. :param transport: the messaging transport :type transport: Transport :param target: the exchange, topic and server to listen on :type target: Target :param endpoints: a list of endpoint objects :type endpoints: list :param executor: name of message executor - available values are 'eventlet' and 'threading' :type executor: str :param serializer: an optional entity serializer :type serializer: Serializer :param access_policy: an optional access policy. Defaults to DefaultRPCAccessPolicy :type access_policy: RPCAccessPolicyBase """ dispatcher = rpc_dispatcher.RPCDispatcher(endpoints, serializer, access_policy) return RPCServer(transport, target, dispatcher, executor) def expected_exceptions(*exceptions): """Decorator for RPC endpoint methods that raise expected exceptions. 
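For example (a sketch; ``InstanceNotFound`` stands in for a real application exception)::

    @rpc.expected_exceptions(InstanceNotFound)
    def resize(self, ctxt, instance_id, flavor):
        ...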
Marking an endpoint method with this decorator allows the declaration of expected exceptions that the RPC server should not consider fatal, and not log as if they were generated in a real error scenario. Note that this will cause listed exceptions to be wrapped in an ExpectedException, which is used internally by the RPC sever. The RPC client will see the original exception type. """ def outer(func): def inner(*args, **kwargs): try: return func(*args, **kwargs) # Take advantage of the fact that we can catch # multiple exception types using a tuple of # exception classes, with subclass detection # for free. Any exception that is not in or # derived from the args passed to us will be # ignored and thrown as normal. except exceptions: raise rpc_dispatcher.ExpectedException() return inner return outer def expose(func): """Decorator for RPC endpoint methods that are exposed to the RPC client. If the dispatcher's access_policy is set to ExplicitRPCAccessPolicy then endpoint methods need to be explicitly exposed.:: # foo() cannot be invoked by an RPC client def foo(self): pass # bar() can be invoked by an RPC client @rpc.expose def bar(self): pass """ func.exposed = True return func oslo.messaging-5.35.0/oslo_messaging/rpc/__init__.py0000666000175100017510000000214413224676046022503 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'ClientSendError', 'ExpectedException', 'NoSuchMethod', 'RPCClient', 'RPCAccessPolicyBase', 'LegacyRPCAccessPolicy', 'DefaultRPCAccessPolicy', 'ExplicitRPCAccessPolicy', 'RPCDispatcher', 'RPCDispatcherError', 'RPCVersionCapError', 'RemoteError', 'UnsupportedVersion', 'expected_exceptions', 'get_rpc_transport', 'get_rpc_server', 'expose' ] from .client import * from .dispatcher import * from .transport import * from .server import * oslo.messaging-5.35.0/oslo_messaging/rpc/client.py0000666000175100017510000004642313224676046022232 0ustar zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
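"""Client side of the RPC API: RPCClient and its per-invocation call contexts."""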
__all__ = [ 'ClientSendError', 'RPCClient', 'RPCVersionCapError', 'RemoteError', ] import abc import logging from oslo_config import cfg import six from oslo_messaging._drivers import base as driver_base from oslo_messaging import _utils as utils from oslo_messaging import exceptions from oslo_messaging import serializer as msg_serializer from oslo_messaging import transport as msg_transport LOG = logging.getLogger(__name__) _client_opts = [ cfg.IntOpt('rpc_response_timeout', default=60, help='Seconds to wait for a response from a call.'), ] class RemoteError(exceptions.MessagingException): """Signifies that a remote endpoint method has raised an exception. Contains a string representation of the type of the original exception, the value of the original exception, and the traceback. These are sent to the parent as a joined string so printing the exception contains all of the relevant info. """ def __init__(self, exc_type=None, value=None, traceback=None): self.exc_type = exc_type self.value = value self.traceback = traceback msg = ("Remote error: %(exc_type)s %(value)s\n%(traceback)s." % dict(exc_type=self.exc_type, value=self.value, traceback=self.traceback)) super(RemoteError, self).__init__(msg) class RPCVersionCapError(exceptions.MessagingException): def __init__(self, version, version_cap): self.version = version self.version_cap = version_cap msg = ("Requested message version, %(version)s is incompatible. It " "needs to be equal in major version and less than or equal " "in minor version as the specified version cap " "%(version_cap)s." % dict(version=self.version, version_cap=self.version_cap)) super(RPCVersionCapError, self).__init__(msg) class ClientSendError(exceptions.MessagingException): """Raised if we failed to send a message to a target.""" def __init__(self, target, ex): msg = 'Failed to send to target "%s": %s' % (target, ex) super(ClientSendError, self).__init__(msg) self.target = target self.ex = ex @six.add_metaclass(abc.ABCMeta) class _BaseCallContext(object): _marker = object() def __init__(self, transport, target, serializer, timeout=None, version_cap=None, retry=None): self.conf = transport.conf self.transport = transport self.target = target self.serializer = serializer self.timeout = timeout self.retry = retry self.version_cap = version_cap super(_BaseCallContext, self).__init__() def _make_message(self, ctxt, method, args): msg = dict(method=method) msg['args'] = dict() for argname, arg in args.items(): msg['args'][argname] = self.serializer.serialize_entity(ctxt, arg) if self.target.namespace is not None: msg['namespace'] = self.target.namespace if self.target.version is not None: msg['version'] = self.target.version return msg def _check_version_cap(self, version): if not utils.version_is_compatible(self.version_cap, version): raise RPCVersionCapError(version=version, version_cap=self.version_cap) def can_send_version(self, version=_marker): """Check to see if a version is compatible with the version cap.""" version = self.target.version if version is self._marker else version return utils.version_is_compatible(self.version_cap, version) @classmethod def _check_version(cls, version): if version is not cls._marker: # quick sanity check to make sure parsable version numbers are used try: utils.version_is_compatible(version, version) except (IndexError, ValueError): raise exceptions.MessagingException( "Version must contain a major and minor integer. Got %s" % version) def cast(self, ctxt, method, **kwargs): """Invoke a method and return immediately. 
See RPCClient.cast().""" msg = self._make_message(ctxt, method, kwargs) msg_ctxt = self.serializer.serialize_context(ctxt) self._check_version_cap(msg.get('version')) try: self.transport._send(self.target, msg_ctxt, msg, retry=self.retry) except driver_base.TransportDriverError as ex: raise ClientSendError(self.target, ex) def call(self, ctxt, method, **kwargs): """Invoke a method and wait for a reply. See RPCClient.call().""" if self.target.fanout: raise exceptions.InvalidTarget('A call cannot be used with fanout', self.target) msg = self._make_message(ctxt, method, kwargs) msg_ctxt = self.serializer.serialize_context(ctxt) timeout = self.timeout if self.timeout is None: timeout = self.conf.rpc_response_timeout self._check_version_cap(msg.get('version')) try: result = self.transport._send(self.target, msg_ctxt, msg, wait_for_reply=True, timeout=timeout, retry=self.retry) except driver_base.TransportDriverError as ex: raise ClientSendError(self.target, ex) return self.serializer.deserialize_entity(ctxt, result) @abc.abstractmethod def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker): """Prepare a method invocation context. See RPCClient.prepare().""" class _CallContext(_BaseCallContext): _marker = _BaseCallContext._marker @classmethod def _prepare(cls, call_context, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker): cls._check_version(version) kwargs = dict( exchange=exchange, topic=topic, namespace=namespace, version=version, server=server, fanout=fanout) kwargs = dict([(k, v) for k, v in kwargs.items() if v is not cls._marker]) target = call_context.target(**kwargs) if timeout is cls._marker: timeout = call_context.timeout if version_cap is cls._marker: version_cap = call_context.version_cap if retry is cls._marker: retry = call_context.retry return _CallContext(call_context.transport, target, call_context.serializer, timeout, version_cap, retry) def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker): return _CallContext._prepare(self, exchange, topic, namespace, version, server, fanout, timeout, version_cap, retry) class RPCClient(_BaseCallContext): """A class for invoking methods on remote RPC servers. The RPCClient class is responsible for sending method invocations to and receiving return values from remote RPC servers via a messaging transport. Two RPC patterns are supported: RPC calls and RPC casts. An RPC cast is used when an RPC method does *not* return a value to the caller. An RPC call is used when a return value is expected from the method. For further information see the cast() and call() methods. The default target used for all subsequent calls and casts is supplied to the RPCClient constructor. The client uses the target to control how the RPC request is delivered to a server. If only the target's topic (and optionally exchange) are set, then the RPC can be serviced by any server that is listening to that topic (and exchange). If multiple servers are listening on that topic/exchange, then one server is picked using a best-effort round-robin algorithm. Alternatively, the client can set the Target's ``server`` attribute to the name of a specific server to send the RPC request to one particular server. 
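For example, to direct a request to one particular server (a sketch; ``host`` is a placeholder for a real server name)::

    cctxt = client.prepare(server=host)
    cctxt.call(ctxt, 'ping')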
In the case of RPC cast, the RPC request can be broadcast to all servers listening to the Target's topic/exchange by setting the Target's ``fanout`` property to ``True``. While the default target is set on construction, target attributes can be overridden for individual method invocations using the prepare() method. A method invocation consists of a request context dictionary, a method name and a dictionary of arguments. This class is intended to be used by wrapping it in another class which provides methods on the subclass to perform the remote invocation using call() or cast():: class TestClient(object): def __init__(self, transport): target = messaging.Target(topic='test', version='2.0') self._client = messaging.RPCClient(transport, target) def test(self, ctxt, arg): return self._client.call(ctxt, 'test', arg=arg) An example of using the prepare() method to override some attributes of the default target:: def test(self, ctxt, arg): cctxt = self._client.prepare(version='2.5') return cctxt.call(ctxt, 'test', arg=arg) RPCClient have a number of other properties - for example, timeout and version_cap - which may make sense to override for some method invocations, so they too can be passed to prepare():: def test(self, ctxt, arg): cctxt = self._client.prepare(timeout=10) return cctxt.call(ctxt, 'test', arg=arg) However, this class can be used directly without wrapping it another class. For example:: transport = messaging.get_rpc_transport(cfg.CONF) target = messaging.Target(topic='test', version='2.0') client = messaging.RPCClient(transport, target) client.call(ctxt, 'test', arg=arg) but this is probably only useful in limited circumstances as a wrapper class will usually help to make the code much more obvious. If the connection to the messaging service is not active when an RPC request is made the client will block waiting for the connection to complete. If the connection fails to complete, the client will try to re-establish that connection. By default this will continue indefinitely until the connection completes. However, the retry parameter can be used to have the RPC request fail with a MessageDeliveryFailure after the given number of retries. For example:: client = messaging.RPCClient(transport, target, retry=None) client.call(ctxt, 'sync') try: client.prepare(retry=0).cast(ctxt, 'ping') except messaging.MessageDeliveryFailure: LOG.error("Failed to send ping message") """ _marker = _BaseCallContext._marker def __init__(self, transport, target, timeout=None, version_cap=None, serializer=None, retry=None): """Construct an RPC client. :param transport: a messaging transport handle :type transport: Transport :param target: the default target for invocations :type target: Target :param timeout: an optional default timeout (in seconds) for call()s :type timeout: int or float :param version_cap: raise a RPCVersionCapError version exceeds this cap :type version_cap: str :param serializer: an optional entity serializer :type serializer: Serializer :param retry: an optional default connection retries configuration: None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int """ if serializer is None: serializer = msg_serializer.NoOpSerializer() if not isinstance(transport, msg_transport.RPCTransport): LOG.warning("Using notification transport for RPC. 
Please use " "get_rpc_transport to obtain an RPC transport " "instance.") super(RPCClient, self).__init__( transport, target, serializer, timeout, version_cap, retry ) self.conf.register_opts(_client_opts) def prepare(self, exchange=_marker, topic=_marker, namespace=_marker, version=_marker, server=_marker, fanout=_marker, timeout=_marker, version_cap=_marker, retry=_marker): """Prepare a method invocation context. Use this method to override client properties for an individual method invocation. For example:: def test(self, ctxt, arg): cctxt = self.prepare(version='2.5') return cctxt.call(ctxt, 'test', arg=arg) :param exchange: see Target.exchange :type exchange: str :param topic: see Target.topic :type topic: str :param namespace: see Target.namespace :type namespace: str :param version: requirement the server must support, see Target.version :type version: str :param server: send to a specific server, see Target.server :type server: str :param fanout: send to all servers on topic, see Target.fanout :type fanout: bool :param timeout: an optional default timeout (in seconds) for call()s :type timeout: int or float :param version_cap: raise a RPCVersionCapError version exceeds this cap :type version_cap: str :param retry: an optional connection retries configuration: None or -1 means to retry forever. 0 means no retry is attempted. N means attempt at most N retries. :type retry: int """ return _CallContext._prepare(self, exchange, topic, namespace, version, server, fanout, timeout, version_cap, retry) def cast(self, ctxt, method, **kwargs): """Invoke a method without blocking for a return value. The cast() method is used to invoke an RPC method that does not return a value. cast() RPC requests may be broadcast to all Servers listening on a given topic by setting the fanout Target property to ``True``. The cast() operation is best-effort: cast() will block the calling thread until the RPC request method is accepted by the messaging transport, but cast() does *not* verify that the RPC method has been invoked by the server. cast() does guarantee that the method will be not executed twice on a destination (e.g. 'at-most-once' execution). There are no ordering guarantees across successive casts, even among casts to the same destination. Therefore methods may be executed in an order different from the order in which they are cast. Method arguments must either be primitive types or types supported by the client's serializer (if any). Similarly, the request context must be a dict unless the client's serializer supports serializing another type. :param ctxt: a request context dict :type ctxt: dict :param method: the method name :type method: str :param kwargs: a dict of method arguments :type kwargs: dict :raises: MessageDeliveryFailure if the messaging transport fails to accept the request. """ self.prepare().cast(ctxt, method, **kwargs) def call(self, ctxt, method, **kwargs): """Invoke a method and wait for a reply. The call() method is used to invoke RPC methods that return a value. Since only a single return value is permitted it is not possible to call() to a fanout target. call() will block the calling thread until the messaging transport provides the return value, a timeout occurs, or a non-recoverable error occurs. call() guarantees that the RPC request is done 'at-most-once' which ensures that the call will never be duplicated. However if the call should fail or time out before the return value arrives then there are no guarantees whether or not the method was invoked. 
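A caller that needs to know whether the method ran should therefore only retry operations it knows to be idempotent, for example (a sketch; ``get_status`` is a hypothetical endpoint method)::

    try:
        result = cctxt.call(ctxt, 'get_status')
    except messaging.MessagingTimeout:
        # The server may or may not have processed the request.
        raise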
Since call() blocks until completion of the RPC method, call()s from the same thread are guaranteed to be processed in-order. Method arguments must either be primitive types or types supported by the client's serializer (if any). Similarly, the request context must be a dict unless the client's serializer supports serializing another type. The semantics of how any errors raised by the remote RPC endpoint method are handled are quite subtle. Firstly, if the remote exception is contained in one of the modules listed in the allow_remote_exmods messaging.get_rpc_transport() parameter, then it this exception will be re-raised by call(). However, such locally re-raised remote exceptions are distinguishable from the same exception type raised locally because re-raised remote exceptions are modified such that their class name ends with the '_Remote' suffix so you may do:: if ex.__class__.__name__.endswith('_Remote'): # Some special case for locally re-raised remote exceptions Secondly, if a remote exception is not from a module listed in the allowed_remote_exmods list, then a messaging.RemoteError exception is raised with all details of the remote exception. :param ctxt: a request context dict :type ctxt: dict :param method: the method name :type method: str :param kwargs: a dict of method arguments :type kwargs: dict :raises: MessagingTimeout, RemoteError, MessageDeliveryFailure """ return self.prepare().call(ctxt, method, **kwargs) def can_send_version(self, version=_marker): """Check to see if a version is compatible with the version cap.""" return self.prepare(version=version).can_send_version() oslo.messaging-5.35.0/oslo_messaging/rpc/transport.py0000666000175100017510000000364313224676046023005 0ustar zuulzuul00000000000000# Copyright 2017 OpenStack Foundation. # All Rights Reserved. # Copyright 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'get_rpc_transport' ] from oslo_messaging import transport as msg_transport def get_rpc_transport(conf, url=None, allowed_remote_exmods=None): """A factory method for Transport objects for RPCs. This method should be used to ensure the correct messaging functionality for RPCs. RPCs and Notifications may use separate messaging systems that utilize different drivers, different access permissions, message delivery, etc. Presently, this function works exactly the same as get_transport. It's use is recommended as disambiguates the intended use for the transport and may in the future extend functionality related to the separation of messaging backends. 
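For example::

    transport = oslo_messaging.get_rpc_transport(cfg.CONF)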
:param conf: the user configuration :type conf: cfg.ConfigOpts :param url: a transport URL, see :py:class:`transport.TransportURL` :type url: str or TransportURL :param allowed_remote_exmods: a list of modules which a client using this transport will deserialize remote exceptions from :type allowed_remote_exmods: list """ return msg_transport._get_transport( conf, url, allowed_remote_exmods, transport_cls=msg_transport.RPCTransport) oslo.messaging-5.35.0/oslo_messaging/conffixture.py0000666000175100017510000001277213224676046022524 0ustar zuulzuul00000000000000 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = ['ConfFixture'] import sys import fixtures from functools import wraps def _import_opts(conf, module, opts, group=None): __import__(module) conf.register_opts(getattr(sys.modules[module], opts), group=group) class ConfFixture(fixtures.Fixture): """Tweak configuration options for unit testing. oslo.messaging registers a number of configuration options, but rather than directly referencing those options, users of the API should use this interface for querying and overriding certain configuration options. An example usage:: self.messaging_conf = self.useFixture(messaging.ConfFixture(cfg.CONF)) self.messaging_conf.transport_driver = 'fake' :param conf: a ConfigOpts instance :type conf: oslo.config.cfg.ConfigOpts """ def __init__(self, conf): self.conf = conf _import_opts(self.conf, 'oslo_messaging._drivers.impl_rabbit', 'rabbit_opts', 'oslo_messaging_rabbit') _import_opts(self.conf, 'oslo_messaging._drivers.base', 'base_opts', 'oslo_messaging_rabbit') _import_opts(self.conf, 'oslo_messaging._drivers.amqp', 'amqp_opts', 'oslo_messaging_rabbit') _import_opts(self.conf, 'oslo_messaging._drivers.amqp1_driver.opts', 'amqp1_opts', 'oslo_messaging_amqp') _import_opts(self.conf, 'oslo_messaging._drivers.zmq_driver.zmq_options', 'zmq_opts', 'oslo_messaging_zmq') _import_opts(self.conf, 'oslo_messaging._drivers.zmq_driver.' 
'matchmaker.zmq_matchmaker_redis', 'matchmaker_redis_opts', 'matchmaker_redis') _import_opts(self.conf, 'oslo_messaging.rpc.client', '_client_opts') _import_opts(self.conf, 'oslo_messaging.transport', '_transport_opts') _import_opts(self.conf, 'oslo_messaging.notify.notifier', '_notifier_opts', 'oslo_messaging_notifications') def _setup_decorator(self): # Support older test cases that still use the set_override # with the old config key names def decorator_for_set_override(wrapped_function): @wraps(wrapped_function) def _wrapper(*args, **kwargs): group = 'oslo_messaging_notifications' if args[0] == 'notification_driver': args = ('driver', args[1], group) elif args[0] == 'notification_transport_url': args = ('transport_url', args[1], group) elif args[0] == 'notification_topics': args = ('topics', args[1], group) return wrapped_function(*args, **kwargs) _wrapper.wrapped = wrapped_function return _wrapper def decorator_for_clear_override(wrapped_function): @wraps(wrapped_function) def _wrapper(*args, **kwargs): group = 'oslo_messaging_notifications' if args[0] == 'notification_driver': args = ('driver', group) elif args[0] == 'notification_transport_url': args = ('transport_url', group) elif args[0] == 'notification_topics': args = ('topics', group) return wrapped_function(*args, **kwargs) _wrapper.wrapped = wrapped_function return _wrapper if not hasattr(self.conf.set_override, 'wrapped'): self.conf.set_override = decorator_for_set_override( self.conf.set_override) if not hasattr(self.conf.clear_override, 'wrapped'): self.conf.clear_override = decorator_for_clear_override( self.conf.clear_override) def _teardown_decorator(self): if hasattr(self.conf.set_override, 'wrapped'): self.conf.set_override = self.conf.set_override.wrapped if hasattr(self.conf.clear_override, 'wrapped'): self.conf.clear_override = self.conf.clear_override.wrapped def setUp(self): super(ConfFixture, self).setUp() self._setup_decorator() self.addCleanup(self._teardown_decorator) self.addCleanup(self.conf.reset) @property def transport_driver(self): """The transport driver - for example 'rabbit', 'amqp' or 'fake'.""" return self.conf.rpc_backend @transport_driver.setter def transport_driver(self, value): self.conf.set_override('rpc_backend', value) @property def response_timeout(self): """Default number of seconds to wait for a response from a call.""" return self.conf.rpc_response_timeout @response_timeout.setter def response_timeout(self, value): self.conf.set_override('rpc_response_timeout', value) oslo.messaging-5.35.0/oslo_messaging/version.py0000666000175100017510000000126613224676046021651 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
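# NOTE: pbr resolves the version from the installed package metadata
# (or from git tags in a development checkout); no version string is
# hard-coded here.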
import pbr.version version_info = pbr.version.VersionInfo('oslo.messaging') oslo.messaging-5.35.0/.testr.conf0000666000175100017510000000046013224676046016662 0ustar zuulzuul00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list group_regex=${OS_GROUP_REGEX} oslo.messaging-5.35.0/README.rst0000666000175100017510000000162713224676046016271 0ustar zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/oslo.messaging.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on Oslo Messaging Library ====================== .. image:: https://img.shields.io/pypi/v/oslo.messaging.svg :target: https://pypi.python.org/pypi/oslo.messaging/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/oslo.messaging.svg :target: https://pypi.python.org/pypi/oslo.messaging/ :alt: Downloads The Oslo messaging API supports RPC and notifications over a number of different messaging transports. * License: Apache License, Version 2.0 * Documentation: https://docs.openstack.org/oslo.messaging/latest/ * Source: https://git.openstack.org/cgit/openstack/oslo.messaging * Bugs: https://bugs.launchpad.net/oslo.messaging oslo.messaging-5.35.0/setup-test-env-zmq.sh0000777000175100017510000000131113224676046020637 0ustar zuulzuul00000000000000#!/bin/bash set -e . tools/functions.sh DATADIR=$(mktemp -d /tmp/OSLOMSG-ZEROMQ.XXXXX) trap "clean_exit $DATADIR" EXIT export ZMQ_MATCHMAKER=redis export ZMQ_REDIS_PORT=65123 export ZMQ_IPC_DIR=${DATADIR} export ZMQ_USE_PUB_SUB=false export ZMQ_USE_ROUTER_PROXY=false export ZMQ_USE_ACKS=false export ZMQ_USE_DYNAMIC_CONNECTIONS=false export TRANSPORT_URL="zmq+${ZMQ_MATCHMAKER}://127.0.0.1:${ZMQ_REDIS_PORT}" cat > ${DATADIR}/zmq.conf < clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export PROJECTS="openstack/devstack-plugin-kafka $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-kafka git://git.openstack.org/openstack/devstack-plugin-kafka" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-rabbit-default/0000775000175100017510000000000013224676256031316 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-rabbit-default/post.yaml0000666000175100017510000000063313224676046033170 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - 
--include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-rabbit-default/run.yaml0000666000175100017510000000236113224676046033007 0ustar zuulzuul00000000000000- hosts: all name: Tempest full with neutron using rabbitmq tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-zmq/0000775000175100017510000000000013224676256027635 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-zmq/post.yaml0000666000175100017510000000455113224676046031512 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-zmq/run.yaml0000666000175100017510000000635213224676046031332 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job 
legacy-oslo.messaging-telemetry-dsvm-integration-zmq from old job gate-oslo.messaging-telemetry-dsvm-integration-zmq-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_HEAT=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_EXERCISES=0 export DEVSTACK_GATE_INSTALL_TESTONLY=1 export PROJECTS="openstack/ceilometer $PROJECTS" export PROJECTS="openstack/aodh $PROJECTS" export PROJECTS="openstack/devstack-plugin-zmq $PROJECTS" case "$ZUUL_BRANCH" in "stable/ocata") export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin gnocchi git://git.openstack.org/openstack/gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export OVERRIDE_GNOCCHI_PROJECT_BRANCH="stable/3.1" export PROJECTS="openstack/panko $PROJECTS openstack/gnocchi" ;; *) export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export PROJECTS="openstack/panko $PROJECTS" ;; esac export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh git://git.openstack.org/openstack/aodh" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat git://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_PIPELINE_INTERVAL=5" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_STORAGE_BACKEND=file" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin devstack-plugin-zmq git://git.openstack.org/openstack/devstack-plugin-zmq" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" function post_test_hook { cd /opt/stack/new/ceilometer/ceilometer/tests/integration/hooks/ ./post_test_hook.sh } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-amqp1-dual-centos-7/0000775000175100017510000000000013224676256027621 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-amqp1-dual-centos-7/post.yaml0000666000175100017510000000063313224676046031473 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-amqp1-dual-centos-7/run.yaml0000666000175100017510000000310613224676046031310 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-src-dsvm-full-amqp1-dual-centos-7 from old job gate-oslo.messaging-src-dsvm-full-amqp1-dual-centos-7-nv tasks: 
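# The tasks below follow the legacy devstack-gate pattern: clone
# devstack-gate, then run the gate wrapper with job-specific settings.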
- name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export PROJECTS="openstack/devstack-plugin-amqp1 $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-amqp1 git://git.openstack.org/openstack/devstack-plugin-amqp1" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" export DEVSTACK_LOCAL_CONFIG+=$'\n'"AMQP1_SERVICE=qpid-dual" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-devstack-full/0000775000175100017510000000000013224676256024272 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-devstack-full/post.yaml0000666000175100017510000000063413224676046026145 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-devstack-full/run.yaml0000666000175100017510000000307213224676046025763 0ustar zuulzuul00000000000000- hosts: all name: testing the devstack-tempest job for oslo.messaging tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -x echo "test run phase" export -p ls -l pwd executable: /bin/bash environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export PROJECTS="openstack/oslo.messaging $PROJECTS" if [ -n "{{ transport_plugin_project }}" ]; then export PROJECTS="{{ transport_plugin_project }} $PROJECTS" fi cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-devstack-full/pre.yaml0000666000175100017510000000031513224676046025742 0ustar zuulzuul00000000000000- hosts: all name: Test the pre run tasks: - shell: cmd: | set -x echo "test pre phase" export -p ls -l pwd executable: /bin/bash oslo.messaging-5.35.0/playbooks/oslo.messaging-src-grenade-dsvm/0000775000175100017510000000000013224676256024667 5ustar 
zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-grenade-dsvm/post.yaml0000666000175100017510000000063313224676046026541 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-src-grenade-dsvm/run.yaml0000666000175100017510000000353013224676046026357 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-src-grenade-dsvm from old job gate-oslo.messaging-src-grenade-dsvm-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PROJECTS="openstack-dev/grenade $PROJECTS" export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_GRENADE=pullup export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export DEVSTACK_PROJECT_FROM_GIT=$ZUUL_SHORT_PROJECT_NAME # Even if the branch is overridden, make sure we use # the correct branch using the OVERRIDE_*_PROJECT_BRANCH # variable. uc_project=`echo $DEVSTACK_PROJECT_FROM_GIT | tr [:lower:] [:upper:] | tr '-' '_' | sed 's/[^A-Z_]//'` export "OVERRIDE_"$uc_project"_PROJECT_BRANCH"=$ZUUL_BRANCH cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-kafka/0000775000175100017510000000000013224676256030103 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-kafka/post.yaml0000666000175100017510000000455113224676046031760 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true 
verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-kafka/run.yaml0000666000175100017510000000636413224676046031603 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-telemetry-dsvm-integration-kafka from old job gate-oslo.messaging-telemetry-dsvm-integration-kafka-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_HEAT=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_EXERCISES=0 export DEVSTACK_GATE_INSTALL_TESTONLY=1 export PROJECTS="openstack/ceilometer $PROJECTS" export PROJECTS="openstack/aodh $PROJECTS" export PROJECTS="openstack/devstack-plugin-kafka $PROJECTS" case "$ZUUL_BRANCH" in "stable/ocata") export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin gnocchi git://git.openstack.org/openstack/gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export OVERRIDE_GNOCCHI_PROJECT_BRANCH="stable/3.1" export PROJECTS="openstack/panko $PROJECTS openstack/gnocchi" ;; *) export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export PROJECTS="openstack/panko $PROJECTS" ;; esac export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh git://git.openstack.org/openstack/aodh" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat git://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_PIPELINE_INTERVAL=5" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_STORAGE_BACKEND=file" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin devstack-plugin-kafka git://git.openstack.org/openstack/devstack-plugin-kafka" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" function post_test_hook { cd /opt/stack/new/ceilometer/ceilometer/tests/integration/hooks/ ./post_test_hook.sh } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' 
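Note: the telemetry integration jobs above all accumulate their devstack settings with the same shell idiom, export DEVSTACK_LOCAL_CONFIG+=$'\n'"...". A minimal illustrative sketch of how that idiom behaves (the variable values here are examples only; devstack-gate consumes the accumulated value when it generates the devstack configuration):

    #!/bin/bash
    # $'\n' is an ANSI-C quoted newline, so each += append adds one
    # complete line to the accumulated configuration value.
    export DEVSTACK_LOCAL_CONFIG="CEILOMETER_BACKEND=gnocchi"
    export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high"
    printf '%s\n' "$DEVSTACK_LOCAL_CONFIG"
    # Output:
    #   CEILOMETER_BACKEND=gnocchi
    #   GNOCCHI_ARCHIVE_POLICY=high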
oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-pika-default/0000775000175100017510000000000013224676256030777 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-pika-default/post.yaml0000666000175100017510000000063313224676046032651 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-pika-default/run.yaml0000666000175100017510000000307413224676046032472 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-tempest-neutron-dsvm-src-oslo.messaging-pika-default from old job gate-tempest-neutron-dsvm-src-oslo.messaging-pika-default-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export DEVSTACK_GATE_NEUTRON=1 export PROJECTS="openstack/devstack-plugin-pika $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-pika git://git.openstack.org/openstack/devstack-plugin-pika" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-pika/0000775000175100017510000000000013224676256027752 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-pika/post.yaml0000666000175100017510000000455113224676046031627 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - 
--include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-pika/run.yaml0000666000175100017510000000635713224676046031454 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-telemetry-dsvm-integration-pika from old job gate-oslo.messaging-telemetry-dsvm-integration-pika-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_HEAT=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_EXERCISES=0 export DEVSTACK_GATE_INSTALL_TESTONLY=1 export PROJECTS="openstack/ceilometer $PROJECTS" export PROJECTS="openstack/aodh $PROJECTS" export PROJECTS="openstack/devstack-plugin-pika $PROJECTS" case "$ZUUL_BRANCH" in "stable/ocata") export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin gnocchi git://git.openstack.org/openstack/gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export OVERRIDE_GNOCCHI_PROJECT_BRANCH="stable/3.1" export PROJECTS="openstack/panko $PROJECTS openstack/gnocchi" ;; *) export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export PROJECTS="openstack/panko $PROJECTS" ;; esac export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh git://git.openstack.org/openstack/aodh" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat git://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_PIPELINE_INTERVAL=5" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_STORAGE_BACKEND=file" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin devstack-plugin-pika git://git.openstack.org/openstack/devstack-plugin-pika" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" function post_test_hook { cd /opt/stack/new/ceilometer/ceilometer/tests/integration/hooks/ ./post_test_hook.sh } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' 
oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid/0000775000175100017510000000000013224676256030727 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid/post.yaml0000666000175100017510000000063313224676046032601 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-amqp1-hybrid/run.yaml0000666000175100017510000000321013224676046032412 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-tempest-neutron-dsvm-src-oslo.messaging-amqp1-hybrid from old job gate-tempest-neutron-dsvm-src-oslo.messaging-amqp1-hybrid-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export DEVSTACK_GATE_NEUTRON=1 export PROJECTS="openstack/devstack-plugin-amqp1 $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-amqp1 git://git.openstack.org/openstack/devstack-plugin-amqp1" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" export DEVSTACK_LOCAL_CONFIG+=$'\n'"AMQP1_SERVICE=qpid-hybrid" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-rabbit-default/0000775000175100017510000000000013224676256027107 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-rabbit-default/post.yaml0000666000175100017510000000063313224676046030761 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-rabbit-default/run.yaml0000666000175100017510000000230013224676046030571 0ustar zuulzuul00000000000000- hosts: all name: Full tempest tests with rabbitmq tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export 
DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-rabbit/0000775000175100017510000000000013224676256030271 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-rabbit/post.yaml0000666000175100017510000000455113224676046032146 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-rabbit/run.yaml0000666000175100017510000000630113224676046031760 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-telemetry-dsvm-integration-oslo.messaging from old job gate-telemetry-dsvm-integration-oslo.messaging-ubuntu-xenial tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_HEAT=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_EXERCISES=0 export DEVSTACK_GATE_INSTALL_TESTONLY=1 export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export 
PROJECTS="openstack/ceilometer openstack/aodh" # NOTE(sileht): This job runs on Aodh and Ceilometer. # Gnocchi uses an independent release cycle. So we map here # which Gnocchi version can be used with other OpenStack # components and the reverse. case "$ZUUL_BRANCH" in "stable/ocata") export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin gnocchi git://git.openstack.org/openstack/gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export OVERRIDE_GNOCCHI_PROJECT_BRANCH="stable/3.1" export PROJECTS="openstack/panko $PROJECTS openstack/gnocchi" ;; *) export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export PROJECTS="openstack/panko $PROJECTS" ;; esac export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh git://git.openstack.org/openstack/aodh" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat git://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_PIPELINE_INTERVAL=15" export DEVSTACK_PROJECT_FROM_GIT=$ZUUL_SHORT_PROJECT_NAME function post_test_hook { cd /opt/stack/new/ceilometer/ceilometer/tests/integration/hooks/ ./post_test_hook.sh } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-zmq-default/0000775000175100017510000000000013224676256030662 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-zmq-default/post.yaml0000666000175100017510000000063313224676046032534 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-zmq-default/run.yaml0000666000175100017510000000306713224676046032357 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-tempest-neutron-dsvm-src-oslo.messaging-zmq-default from old job gate-tempest-neutron-dsvm-src-oslo.messaging-zmq-default-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export DEVSTACK_GATE_NEUTRON=1 export PROJECTS="openstack/devstack-plugin-zmq $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-zmq git://git.openstack.org/openstack/devstack-plugin-zmq" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp 
devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-pika-default/0000775000175100017510000000000013224676256026570 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-pika-default/post.yaml0000666000175100017510000000063313224676046030442 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-pika-default/run.yaml0000666000175100017510000000277313224676046030270 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-src-dsvm-full-pika-default from old job gate-oslo.messaging-src-dsvm-full-pika-default-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export PROJECTS="openstack/devstack-plugin-pika $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-pika git://git.openstack.org/openstack/devstack-plugin-pika" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-kafka-default/0000775000175100017510000000000013224676256031130 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-kafka-default/post.yaml0000666000175100017510000000063313224676046033002 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-tempest-neutron-dsvm-src-kafka-default/run.yaml0000666000175100017510000000310113224676046032612 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-tempest-neutron-dsvm-src-oslo.messaging-kafka-default from old job gate-tempest-neutron-dsvm-src-oslo.messaging-kafka-default-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org 
\ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export DEVSTACK_GATE_NEUTRON=1 export PROJECTS="openstack/devstack-plugin-kafka $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-kafka git://git.openstack.org/openstack/devstack-plugin-kafka" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-amqp1-hybrid/0000775000175100017510000000000013224676256026520 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-amqp1-hybrid/post.yaml0000666000175100017510000000063313224676046030372 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-amqp1-hybrid/run.yaml0000666000175100017510000000310713224676046030210 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-src-dsvm-full-amqp1-hybrid from old job gate-oslo.messaging-src-dsvm-full-amqp1-hybrid-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export PROJECTS="openstack/devstack-plugin-amqp1 $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-amqp1 git://git.openstack.org/openstack/devstack-plugin-amqp1" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" export DEVSTACK_LOCAL_CONFIG+=$'\n'"AMQP1_SERVICE=qpid-hybrid" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-kafka-default-centos-7/0000775000175100017510000000000013224676256030356 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-kafka-default-centos-7/post.yaml0000666000175100017510000000063313224676046032230 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs 
oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-kafka-default-centos-7/run.yaml0000666000175100017510000000300413224676046032042 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-src-dsvm-full-kafka-default-centos-7 from old job gate-oslo.messaging-src-dsvm-full-kafka-default-centos-7-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export PROJECTS="openstack/devstack-plugin-kafka $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-kafka git://git.openstack.org/openstack/devstack-plugin-kafka" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-zmq-default/0000775000175100017510000000000013224676256026453 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-zmq-default/post.yaml0000666000175100017510000000063313224676046030325 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-src-dsvm-full-zmq-default/run.yaml0000666000175100017510000000276613224676046030155 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-src-dsvm-full-zmq-default from old job gate-oslo.messaging-src-dsvm-full-zmq-default-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export PROJECTS="openstack/devstack-plugin-zmq $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin devstack-plugin-zmq git://git.openstack.org/openstack/devstack-plugin-zmq" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-src-grenade-dsvm-multinode/0000775000175100017510000000000013224676256026665 5ustar 
zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-src-grenade-dsvm-multinode/post.yaml0000666000175100017510000000063313224676046030537 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-src-grenade-dsvm-multinode/run.yaml0000666000175100017510000000365013224676046030360 0ustar zuulzuul00000000000000- hosts: primary name: Autoconverted job legacy-oslo.messaging-src-grenade-dsvm-multinode from old job gate-oslo.messaging-src-grenade-dsvm-multinode-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PROJECTS="openstack-dev/grenade $PROJECTS" export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_GRENADE=pullup export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export DEVSTACK_GATE_TOPOLOGY="multinode" export DEVSTACK_PROJECT_FROM_GIT=$ZUUL_SHORT_PROJECT_NAME # Even if the branch is overridden, make sure we use # the correct branch using the OVERRIDE_*_PROJECT_BRANCH # variable. 
uc_project=`echo $DEVSTACK_PROJECT_FROM_GIT | tr [:lower:] [:upper:] | tr '-' '_' | sed 's/[^A-Z_]//'` export "OVERRIDE_"$uc_project"_PROJECT_BRANCH"=$ZUUL_BRANCH cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-amqp1/0000775000175100017510000000000013224676256030045 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-amqp1/post.yaml0000666000175100017510000000455113224676046031722 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs oslo.messaging-5.35.0/playbooks/oslo.messaging-telemetry-dsvm-integration-amqp1/run.yaml0000666000175100017510000000647513224676046031550 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-oslo.messaging-telemetry-dsvm-integration-amqp1 from old job gate-oslo.messaging-telemetry-dsvm-integration-amqp1-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_HEAT=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_EXERCISES=0 export 
DEVSTACK_GATE_INSTALL_TESTONLY=1 export PROJECTS="openstack/ceilometer $PROJECTS" export PROJECTS="openstack/aodh $PROJECTS" export PROJECTS="openstack/devstack-plugin-amqp1 $PROJECTS" case "$ZUUL_BRANCH" in "stable/ocata") export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin gnocchi git://git.openstack.org/openstack/gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export OVERRIDE_GNOCCHI_PROJECT_BRANCH="stable/3.1" export PROJECTS="openstack/panko $PROJECTS openstack/gnocchi" ;; *) export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export PROJECTS="openstack/panko $PROJECTS" ;; esac export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh git://git.openstack.org/openstack/aodh" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat git://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_PIPELINE_INTERVAL=5" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_STORAGE_BACKEND=file" export DEVSTACK_LOCAL_CONFIG+=$'\n'"AMQP1_SERVICE=qpid-hybrid" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin devstack-plugin-amqp1 git://git.openstack.org/openstack/devstack-plugin-amqp1" export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging" function post_test_hook { cd /opt/stack/new/ceilometer/ceilometer/tests/integration/hooks/ ./post_test_hook.sh } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' oslo.messaging-5.35.0/setup-test-env-zmq-direct-dynamic.sh0000777000175100017510000000150513224676046023536 0ustar zuulzuul00000000000000#!/bin/bash set -e . 
tools/functions.sh DATADIR=$(mktemp -d /tmp/OSLOMSG-ZEROMQ.XXXXX) trap "clean_exit $DATADIR" EXIT export ZMQ_MATCHMAKER=redis export ZMQ_REDIS_PORT=65123 export ZMQ_IPC_DIR=${DATADIR} export ZMQ_USE_PUB_SUB=false export ZMQ_USE_ROUTER_PROXY=false export ZMQ_USE_DYNAMIC_CONNECTIONS=true export ZMQ_USE_ACKS=false export TRANSPORT_URL="zmq+${ZMQ_MATCHMAKER}://127.0.0.1:${ZMQ_REDIS_PORT}" cat > ${DATADIR}/zmq.conf < ${DATADIR}/zmq-proxy.log 2>&1 & $* oslo.messaging-5.35.0/bindep.txt0000666000175100017510000000227313224676046016602 0ustar zuulzuul00000000000000# common dpkg gettext [platform:dpkg] # For releasenotes job build-essential [platform:dpkg] libffi-dev [platform:dpkg] # common rpm gcc [platform:rpm] gcc-c++ [platform:rpm] make [platform:rpm] pkgconfig [platform:rpm] libffi-devel [platform:rpm] # kombu/pika rabbitmq-server [platform:dpkg rabbit pika] # zmq redis [platform:rpm zmq] redis-sentinel [platform:ubuntu !platform:ubuntu-trusty zmq] redis-server [platform:dpkg zmq] dev-db/redis [platform:gentoo zmq] python-redis [platform:dpkg zmq] zookeeperd [platform:dpkg zmq] python-zmq [!platform:gentoo !platform:fedora !platform:suse zmq] python2-zmq [platform:fedora zmq] dev-python/pyzmq [platform:gentoo zmq] # AMQP1 dpkg qpidd [platform:dpkg amqp1] sasl2-bin [platform:dpkg amqp1] uuid-dev [platform:dpkg amqp1] swig [platform:dpkg amqp1] libsasl2-modules [platform:dpkg amqp1] # AMQP1 rpm qpid-cpp-server [platform:rpm amqp1] qpid-proton-c-devel [platform:rpm amqp1] python-qpid-proton [platform:rpm amqp1] cyrus-sasl-lib [platform:rpm amqp1] cyrus-sasl-plain [platform:rpm amqp1] libuuid-devel [platform:rpm amqp1] swig [platform:rpm amqp1] # kafka dpkg openjdk-8-jdk [platform:dpkg kafka] # kafka rpm java-1.8.0-openjdk [platform:rpm kafka] oslo.messaging-5.35.0/doc/0000775000175100017510000000000013224676256015342 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/doc/requirements.txt0000666000175100017510000000071613224676046020631 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. openstackdocstheme>=1.17.0 # Apache-2.0 sphinx>=1.6.2 # BSD reno>=2.5.0 # Apache-2.0 # imported when the source code is parsed for generating documentation: fixtures>=3.0.0 # Apache-2.0/BSD kafka-python>=1.3.1 # Apache-2.0 pyngus>=2.2.0 # Apache-2.0 oslo.messaging-5.35.0/doc/source/0000775000175100017510000000000013224676256016642 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/doc/source/user/0000775000175100017510000000000013224676256017620 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/doc/source/user/FAQ.rst0000666000175100017510000000417213224676046020764 0ustar zuulzuul00000000000000============================ Frequently Asked Questions ============================ I don't need notifications on the message bus. How do I disable them? ===================================================================== Notification messages can be disabled using the ``noop`` notify driver. Set ``driver = noop`` in your configuration file under the [oslo_messaging_notifications] section. Why does the notification publisher create queues, too? Shouldn't the subscriber do that? ========================================================================================= The notification messages are meant to be used for integration with external services, including services that are not part of OpenStack. 
To ensure that the subscriber does not miss any messages if it starts after the publisher, ``oslo.messaging`` ensures that subscriber queues exist when notifications are sent. How do I change the queue names where notifications are published? ================================================================== Notifications are published to the configured exchange using a topic built from a base value specified in the configuration file and the notification "level". The default topic is ``notifications``, so an info-level notification is published to the topic ``notifications.info``. A subscriber queue of the same name is created automatically for each of these topics. To change the queue names, change the notification topic using the ``topics`` configuration option in ``[oslo_messaging_notifications]``. The option accepts a list of values, so it is possible to publish to multiple topics. What are the other choices of notification drivers available? ============================================================= - messaging Send notifications using the 1.0 message format. - messagingv2 Send notifications using the 2.0 message format (with a message envelope). - routing Configurable routing notifier (by priority or event_type). - log Publish notifications via Python logging infrastructure. - test Store notifications in memory for test verification. - noop Disable sending notifications entirely. oslo.messaging-5.35.0/doc/source/user/history.rst0000666000175100017510000000004013224676046022044 0ustar zuulzuul00000000000000.. include:: ../../../ChangeLog oslo.messaging-5.35.0/doc/source/user/index.rst0000666000175100017510000000021713224676046021460 0ustar zuulzuul00000000000000==================== Using oslo.messaging ==================== .. toctree:: :maxdepth: 2 FAQ .. toctree:: :maxdepth: 1 history oslo.messaging-5.35.0/doc/source/conf.py0000666000175100017510000000534113224676046020143 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2017 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'openstackdocstheme', 'stevedore.sphinxext', 'oslo_config.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/oslo.messaging' bug_project = 'oslo.messaging' bug_tag = '' # Must set this variable to include year, month, day, hours, and minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. 
master_doc = 'index' # General information about the project. project = u'oslo.messaging' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] html_theme = 'openstackdocs' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, '%s Documentation' % project, 'OpenStack Foundation', 'manual'), ] oslo.messaging-5.35.0/doc/source/configuration/0000775000175100017510000000000013224676256021511 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/doc/source/configuration/opts.rst0000666000175100017510000000053513224676046023232 0ustar zuulzuul00000000000000======================= Configuration Options ======================= oslo.messaging uses oslo.config to define and manage configuration options to allow the deployer to control how an application uses the underlying messaging system. .. show-options:: oslo.messaging API === .. currentmodule:: oslo_messaging.opts .. autofunction:: list_opts oslo.messaging-5.35.0/doc/source/configuration/index.rst0000666000175100017510000000014013224676046023344 0ustar zuulzuul00000000000000============= Configuration ============= .. toctree:: :maxdepth: 2 opts conffixture oslo.messaging-5.35.0/doc/source/configuration/conffixture.rst0000666000175100017510000000023613224676046024577 0ustar zuulzuul00000000000000---------------------- Testing Configurations ---------------------- .. currentmodule:: oslo_messaging.conffixture .. autoclass:: ConfFixture :members: oslo.messaging-5.35.0/doc/source/admin/0000775000175100017510000000000013224676256017732 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/doc/source/admin/AMQP1.0.rst0000666000175100017510000005666113224676046021456 0ustar zuulzuul00000000000000----------------------------------------- AMQP 1.0 Protocol Driver Deployment Guide ----------------------------------------- .. currentmodule:: oslo_messaging ============ Introduction ============ The AMQP 1.0 Protocol Driver is a messaging transport backend supported in oslo.messaging. The driver maps the base oslo.messaging capabilities for RPC and Notification message exchange onto version 1.0 of the Advanced Message Queuing Protocol (AMQP 1.0, ISO/IEC 19464). The driver is intended to support any messaging intermediary (e.g. broker or router) that implements version 1.0 of the AMQP protocol. More detail regarding the AMQP 1.0 Protocol is available from the `AMQP specification`_. .. _AMQP specification: http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-overview-v1.0-os.html More detail regarding the driver's implementation is available from the `oslo specification`_. .. 
_oslo specification: https://git.openstack.org/cgit/openstack/oslo-specs/tree/specs/juno/amqp10-driver-implementation.rst ======== Abstract ======== The AMQP 1.0 driver is one of a family of oslo.messaging backend drivers. It currently supports two types of message intermediaries. The first type is an AMQP 1.0 messaging broker and the second type is an AMQP 1.0 message router. The driver should support additional intermediary types in the future but may require additions to driver configuration parameters in order to do so. +--------------+-----------+------------+------------+-----------+ | Intermediary | RPC | Notify | Message | Topology | | Type | Pattern | Pattern | Treatment | | +--------------+-----------+------------+------------+-----------+ | Message | Yes | `Limited`_ | Direct | Single or | | Router | | | Messaging | Mesh | +--------------+-----------+------------+------------+-----------+ | Message | Yes | Yes | Store and | Single or | | Broker | | | Forward | Cluster | +--------------+-----------+------------+------------+-----------+ Direct Messaging ---------------- The RPC messaging pattern is a synchronous exchange between client and server that is temporally bracketed. The direct messaging capabilities provided by the message router are optimal for the RPC messaging pattern. The driver can readily scale operation from working with a single instances of a message router to working with a large scale routed mesh interconnect topology. Store and Forward ----------------- The Notification messaging pattern is an asynchronous exchange from a notifier to a listener (e.g. consumer). The listener need not be present when the notification is sent. Thus, the store and forwarding capabilities provided by the message broker are required for the Notification messaging pattern. This driver is able to work with a single instance of a message broker or a clustered broker deployment. .. _Limited: It is recommended that the message router intermediary not be used for the Notification messaging pattern due to the consideration that notification messages will be dropped when there is no active consumer. The message router does not provide durability or store-and-forward capabilities for notification messages. Hybrid Messaging Backends ------------------------- Oslo.messaging provides a mechanism to configure separate backends for RPC and Notification communications. This is supported through the specification of separate RPC and Notification `transport urls`_ in the service configuration. This capability enables the optimal alignment of messaging patterns to messaging backend and allows for different messaging backend types to be deployed. This document provides deployment and configuration information for use of this driver in hybrid messaging configurations. Addressing ---------- A new address syntax was added to the driver to support efficient direct message routing. This new syntax will also work with a broker intermediary backend but is not compatible with the address syntax previously used by the driver. In order to allow backward compatibility, the driver will attempt to identify the intermediary type for the backend in use and will automatically select the 'legacy' syntax for broker-based backends or the new 'routable' syntax for router-based backends. An `address mode`_ configuration option is provided to override this dynamic behavior and force the use of either the legacy or routable address syntax. 
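That override can be expressed directly in the consuming service's
configuration file. A minimal sketch, assuming the option is exposed
as ``addressing_mode`` in the ``[oslo_messaging_amqp]`` section::

    [oslo_messaging_amqp]
    # dynamic: detect the intermediary type and choose a syntax
    # routable: force the new direct-routing address syntax
    # legacy: force the original broker-oriented address syntax
    addressing_mode = routable
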
Message Acknowledgement
-----------------------

A primary functional difference between a router and a broker intermediary type is when message acknowledgement occurs. The router does not "store" the message, hence it does not generate an acknowledgement. Instead, the consuming endpoint is responsible for message acknowledgement and the router forwards the acknowledgement back to the sender. This is known as 'end-to-end' acknowledgement. In contrast, a broker stores then forwards the message, so message acknowledgement is performed in two stages. In the first stage, a message acknowledgement occurs between the broker and the Sender. In the second stage, an acknowledgement occurs between the Server and the broker. This difference affects how long the Sender waits for the message transfer to complete.

::

                                               +dispatch+
                                               |  (3)   |
                                               |        |
                                               |        v
    +--------------+   (1)   +----------+   (2)   +--------------+
    |    Client    |-------->|  Router  |-------->|    Server    |
    |   (Sender)   |<--------| (Direct) |<--------|  (Listener)  |
    +--------------+   (5)   +----------+   (4)   +--------------+

For example, when a router intermediary is used, the following sequence occurs:

1. The message is sent to the router
2. The router forwards the message to the Server
3. The Server dispatches the message to the application
4. The Server indicates the acknowledgement via the router
5. The router forwards the acknowledgement to the Sender

In this sequence, a Sender waits for the message acknowledgement until step (5) occurs.

::

                                               +dispatch+
                                               |  (4)   |
                                               |        |
                                               |        v
    +--------------+   (1)   +----------+   (3)   +--------------+
    |    Client    |-------->|  Broker  |-------->|    Server    |
    |   (Sender)   |<--------| (Queue)  |<--------|  (Listener)  |
    +--------------+   (2)   +----------+   (5)   +--------------+

And when a broker intermediary is used, the following sequence occurs:

1. The message is sent to the broker
2. The broker stores the message and acknowledges the message to the Sender
3. The broker sends the message to the Server
4. The Server dispatches the message to the application
5. The Server indicates the acknowledgement to the broker

In this sequence, a Sender waits for the message acknowledgement until step (2) occurs. Therefore the broker-based Sender receives the acknowledgement earlier in the transfer than in the routed case. However, in the brokered case receipt of the acknowledgement does not signify that the message has been (or will ever be) received by the Server.

Batched Notifications **Note Well**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

While the use of a router intermediary for oslo.messaging Notification is currently not recommended, it should be noted that the use of a router intermediary with batched notifications may exacerbate the acknowledgement wait time for a Sender. For example, when a batched notification configuration is used where the batch size is set to 100, the Server will wait until 100 notification messages are buffered (or a timeout occurs) before dispatching the notifications to the application for message acknowledgement. Since each notifier client can have at most one message outstanding (e.g. pending acknowledgement), if the total number of notifying clients is less than 100 the batch limit will never be met. This will effectively pause all notifying clients until the batch timeout expires.

=============
Prerequisites
=============

Protocol Engine
---------------

This driver uses the Apache QPID `Proton`_ AMQP 1.0 protocol engine. This engine consists of a platform-specific library and a Python binding.
The driver does not directly interface with the engine API, as the API is a very low-level interface to the AMQP protocol. Instead, the driver uses the pure python `Pyngus`_ client API, which is layered on top of the protocol engine. .. _Proton: http://qpid.apache.org/proton/index.html .. _Pyngus: https://github.com/kgiusti/pyngus In order to run the driver the Proton Python bindings, Proton library, Proton header files, and Pyngus must be installed. Pre-built packages for both Pyngus and the Proton protocol engine are available for various Linux distributions (see `packages`_ below). It is recommended to use the pre-built packages if they are available for your platform. The Proton package includes a C extension that links to the Proton library. If this library is not installed, then the Proton install script will attempt to download the necessary Proton C source files from the Apache repository and build the library locally. In order to build the Proton C source locally, there are a number of tools and libraries that need to be present: * The tools and library necessary for Python development * The `SWIG`_ wrapper generator * The `OpenSSL`_ development libraries and headers * The `Cyrus SASL`_ development libraries and headers .. _SWIG: http://www.swig.org/index.php .. _OpenSSL: https://www.openssl.org .. _Cyrus SASL: https://cyrusimap.org **Note well**: Currently the Proton Pypi package only supports building the C extension on Linux systems. Router Intermediary ------------------- This driver supports a *router* intermediary that supports version 1.0 of the AMQP protocol. The direct messaging capabilities provided by this intermediary type are recommended for oslo.messaging RPC. The driver has been tested with `qpid-dispatch-router`_ router in a `devstack`_ environment. The version of qpid-dispatch-router **must** be at least 0.7.0. The qpid-dispatch-router also uses the Proton engine for its AMQP 1.0 support, so the Proton library must be installed on the system hosting the qpid-dispatch-router daemon. .. _qpid-dispatch-router: http://qpid.apache.org/components/dispatch-router/ Pre-built packages for the router are available. See `packages`_ below. Broker Intermediary ------------------- This driver supports a *broker* intermediary that supports version 1.0 of the AMQP protocol. The store and forward capabilities provided by this intermediary type are recommended for oslo.messaging Notifications. The driver has been tested with the `qpidd`_ broker in a `devstack`_ environment. The version of qpidd **must** be at least 0.34. qpidd also uses the Proton engine for its AMQP 1.0 support, so the Proton library must be installed on the system hosting the qpidd daemon. .. _qpidd: http://qpid.apache.org/components/cpp-broker/index.html Pre-built packages for the broker are available. See `packages`_ below. See the `oslo specification`_ for additional information regarding testing done on the driver. ============= Configuration ============= .. _transport urls: Transport URL Enable -------------------- In oslo.messaging, the transport_url parameters define the OpenStack service backends for RPC and Notify. The url is of the form: transport://user:pass@host1:port[,hostN:portN]/virtual_host Where the transport value specifies the rpc or notification backend as one of **amqp**, rabbit, zmq, etc. 
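For applications that construct the transport programmatically rather than through the configuration file, the same URL format applies. A minimal sketch (the username, password, and host name are illustrative assumptions)::

    import oslo_messaging
    from oslo_config import cfg

    conf = cfg.CONF
    # Parse and validate the URL first; this raises InvalidTransportURL
    # if the string is malformed.
    url = oslo_messaging.TransportURL.parse(
        conf, 'amqp://username:password@routerhostname:5672')
    # Load the transport; the 'amqp' scheme selects the AMQP 1.0 driver.
    transport = oslo_messaging.get_transport(conf, url)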
To specify and enable the AMQP 1.0 driver for RPC, in the section [DEFAULT] of the service configuration file, specify the 'transport_url' parameter: :: [DEFAULT] transport_url = amqp://username:password@routerhostname:5672 To specify and enable the AMQP 1.0 driver for Notify, in the section [NOTIFICATIONS] of the service configuration file, specify the 'transport_url' parameter: :: [NOTIFICATIONS] transport_url = amqp://username:password@brokerhostname:5672 Note, that if a 'transport_url' parameter is not specified in the [NOTIFICATIONS] section, the [DEFAULT] transport_url will be used for both RPC and Notify backends. Driver Options -------------- It is recommended that the default configuration options provided by the AMQP 1.0 driver be used. The configuration options can be modified in the oslo_messaging_amqp section of the service configuration file. Connection Options ^^^^^^^^^^^^^^^^^^ In section [oslo_messaging_amqp]: #. idle_timeout: Timeout in seconds for inactive connections. Default is disabled. #. connection_retry_interval: Seconds to pause before attempting to re-connect. #. connection_retry_backoff: Connection retry interval increment after unsuccessful failover attempt. #. connection_retry_interval_max: The maximum duration for a connection retry interval. Message Send Options ^^^^^^^^^^^^^^^^^^^^ In section [oslo_messaging_amqp]: #. pre_settled: Send message types as pre-settled. Pre-settled messages will not receive acknowledgement from the peer. #. link_retry_delay: Time to pause between re-connecting to an AMQP 1.0 link. #. default_reply_timeout: The deadline for an rpc reply message delivery. #. default_send_timeout: The deadline for an rpc cast or call message delivery. #. default_notify_timeout: The deadline for a sent notification message delivery. .. _address mode: Addressing Options ^^^^^^^^^^^^^^^^^^ In section [oslo_messaging_amqp]: #. addressing_mode: Indicates addressing mode used by the driver. #. server_request_prefix: Legacy address prefix used when sending to a specific server. #. broadcast_prefix: Legacy broadcast prefix used when broadcasting to all servers. #. group_request_prefix: Legacy address prefix when sending to any server in a group. #. rpc_address_prefix: Routable address prefix for all generated RPC addresses. #. notify_address_prefix: Routable address prefix for all generated Notification addresses. #. multicast_address: Appended to address prefix when sending a fanout address. #. unicast_address: Appended to address prefix when sending to a particular RPC/Notification server. #. anycast_address: Appended to address prefix when sending to a group of consumers. #. default_notification_exchange: Exchange name used in notification addresses if not supplied by the application. #. default_rpc_exchange: Exchange name used in RPC addresses if not supplied by the application. SSL Options ^^^^^^^^^^^ In section [oslo_messaging_amqp]: #. ssl: Attempt to connect via SSL. If no other ssl-related parameters are given, use the system's CA-bundle to verify the server's certificate. #. ssl_ca_file: A file containing the trusted Certificate Authority's digital certificate (in PEM format). This certificate is used to authenticate the messaging backend. #. ssl_cert_file: A file containing a digital certificate (in PEM format) that is used to identify the driver with the messaging bus (i.e. client authentication). #. 
ssl_key_file: A file containing the private key used to sign the ssl_cert_file certificate (PEM format, optional if the private key is stored in the certificate itself).
#. ssl_key_password: The password used to decrypt the private key (not required if the private key is not encrypted).

SASL Options
^^^^^^^^^^^^

In section [oslo_messaging_amqp]:

#. sasl_mechanisms: Space separated list of acceptable SASL mechanisms.
#. sasl_config_dir: Path to the *directory* that contains the SASL configuration.
#. sasl_config_name: The name of the SASL configuration file (without the .conf suffix) in sasl_config_dir.
#. sasl_default_realm: SASL realm to use if no realm is present in the username.
#. username: SASL user identifier for authentication with the message bus. Can be overridden by the URL.
#. password: Password for username.

AMQP Generic Options (**Note Well**)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The AMQP 1.0 driver currently does **not** support the generic *amqp* options used by pre-1.0 drivers such as *amqp_durable_queues* or *amqp_auto_delete*.

qpid-dispatch-router
--------------------

First, verify that the Proton library has been installed and is imported by the qpid-dispatch-router intermediary. This can be checked by running:

::

    $ qdrouterd --help

and looking for references to qpid-dispatch include and config path options in the help text. If no qpid-dispatch information is listed, verify that the Proton libraries are installed and that the version of qdrouterd is greater than or equal to 0.7.0.

Second, configure the address patterns used by the driver. This is done by adding the following to /etc/qpid-dispatch/qdrouterd.conf. If the legacy syntax for the addressing mode is required, include the following:

::

    address {
        prefix: unicast
        distribution: closest
    }

    address {
        prefix: exclusive
        distribution: closest
    }

    address {
        prefix: broadcast
        distribution: multicast
    }

For the routable syntax addressing mode, include the following:

::

    address {
        prefix: openstack.org/om/rpc/multicast
        distribution: multicast
    }

    address {
        prefix: openstack.org/om/rpc/unicast
        distribution: closest
    }

    address {
        prefix: openstack.org/om/rpc/anycast
        distribution: balanced
    }

    address {
        prefix: openstack.org/om/notify/multicast
        distribution: multicast
    }

    address {
        prefix: openstack.org/om/notify/unicast
        distribution: closest
    }

    address {
        prefix: openstack.org/om/notify/anycast
        distribution: balanced
    }

**Note well**: For any customization of the `address mode`_ and syntax used, the address entity configurations in /etc/qpid-dispatch/qdrouterd.conf must be updated to match.

qpidd
-----

First, verify that the Proton library has been installed and is imported by the qpidd broker. This can be checked by running:

::

    $ qpidd --help

and looking for the AMQP 1.0 options in the help text. If no AMQP 1.0 options are listed, verify that the Proton libraries are installed and that the version of qpidd is greater than or equal to 0.34.

Second, configure the default address patterns used by the driver for a broker-based backend. This is done by adding the following to /etc/qpid/qpidd.conf:

::

    queue-patterns=exclusive
    queue-patterns=unicast
    topic-patterns=broadcast

These patterns, *exclusive*, *unicast*, and *broadcast*, are the legacy addressing values used by the driver. They can be overridden via the driver configuration options if desired (see above). If manually overridden, update the qpidd.conf values to match.

.. _devstack:

================
DevStack Support
================

The plugin for the AMQP 1.0 oslo.messaging driver is supported by DevStack.
The plugin supports the deployment of several different message bus configurations. In the local.conf [localrc] section, the `devstack-plugin-amqp1`_ plugin repository must be enabled. For example:

::

    [[local|localrc]]
    enable_plugin amqp1 https://git.openstack.org/openstack/devstack-plugin-amqp1

Set the username and password variables if needed for the configuration:

::

    AMQP1_USERNAME=queueuser
    AMQP1_PASSWORD=queuepassword

The AMQP1_SERVICE variable identifies the message bus configuration that will be used. In addition to the AMQP 1.0 driver being used for both the RPC and Notification messaging communications, a hybrid configuration is supported in the plugin that will deploy AMQP 1.0 for the RPC backend and the oslo_messaging rabbit driver for the Notification backend. Additionally, the plugin supports a setting for a pre-provisioned messaging bus that prevents the plugin from creating the messaging bus. The setting of the AMQP1_SERVICE variable selects which messaging intermediary will be used for the RPC and Notification messaging backends:

+---------------+------------------+------------------+
| AMQP1_SERVICE | RPC Backend      | Notify Backend   |
+---------------+------------------+------------------+
| qpid          | qpidd broker     | qpidd broker     |
+---------------+------------------+------------------+
| qpid-dual     | qdrouterd router | qpidd broker     |
+---------------+------------------+------------------+
| qpid-hybrid   | qdrouterd router | rabbitmq broker  |
+---------------+------------------+------------------+
| external      | pre-provisioned  | pre-provisioned  |
|               | message bus      | message bus      |
+---------------+------------------+------------------+

.. _devstack-plugin-amqp1: https://github.com/openstack/devstack-plugin-amqp1.git

.. _packages:

======================
Platforms and Packages
======================

PyPi
----

Packages for `Pyngus pypi`_ and the `Proton pypi`_ engine are available on PyPi.

.. _Pyngus pypi: https://pypi.python.org/pypi/pyngus
.. _Proton pypi: https://pypi.python.org/pypi/python-qpid-proton

RHEL and Fedora
---------------

Packages exist in EPEL for RHEL/Centos 7, and Fedora 26+. Unfortunately, the RHEL/Centos 6 base packages include a very old version of qpidd that does not support AMQP 1.0, and EPEL's policy does not allow a newer version of qpidd for RHEL/Centos 6. The following packages must be installed on the system running the intermediary daemon:

+--------------+--------------------------+
| Intermediary | Package                  |
+--------------+--------------------------+
| qdrouterd    | qpid-dispatch-router     |
|              | python-qpid-proton       |
+--------------+--------------------------+
| qpidd        | qpid-cpp-server          |
|              | qpid-proton-c            |
+--------------+--------------------------+

qpidd daemon:

- qpid-cpp-server (version 0.26+)
- qpid-proton-c

The following packages must be installed on the systems running the services that use the new driver:

- Proton libraries: qpid-proton-c-devel
- Proton python bindings: python-qpid-proton
- pyngus (via PyPi)

Debian and Ubuntu
-----------------

Packages for the Proton library, headers, and Python bindings are available in the Debian/Testing repository. Proton packages are not yet available in the Ubuntu repository. The version of qpidd on both platforms is too old and does not support AMQP 1.0. Until the proper package versions arrive, the latest packages can be pulled from the `Apache Qpid PPA`_ on Launchpad:

::

    sudo add-apt-repository ppa:qpid/released

.. _Apache Qpid PPA: https://launchpad.net/~qpid/+archive/ubuntu/released
The following packages must be installed on the system running the qdrouterd daemon:

- qdrouterd (version 0.8.0+)

The following packages must be installed on the system running the qpidd daemon:

- qpidd (version 0.34+)

The following packages must be installed on the systems running the services that use the new driver:

- Proton libraries: libqpid-proton2-dev
- Proton python bindings: python-qpid-proton
- pyngus (via PyPi)

.. LocalWords:  Acknowledgement acknowledgement

oslo.messaging-5.35.0/doc/source/admin/pika_driver.rst0000666000175100017510000001235413224676046022767 0ustar zuulzuul00000000000000------------------------------
Pika Driver Deployment Guide
------------------------------

.. currentmodule:: oslo_messaging

.. warning:: The Pika driver is no longer maintained and will be removed from oslo.messaging at a future date. It is recommended that all users of the Pika driver transition to using the Rabbit driver.

============
Introduction
============

Pika is a pure-Python implementation of the AMQP 0-9-1 protocol, including RabbitMQ's extensions. It is very actively supported and recommended by the RabbitMQ developers.

========
Abstract
========

PikaDriver is one of the oslo.messaging backend drivers. It supports the RPC and Notify patterns. Currently it can be the only oslo.messaging driver across the OpenStack cluster. This document provides deployment information for this driver in oslo_messaging.

This driver is able to work with a single instance of a RabbitMQ server or a RabbitMQ cluster.

=============
Configuration
=============

Enabling (mandatory)
--------------------

To enable the driver, in the [DEFAULT] section of the conf file, the 'transport_url' parameter should be set to `pika://user:pass@host1:port[,hostN:portN]`::

    [DEFAULT]
    transport_url = pika://guest:guest@localhost:5672

Connection options (optional)
-----------------------------

In section [oslo_messaging_pika]:

#. channel_max: Maximum number of channels to allow,
#. frame_max (default - pika default value): The maximum byte size for an AMQP frame,
#. heartbeat_interval (default=1): How often to send heartbeats for consumers' connections, in seconds. If 0 - disable heartbeats,
#. ssl (default=False): Enable SSL if True,
#. ssl_options (default=None): Arguments passed to ssl.wrap_socket,
#. socket_timeout (default=0.25): Timeout for opening a new connection's socket,
#. tcp_user_timeout (default=0.25): Sets TCP_USER_TIMEOUT, in seconds, for the connection's socket,
#. host_connection_reconnect_delay (default=0.25): Delay before reconnecting to a host after a connection error

Connection pool options (optional)
----------------------------------

In section [oslo_messaging_pika]:

#. pool_max_size (default=10): Maximum number of connections to keep queued,
#. pool_max_overflow (default=0): Maximum number of connections to create above `pool_max_size`,
#. pool_timeout (default=30): Default number of seconds to wait for a connection to become available,
#. pool_recycle (default=600): Lifetime of a connection (since creation) in seconds, or None for no recycling. Expired connections are closed on acquire,
#. pool_stale (default=60): Threshold in seconds at which inactive (since release) connections are considered stale, or None for no staleness. Stale connections are closed on acquire.

RPC related options (optional)
------------------------------

In section [oslo_messaging_pika]:

#. rpc_queue_expiration (default=60): Time to live, in seconds, for rpc queues without consumers,
#.
default_rpc_exchange (default="${control_exchange}_rpc"): Exchange name for sending RPC messages,
#. rpc_reply_exchange (default="${control_exchange}_rpc_reply"): Exchange name for receiving RPC replies,
#. rpc_listener_prefetch_count (default=100): Maximum number of unacknowledged messages which RabbitMQ can send to the rpc listener,
#. rpc_reply_listener_prefetch_count (default=100): Maximum number of unacknowledged messages which RabbitMQ can send to the rpc reply listener,
#. rpc_reply_retry_attempts (default=-1): Reconnection retry count in case of connectivity problems during sending of a reply. -1 means infinite retries during rpc_timeout,
#. rpc_reply_retry_delay (default=0.25): Reconnection retry delay in case of connectivity problems during sending of a reply,
#. default_rpc_retry_attempts (default=-1): Reconnection retry count in case of connectivity problems during sending of an RPC message, -1 means infinite retries. If the actual number of retry attempts is not 0, the RPC request could be processed more than once,
#. rpc_retry_delay (default=0.25): Reconnection retry delay in case of connectivity problems during sending of an RPC message

$control_exchange in this code is the value of the [DEFAULT].control_exchange option, which is "openstack" by default

Notification related options (optional)
---------------------------------------

In section [oslo_messaging_pika]:

#. notification_persistence (default=False): Persist notification messages,
#. default_notification_exchange (default="${control_exchange}_notification"): Exchange name for sending notifications,
#. notification_listener_prefetch_count (default=100): Maximum number of unacknowledged messages which RabbitMQ can send to the notification listener,
#. default_notification_retry_attempts (default=-1): Reconnection retry count in case of connectivity problems during sending of a notification, -1 means infinite retries,
#. notification_retry_delay (default=0.25): Reconnection retry delay in case of connectivity problems during sending of a notification message

$control_exchange in this code is the value of the [DEFAULT].control_exchange option, which is "openstack" by default

DevStack Support
----------------

The Pika driver is supported by DevStack. To enable it, edit the local.conf [localrc] section and add the following there::

    enable_plugin pika https://git.openstack.org/openstack/devstack-plugin-pika

oslo.messaging-5.35.0/doc/source/admin/drivers.rst0000666000175100017510000000016313224676046022141 0ustar zuulzuul00000000000000===================
Available Drivers
===================

.. list-plugins:: oslo.messaging.drivers
   :detailed:

oslo.messaging-5.35.0/doc/source/admin/index.rst0000666000175100017510000000020513224676046021567 0ustar zuulzuul00000000000000================
Deployment Guide
================

.. toctree::
   :maxdepth: 2

   drivers
   AMQP1.0
   pika_driver
   zmq_driver

oslo.messaging-5.35.0/doc/source/admin/zmq_driver.rst0000666000175100017510000005575013224676046022651 0ustar zuulzuul00000000000000------------------------------
ZeroMQ Driver Deployment Guide
------------------------------

.. currentmodule:: oslo_messaging

============
Introduction
============

0MQ (also known as ZeroMQ or zmq) is an embeddable networking library but acts like a concurrency framework. It gives you sockets that carry atomic messages across various transports like in-process, inter-process, TCP, and multicast. You can connect sockets N-to-N with patterns like fan-out, pub-sub, task distribution, and request-reply. It's fast enough to be the fabric for clustered products.
Its asynchronous I/O model gives you scalable multi-core applications, built as asynchronous message-processing tasks. It has a score of language APIs and runs on most operating systems.

Originally the zero in 0MQ was meant as "zero broker" and (as close to) "zero latency" (as possible). Since then, it has come to encompass different goals: zero administration, zero cost, and zero waste. More generally, "zero" refers to the culture of minimalism that permeates the project.

More detail regarding the ZeroMQ library is available from the `specification`_.

.. _specification: http://zguide.zeromq.org/page:all

========
Abstract
========

Currently, ZeroMQ is one of the RPC backend drivers in oslo.messaging. ZeroMQ can be the only RPC driver across the OpenStack cluster. This document provides deployment information for this driver in oslo_messaging.

Unlike AMQP-based drivers such as RabbitMQ, ZeroMQ by default doesn't have any central brokers in oslo.messaging; instead, each host (running OpenStack services) is both a ZeroMQ client and a server. As a result, each host needs to listen to a certain TCP port for incoming connections and directly connect to other hosts simultaneously.

Another option is to use a router proxy. It is not a broker, because it doesn't assume any message ownership, persistence, replication, etc. It only redirects messages to endpoints, taking the routing info from the message envelope.

Topics are used to identify the destination for a ZeroMQ RPC call. There are two types of topics, bare topics and directed topics. Bare topics look like 'compute', while directed topics look like 'compute.machine1'.

========
Scenario
========

Assuming the following systems as a goal.

::

    +--------+
    | Client |
    +----+---+
         |
    -----+---------+-----------------------+---------------------
                   |                       |
         +---------+-----------+  +--------+---------------+
         | Controller Node     |  | Compute Node           |
         |  Nova               |  |  Neutron               |
         |  Keystone           |  |  Nova                  |
         |  Glance             |  |   nova-compute         |
         |  Neutron            |  |  Ceilometer            |
         |  Cinder             |  |                        |
         |  Ceilometer         |  +------------------------+
         |  zmq-proxy          |
         |  Redis              |
         |  Horizon            |
         +---------------------+

===================
Basic Configuration
===================

Enabling (mandatory)
--------------------

To enable the driver, the 'transport_url' option must be set to 'zmq://' in the [DEFAULT] section of the conf file, and the 'rpc_zmq_host' option must be set to the hostname of the current node.

::

    [DEFAULT]
    transport_url = "zmq://"

    [oslo_messaging_zmq]
    rpc_zmq_host = {hostname}

The default configuration of the zmq driver is called 'Static Direct Connections' (to learn more about zmq driver configurations, please proceed to the section 'Existing Configurations'). This means that all services connect directly to each other and all connections are static: we open them at the beginning of the service's lifecycle and close them only when the service quits. This configuration is the simplest one, since it doesn't require any helper services (proxies) other than the matchmaker to be running.

Matchmaking (mandatory)
-----------------------

The ZeroMQ driver implements a matching capability to discover hosts available for communication when sending to a bare topic. This allows broker-less communications.

The Matchmaker is pluggable and it provides two different Matchmaker classes.

MatchmakerDummy: default matchmaker driver for the all-in-one scenario (messages are sent to itself; used mainly for testing).
MatchmakerRedis: loads the hash table from a remote Redis server, supports dynamic host/topic registrations, host expiration, and hooks for consuming applications to acknowledge or neg-acknowledge topic.host service availability.

For the ZeroMQ driver, Redis is also configured in the transport_url. To use Redis, specify the URL as follows::

    [DEFAULT]
    transport_url = "zmq+redis://127.0.0.1:6379"

In order to clean up expired records from the Redis storage (e.g. when a target listener goes down), a TTL may be applied to keys. Configure the 'zmq_target_expire' option, which is 300 (seconds) by default. The option is not specific to Redis, so it is also defined in the [oslo_messaging_zmq] section. If the option value is <= 0, then keys don't expire and live forever in the storage.

The other option is 'zmq_target_update' (180 seconds by default), which specifies how often each RPC server should update the matchmaker. The optimal value for this option is generally zmq_target_expire divided by 2 (or by 1.5). It is recommended to calculate it based on 'zmq_target_expire' so that service records don't expire before being refreshed by live services. Generally, the matchmaker can be considered an alternative approach to service heartbeating.

Matchmaker Data Source (mandatory)
----------------------------------

The matchmaker data source is stored in files or in the Redis server discussed in the previous section. How to populate the database is the key issue for making the ZeroMQ driver work.

If deploying MatchmakerRedis, a Redis server is required. For each (K, V) pair stored in Redis, the key is a base topic and the corresponding value is an array of hostnames to be sent to.

HA for Redis database
---------------------

Single-node Redis works fine for testing, but production deployments want some availability guarantees. Without the Redis database, a zmq deployment should continue working anyway, because services have no need for Redis once their connections are already established. But if you would like to restart some services, run more workers, or add more hardware nodes to the deployment, you will need the node discovery mechanism to work, and it requires Redis.

To provide database recovery in situations when a Redis node goes down, for example, we use the Sentinel solution and a Redis master-slave-slave configuration (if we have 3 controllers and run Redis on each of them). To deploy Redis with HA, follow the `sentinel-install`_ instructions. From the messaging driver's side you will need to set up the following configuration::

    [DEFAULT]
    transport_url = "zmq+sentinel://host1:26379,host2:26379,host3:26379"

Listening Address (optional)
----------------------------

All services bind to an IP address or Ethernet adapter. By default, all services bind to '*', effectively binding to 0.0.0.0. This may be changed with the option 'rpc_zmq_bind_address', which accepts a wildcard, IP address, or Ethernet adapter. This configuration can be set in the [oslo_messaging_zmq] section.

For example::

    rpc_zmq_bind_address = *

Currently the zmq driver uses a dynamic port binding mechanism, which means that each listener will allocate a random port (static, i.e. fixed, ports may only be used for sockets inside proxies now). The port range is controlled by the two options 'rpc_zmq_min_port' and 'rpc_zmq_max_port'. Change them to restrict the current service's port binding range. 'rpc_zmq_bind_port_retries' controls the number of retries before a 'ports range exceeded' failure.
For example::

    rpc_zmq_min_port = 49153
    rpc_zmq_max_port = 65536
    rpc_zmq_bind_port_retries = 100

=======================
Existing Configurations
=======================

Static Direct Connections
-------------------------

An example service config file::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = false
    use_router_proxy = false
    use_dynamic_connections = false
    zmq_target_expire = 60
    zmq_target_update = 30
    rpc_zmq_min_port = 49153
    rpc_zmq_max_port = 65536

In both the static and dynamic direct connections configurations it is necessary to configure the firewall to open the binding port range on each node::

    iptables -A INPUT -p tcp --match multiport --dports 49152:65535 -j ACCEPT

The security recommendation here (general for any RPC backend) is to set up a private network for the message bus and a separate open network for the public APIs. The ZeroMQ driver doesn't support authentication or encryption at its level.

As stated above, this configuration is the simplest one, since it requires only a Matchmaker service to be running. That is why the driver's options are configured by default to use this type of topology.

The biggest advantage of static direct connections (other than simplicity) is performance. On small deployments (20-50 nodes) it can outperform brokered solutions (or solutions with proxies) by 3x-5x. This is possible because this configuration doesn't have a central node bottleneck, so its throughput is limited only by TCP and network bandwidth.

Unfortunately this approach cannot be applied as-is at a big scale (over 500 nodes). The main problem is that the number of connections between services, and particularly the number of connections on each controller node, grows (in the worst case) as a square function of the total number of running services. That is not acceptable.

However, this approach can be successfully used, and is recommended, when services on controllers don't talk to agent services on resource nodes using oslo.messaging RPC, but RPC is used only for controller services to communicate with each other. Examples here are the Cinder+Ceph backend and the way Ironic utilises oslo.messaging. For all other cases, like Nova and Neutron at a big scale, proxy-based configurations or the dynamic connections configuration are more appropriate. An exception may be the case of running OpenStack services inside Docker containers with Kubernetes, since Kubernetes already solves similar problems by using KubeProxy and virtual IP addresses for each container: it manages all the traffic using iptables, which is more than appropriate to solve the problem described above.

Summing up, it is recommended to use this type of zmq configuration for:

1. Small clouds (up to 100 nodes)
2. Cinder+Ceph deployments
3. Ironic deployments
4. OpenStack + Kubernetes (OpenStack in containers) deployments

Dynamic Direct Connections
--------------------------

An example service config file::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = false
    use_router_proxy = false
    use_dynamic_connections = true
    zmq_failover_connections = 2
    zmq_linger = 60
    zmq_target_expire = 60
    zmq_target_update = 30
    rpc_zmq_min_port = 49153
    rpc_zmq_max_port = 65536

The 'use_dynamic_connections = true' setting states that connections are dynamic. 'zmq_linger' becomes crucial with dynamic connections in order to avoid socket leaks.
If a socket is connected to a wrong (dead) host that is somehow still present in the Matchmaker and a message was sent, the socket cannot be closed while the message stays in the queue (the default linger is to wait infinitely), so the linger needs to be specified explicitly.

Services often run more than one worker on the same topic. Workers are equal, so any of them can handle the message. In order to connect to more than one available worker, set the 'zmq_failover_connections' option to some value (2 by default, which means 2 additional connections). Take care, because it may also result in a slow-down. All recommendations regarding port ranges described in the previous section are also valid here.

Most things are similar to what we had with static connections; the only difference is that each message causes a connection setup and then an immediate disconnect after the message is sent. The advantage of this deployment is that the average number of connections on a controller node at any moment is not high, even for quite large deployments. The disadvantage is the overhead caused by the need to connect/disconnect per message, so this configuration can without doubt be considered the slowest one. The good news is that OpenStack RPC doesn't require "thousands of messages per second" bandwidth for each particular service (not to be confused with central broker/proxy bandwidth, which needs to be as high as possible at a big scale and can be a serious bottleneck).

One more bad thing about this particular configuration is fanout. Here it is a completely linear-complexity operation, and it suffers the most from the connect/disconnect overhead per message. So for fanout it is fair to say that services can experience a significant slow-down with dynamic connections. The recommended way to solve this problem is to use a combined solution with a proxied PUB/SUB infrastructure for fanout and dynamic direct connections for direct message types (plain CAST and CALL messages). This combined approach is described later in the text.

Router Proxy
------------

An example service config file::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = false
    use_router_proxy = true
    use_dynamic_connections = false

An example proxy config file::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = false

    [zmq_proxy_opts]
    host = host-1

RPC may consume too many TCP sockets on a controller node in the directly connected configuration. To solve the issue, a ROUTER proxy may be used. In order to configure the driver to use a ROUTER proxy, set the 'use_router_proxy' option to true in the [oslo_messaging_zmq] section (false is set by default).

Pay attention to the 'use_pub_sub = false' line, which has to match in all services' and proxies' configs; it wouldn't work if the proxy uses PUB/SUB and the services don't.

Not less than 3 proxies should be running on controllers or on standalone nodes. The parameters for the oslo-messaging-zmq-proxy script should be::

    oslo-messaging-zmq-proxy
        --config-file /etc/oslo/zeromq.conf
        --log-file /var/log/oslo/zeromq-router-proxy.log
        --host node-123
        --frontend-port 50001
        --backend-port 50002
        --debug

The config file for the proxy consists of the default section, the 'oslo_messaging_zmq' section, and an additional 'zmq_proxy_opts' section. Command line arguments like host, frontend_port, backend_port and publisher_port can also be set in the 'zmq_proxy_opts' section of a configuration file (i.e., /etc/oslo/zeromq.conf). All arguments are optional.
A port value of 0 means a random port (see the next section for more details).

Take into account that the --debug flag makes the proxy write a log record for every dispatched message, which influences proxy performance significantly, so this flag is not recommended for use in production. Without --debug there will be only Matchmaker updates or critical errors in the proxy logs.

In this configuration we use the proxy as a very simple dispatcher (so it has the best performance with minimal overhead). The only thing the proxy does is get the binary routing-key frame from the message and dispatch the message on this key. In this kind of deployment the client is in charge of doing fanout. Before sending a fanout message, the client takes the list of available hosts for the topic and sends as many messages as the number of hosts it got. This configuration just uses the DEALER/ROUTER pattern of ZeroMQ and doesn't use PUB/SUB, as stated above. The disadvantage of this approach is again slower client-side fanout. But it is much better than with dynamic direct connections, because we don't need to connect and disconnect per message.

ZeroMQ PUB/SUB Infrastructure
-----------------------------

An example service config file::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = true
    use_router_proxy = true
    use_dynamic_connections = false

An example proxy config file::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = true

    [zmq_proxy_opts]
    host = host-1

It seems obvious that the fanout pattern of oslo.messaging maps onto the ZeroMQ PUB/SUB pattern, but that is only at first glance. It really does, but let's look a bit closer. The first caveat is that in oslo.messaging it is the client who does fanout (and generally initiates the conversation); the server is passive. In ZeroMQ, by contrast, the publisher is a server and subscribers are clients. And here is the problem: RPC servers are subscribers in terms of ZeroMQ PUB/SUB, they hold the SUB socket and wait for messages, and they don't know anything about RPC clients, which generally come later than servers. So servers don't have a PUB socket to subscribe to on start; we need to introduce something in the middle, and here the proxy plays that role.

The publisher proxy has a ROUTER socket on the front-end and a PUB socket on the back-end. The client connects to the ROUTER and sends a single message to the publisher proxy. The proxy redirects this message to the PUB socket, which performs the actual publishing.

Command to run the central publisher proxy::

    oslo-messaging-zmq-proxy
        --config-file /etc/oslo/zeromq.conf
        --log-file /var/log/oslo/zeromq-router-proxy.log
        --host node-123
        --frontend-port 50001
        --publisher-port 50003
        --debug

When we run a publisher proxy we need to specify the --publisher-port option; otherwise a random port will be picked and clients will get it from the Matchmaker.

The advantage of this approach is really fast fanout: while it takes time on the proxy to publish, ZeroMQ PUB/SUB is one of the fastest fanout pattern implementations. It also makes clients faster, because they need to send only a single message, to a proxy. For load balancing and HA it is recommended to have at least 3 proxies, but the number of running proxies is not limited. They also don't form a cluster, so there are no limitations on their number caused by consistency algorithm requirements. The disadvantage is that the number of connections on the proxy is doubled compared to the previous deployment, because we still need to use the router for direct messages.
The documented limitation of ZeroMQ PUB/SUB is 10k subscribers. In order to limit the number of subscribers and connections, local proxies may be used. To run a local publisher, the following command may be used::

    oslo-messaging-zmq-proxy
        --local-publisher
        --config-file /etc/oslo/zeromq.conf
        --log-file /var/log/oslo/zeromq-router-proxy.log
        --host localhost
        --publisher-port 60001
        --debug

Pay attention to the --local-publisher flag, which specifies the type of the proxy. Local publishers may be running on every single node of a deployment. To make services use local publishers, the 'subscribe_on' option has to be specified in the service's config file::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = true
    use_router_proxy = true
    use_dynamic_connections = false
    subscribe_on = localhost:60001

If we forget to specify 'subscribe_on', services will take the info from the Matchmaker and still connect to a central proxy, so the trick won't work. The local proxy gets all the needed info from the matchmaker in order to find the central proxies and subscribes to them.

Frankly speaking, you can put a central proxy in the 'subscribe_on' value; even a list of hosts may be passed, the same way as we do for the transport_url::

    subscribe_on = host-1:50003,host-2:50003,host-3:50003

This is completely valid, just not necessary, because we have information about the central proxies in the Matchmaker. One more thing to highlight about 'subscribe_on' is that, when explicitly set, it has a higher priority than the Matchmaker.

Concluding all the above, fanout over PUB/SUB proxies is the best choice because of its static connection infrastructure, its failover when one or more publishers die, and ZeroMQ PUB/SUB's high performance.

What If We Mix Different Configurations?
----------------------------------------

The three boolean variables 'use_pub_sub', 'use_router_proxy' and 'use_dynamic_connections' give us exactly 8 possible combinations. But from a practical perspective not all of them are usable, so let's discuss only those which make sense.

The main recommended combination is Dynamic Direct Connections plus the PUB/SUB infrastructure. We deploy PUB/SUB proxies as described in the corresponding section (either with local+central proxies or with only central proxies), and the services' configuration file will look like the following::

    [DEFAULT]
    transport_url = "zmq+redis://host-1:6379"

    [oslo_messaging_zmq]
    use_pub_sub = true
    use_router_proxy = false
    use_dynamic_connections = true

So we just tell the driver not to pass direct CALL and CAST messages over the router, but to send them directly to RPC servers. All the details of configuring services and port ranges have to be taken from the 'Dynamic Direct Connections' section. This is a combined configuration; currently it is the best choice from the number-of-connections perspective. Frankly speaking, the deployment from the 'ZeroMQ PUB/SUB Infrastructure' section is also a combination, of 'Router Proxy' with PUB/SUB; we've just used the same proxies for both.

Here we've discussed combinations inside the same service. But configurations can also be combined at a higher level, the level of services. You could have, for example, a deployment where Cinder uses static direct connections while Nova/Neutron use combined PUB/SUB + dynamic direct connections. Such an approach needs additional caution and may be confusing for cloud operators, but it provides maximum optimization of performance and of the number of connections on proxies and controller nodes.
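Whichever topology is chosen, the choice is transparent to application code: services select a configuration purely through the options above, while the oslo.messaging API stays the same. A minimal RPC sketch (the topic, server, and method names here are illustrative)::

    import oslo_messaging
    from oslo_config import cfg


    class TestEndpoint(object):
        def ping(self, ctxt, arg):
            # Echo the argument back to the caller.
            return arg


    conf = cfg.CONF
    transport = oslo_messaging.get_transport(conf, 'zmq://')

    # Server side: listens on the directed topic 'testtopic.host-1'.
    target = oslo_messaging.Target(topic='testtopic', server='host-1')
    server = oslo_messaging.get_rpc_server(transport, target,
                                           [TestEndpoint()],
                                           executor='blocking')
    server.start()

    # Client side: a call on the bare topic is resolved to a concrete
    # host through the matchmaker.
    client = oslo_messaging.RPCClient(
        transport, oslo_messaging.Target(topic='testtopic'))
    client.call({}, 'ping', arg='hello')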
================ DevStack Support ================ ZeroMQ driver can be tested on a single node deployment with DevStack. Take into account that on a single node it is not that obvious any performance increase compared to other backends. To see significant speed up you need at least 20 nodes. In local.conf [localrc] section need to enable zmq plugin which lives in `devstack-plugin-zmq`_ repository. For example:: enable_plugin zmq https://github.com/openstack/devstack-plugin-zmq.git Example of local.conf:: [[local|localrc]] DATABASE_PASSWORD=password ADMIN_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password enable_plugin zmq https://github.com/openstack/devstack-plugin-zmq.git OSLOMSG_REPO=https://review.openstack.org/openstack/oslo.messaging OSLOMSG_BRANCH=master ZEROMQ_MATCHMAKER=redis LIBS_FROM_GIT=oslo.messaging ENABLE_DEBUG_LOG_LEVEL=True .. _devstack-plugin-zmq: https://github.com/openstack/devstack-plugin-zmq.git .. _sentinel-install: http://redis.io/topics/sentinel oslo.messaging-5.35.0/doc/source/index.rst0000666000175100017510000000055613224676046020510 0ustar zuulzuul00000000000000============== oslo.messaging ============== The Oslo messaging API supports RPC and notifications over a number of different messaging transports. .. toctree:: :maxdepth: 1 contributor/index configuration/index admin/index user/index reference/index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` oslo.messaging-5.35.0/doc/source/reference/0000775000175100017510000000000013224676256020600 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/doc/source/reference/notification_listener.rst0000666000175100017510000000036513224676046025730 0ustar zuulzuul00000000000000--------------------- Notification Listener --------------------- .. automodule:: oslo_messaging.notify.listener .. currentmodule:: oslo_messaging .. autofunction:: get_notification_listener .. autofunction:: get_batch_notification_listener oslo.messaging-5.35.0/doc/source/reference/executors.rst0000666000175100017510000000207613224676046023357 0ustar zuulzuul00000000000000========= Executors ========= Executors control how a received message is scheduled for processing by a Server. This scheduling can be *synchronous* or *asynchronous*. A synchronous executor will process the message on the Server's thread. This means the Server can process only one message at a time. Other incoming messages will not be processed until the current message is done processing. For example, in the case of an RPCServer only one method call will be invoked at a time. A synchronous executor guarantees that messages complete processing in the order that they are received. An asynchronous executor will process received messages concurrently. The Server thread will not be blocked by message processing and can continue to service incoming messages. There are no ordering guarantees - message processing may complete in a different order than they were received. The executor may be configured to limit the maximum number of messages that are processed at once. Available Executors =================== .. list-plugins:: oslo.messaging.executors :detailed: oslo.messaging-5.35.0/doc/source/reference/exceptions.rst0000666000175100017510000000100413224676046023505 0ustar zuulzuul00000000000000---------- Exceptions ---------- .. currentmodule:: oslo_messaging .. autoexception:: ClientSendError .. autoexception:: DriverLoadFailure .. autoexception:: ExecutorLoadFailure .. autoexception:: InvalidTransportURL .. 
autoexception:: MessagingException .. autoexception:: MessagingTimeout .. autoexception:: MessagingServerError .. autoexception:: NoSuchMethod .. autoexception:: RPCDispatcherError .. autoexception:: RPCVersionCapError .. autoexception:: ServerListenError .. autoexception:: UnsupportedVersion oslo.messaging-5.35.0/doc/source/reference/notification_driver.rst0000666000175100017510000000042313224676046025371 0ustar zuulzuul00000000000000------------------- Notification Driver ------------------- .. automodule:: oslo_messaging.notify.messaging .. autoclass:: MessagingDriver .. autoclass:: MessagingV2Driver .. currentmodule:: oslo_messaging.notify.notifier .. autoclass:: Driver :members: :noindex: oslo.messaging-5.35.0/doc/source/reference/index.rst0000666000175100017510000000033713224676046022443 0ustar zuulzuul00000000000000.. _using: ========= Reference ========= .. toctree:: :maxdepth: 2 transport executors target server rpcclient notifier notification_driver notification_listener serializer exceptions oslo.messaging-5.35.0/doc/source/reference/server.rst0000666000175100017510000000074213224676046022642 0ustar zuulzuul00000000000000---------- RPC Server ---------- .. automodule:: oslo_messaging.rpc.server .. currentmodule:: oslo_messaging .. autofunction:: get_rpc_server .. autoclass:: RPCAccessPolicyBase .. autoclass:: LegacyRPCAccessPolicy .. autoclass:: DefaultRPCAccessPolicy .. autoclass:: ExplicitRPCAccessPolicy .. autoclass:: RPCDispatcher .. autoclass:: MessageHandlingServer :members: .. autofunction:: expected_exceptions .. autofunction:: expose .. autoexception:: ExpectedException oslo.messaging-5.35.0/doc/source/reference/serializer.rst0000666000175100017510000000021313224676046023476 0ustar zuulzuul00000000000000---------- Serializer ---------- .. currentmodule:: oslo_messaging .. autoclass:: Serializer :members: .. autoclass:: NoOpSerializer oslo.messaging-5.35.0/doc/source/reference/transport.rst0000666000175100017510000000121413224676046023363 0ustar zuulzuul00000000000000--------- Transport --------- .. currentmodule:: oslo_messaging .. autoclass:: Transport .. autoclass:: TransportURL :members: .. autoclass:: TransportHost .. autofunction:: set_transport_defaults Forking Processes and oslo.messaging Transport objects ------------------------------------------------------ oslo.messaging can't ensure that forking a process that shares the same transport object is safe for the library consumer, because it relies on different 3rd party libraries that don't ensure that. In certain cases, with some drivers, it does work: * rabbit: works only if no connection have already been established. * amqp1: works oslo.messaging-5.35.0/doc/source/reference/target.rst0000666000175100017510000000427213224676046022624 0ustar zuulzuul00000000000000------ Target ------ .. currentmodule:: oslo_messaging .. autoclass:: Target =============== Target Versions =============== Target version numbers take the form Major.Minor. For a given message with version X.Y, the server must be marked as able to handle messages of version A.B, where A == X and B >= Y. The Major version number should be incremented for an almost completely new API. The Minor version number would be incremented for backwards compatible changes to an existing API. A backwards compatible change could be something like adding a new method, adding an argument to an existing method (but not requiring it), or changing the type for an existing argument (but still handling the old type as well). 
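For example, a server whose endpoints implement every revision of the API up to 1.2 could advertise that on its target (a sketch; the topic and server names are illustrative)::

    import oslo_messaging

    # A server marked with version '1.2' can handle messages
    # carrying versions 1.0, 1.1 and 1.2.
    target = oslo_messaging.Target(topic='compute', server='machine1',
                                   version='1.2')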
If no version is specified it defaults to '1.0'.

In the case of RPC, if you wish to allow your server interfaces to evolve such that clients do not need to be updated in lockstep with the server, you should take care to implement the server changes in a backwards compatible manner and have the clients specify which interface version they require for each method.

Adding a new method to an endpoint is a backwards compatible change and the version attribute of the endpoint's target should be bumped from X.Y to X.Y+1. On the client side, the new RPC invocation should have a specific version specified to indicate the minimum API version that must be implemented for the method to be supported. For example::

    def get_host_uptime(self, ctxt, host):
        cctxt = self.client.prepare(server=host, version='1.1')
        return cctxt.call(ctxt, 'get_host_uptime')

In this case, version '1.1' is the first version that supported the get_host_uptime() method.

Adding a new parameter to an RPC method can be made backwards compatible. The endpoint version on the server side should be bumped. The implementation of the method must not expect the parameter to be present::

    def some_remote_method(self, arg1, arg2, newarg=None):
        # The code needs to deal with newarg=None for cases
        # where an older client sends a message without it.
        pass

On the client side, the same changes should be made as in the first example. The minimum version that supports the new parameter should be specified.

oslo.messaging-5.35.0/doc/source/reference/notifier.rst0000666000175100017510000000052413224676046023151 0ustar zuulzuul00000000000000==========
Notifier
==========

.. currentmodule:: oslo_messaging

.. autoclass:: Notifier
   :members:

.. autoclass:: LoggingNotificationHandler
   :members:

.. autoclass:: LoggingErrorNotificationHandler
   :members:

Available Notifier Drivers
==========================

.. list-plugins:: oslo.messaging.notify.drivers
   :detailed:

oslo.messaging-5.35.0/doc/source/reference/rpcclient.rst0000666000175100017510000000021313224676046023310 0ustar zuulzuul00000000000000----------
RPC Client
----------

.. currentmodule:: oslo_messaging

.. autoclass:: RPCClient
   :members:

.. autoexception:: RemoteError

oslo.messaging-5.35.0/doc/source/contributor/0000775000175100017510000000000013224676256021214 5ustar zuulzuul00000000000000oslo.messaging-5.35.0/doc/source/contributor/supported-messaging-drivers.rst0000666000175100017510000000416113224676046027423 0ustar zuulzuul00000000000000=============================
Supported Messaging Drivers
=============================

RabbitMQ may not be sufficient for the entire community as the community grows. Pluggability is still something we should maintain, but we should have a very high standard for drivers that are shipped and documented as being supported. This document defines a very clear policy as to the requirements for drivers to be carried in oslo.messaging and thus supported by the OpenStack community as a whole.
* Must have integration testing including at least 3 popular oslo.messaging dependents, preferably at the minimum a devstack-gate job with Nova, Cinder, and Neutron. * All testing above must be voting in the gate of oslo.messaging. Documentation ~~~~~~~~~~~~~ * Must have a reasonable amount of documentation including documentation in the official OpenStack deployment guide. Support ~~~~~~~ * Must have at least two individuals from the community committed to triaging and fixing bugs, and responding to test failures in a timely manner. Prospective Drivers ~~~~~~~~~~~~~~~~~~~ * Drivers that intend to meet the requirements above, but that do not yet meet them will be given one full release cycle, or 6 months, whichever is longer, to comply before being marked for deprecation. Their use, however, will not be supported by the community. This will prevent a chicken and egg problem for new drivers. .. note:: This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode oslo.messaging-5.35.0/doc/source/contributor/driver-dev-guide.rst0000666000175100017510000000315113224676046025107 0ustar zuulzuul00000000000000--------------------------------------- Guide for Transport Driver Implementors --------------------------------------- .. currentmodule:: oslo_messaging .. automodule:: oslo_messaging._drivers.base ============ Introduction ============ This document is a *best practices* guide for the developer interested in creating a new transport driver for Oslo.Messaging. It should also be used by maintainers as a reference for proper driver behavior. This document will describe the driver interface and prescribe the expected behavior of any driver implemented to this interface. **Note well:** The API described in this document is internal to the oslo.messaging library and therefore **private**. Under no circumstances should this API be referenced by code external to the oslo.messaging library. ================ Driver Interface ================ The driver interface is defined by a set of abstract base classes. The developer creates a driver by defining concrete classes from these bases. The derived classes embody the logic that is specific for the messaging back-end that is to be supported. These base classes are defined in the *base.py* file in the *_drivers* subdirectory. =============== IncomingMessage =============== .. autoclass:: IncomingMessage :members: ================== RpcIncomingMessage ================== .. autoclass:: RpcIncomingMessage :members: ======== Listener ======== .. autoclass:: Listener :members: ================= PollStyleListener ================= .. autoclass:: PollStyleListener :members: ========== BaseDriver ========== .. autoclass:: BaseDriver :members: oslo.messaging-5.35.0/doc/source/contributor/contributing.rst0000666000175100017510000000012413224676046024451 0ustar zuulzuul00000000000000============== Contributing ============== .. include:: ../../../CONTRIBUTING.rst oslo.messaging-5.35.0/doc/source/contributor/index.rst0000666000175100017510000000027713224676046023062 0ustar zuulzuul00000000000000============================== Contributing to oslo.messaging ============================== .. 
oslo.messaging-5.35.0/doc/source/contributor/contributing.rst0000666000175100017510000000012413224676046024451 0ustar zuulzuul00000000000000
==============
Contributing
==============

.. include:: ../../../CONTRIBUTING.rst

oslo.messaging-5.35.0/doc/source/contributor/index.rst0000666000175100017510000000027713224676046023062 0ustar zuulzuul00000000000000
==============================
Contributing to oslo.messaging
==============================

.. toctree::
    :maxdepth: 2

    contributing
    driver-dev-guide
    supported-messaging-drivers

oslo.messaging-5.35.0/setup.cfg0000666000175100017510000000736613224676256016424 0ustar zuulzuul00000000000000
[metadata]
name = oslo.messaging
author = OpenStack
author-email = openstack-dev@lists.openstack.org
summary = Oslo Messaging API
description-file = README.rst
home-page = https://docs.openstack.org/oslo.messaging/latest/
classifier =
    Environment :: OpenStack
    Intended Audience :: Developers
    Intended Audience :: Information Technology
    License :: OSI Approved :: Apache Software License
    Operating System :: OS Independent
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.5

[files]
packages =
    oslo_messaging

[entry_points]
console_scripts =
    oslo-messaging-zmq-proxy = oslo_messaging._cmd.zmq_proxy:main
    oslo-messaging-zmq-broker = oslo_messaging._cmd.zmq_proxy:main
    oslo-messaging-send-notification = oslo_messaging.notify.notifier:_send_notification

oslo.messaging.drivers =
    rabbit = oslo_messaging._drivers.impl_rabbit:RabbitDriver
    zmq = oslo_messaging._drivers.impl_zmq:ZmqDriver
    amqp = oslo_messaging._drivers.impl_amqp1:ProtonDriver

    # This driver is supported for notification usage only
    kafka = oslo_messaging._drivers.impl_kafka:KafkaDriver

    # To avoid confusion
    kombu = oslo_messaging._drivers.impl_rabbit:RabbitDriver

    # This is just for internal testing
    fake = oslo_messaging._drivers.impl_fake:FakeDriver

    pika = oslo_messaging._drivers.impl_pika:PikaDriver

oslo.messaging.executors =
    blocking = futurist:SynchronousExecutor
    eventlet = futurist:GreenThreadPoolExecutor
    threading = futurist:ThreadPoolExecutor

oslo.messaging.notify.drivers =
    messagingv2 = oslo_messaging.notify.messaging:MessagingV2Driver
    messaging = oslo_messaging.notify.messaging:MessagingDriver
    log = oslo_messaging.notify._impl_log:LogDriver
    test = oslo_messaging.notify._impl_test:TestDriver
    noop = oslo_messaging.notify._impl_noop:NoOpDriver
    routing = oslo_messaging.notify._impl_routing:RoutingDriver

oslo.messaging.pika.connection_factory =
    # Creates a new connection for each create_connection call. Old-style
    # behaviour. Uses many more connections than the single and read_write
    # factories, but is still available as an option.
    new = oslo_messaging._drivers.pika_driver.pika_connection_factory:PikaConnectionFactory

    # Creates only one connection for the transport and returns it for each
    # create_connection call. This is the default, but it can not be used
    # with the synchronous executor.
    single = oslo_messaging._drivers.pika_driver.pika_connection_factory:SinglePikaConnectionFactory

    # Creates two connections - one for listening and another for sending -
    # and returns one of them for each create_connection call depending on
    # the connection purpose. Uses one more connection than 'single', but
    # can be used with the synchronous executor.
    read_write = oslo_messaging._drivers.pika_driver.pika_connection_factory:ReadWritePikaConnectionFactory

oslo.messaging.zmq.matchmaker =
    # Matchmakers for ZeroMQ
    dummy = oslo_messaging._drivers.zmq_driver.matchmaker.zmq_matchmaker_base:MatchmakerDummy
    redis = oslo_messaging._drivers.zmq_driver.matchmaker.zmq_matchmaker_redis:MatchmakerRedis
    sentinel = oslo_messaging._drivers.zmq_driver.matchmaker.zmq_matchmaker_redis:MatchmakerSentinel

oslo.config.opts =
    oslo.messaging = oslo_messaging.opts:list_opts

[wheel]
universal = 1

[build_sphinx]
warning-is-error = 1
all-files = 1
source-dir = doc/source
build-dir = doc/build

[upload_sphinx]
upload-dir = doc/build/html

[compile_catalog]
directory = oslo_messaging/locale
domain = oslo_messaging

[update_catalog]
domain = oslo_messaging
output_dir = oslo_messaging/locale
input_file = oslo_messaging/locale/oslo_messaging.pot

[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = oslo_messaging/locale/oslo_messaging.pot

[egg_info]
tag_build =
tag_date = 0
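The ``oslo.messaging.drivers`` entry points in the ``setup.cfg`` above
are how a transport URL's scheme is tied to a driver class: when an
application creates a transport, the driver registered under the URL's
scheme is loaded via these entry points. A minimal sketch of that
mechanism in use follows; the broker address and credentials are
placeholders, and error handling is omitted::

    from oslo_config import cfg
    import oslo_messaging

    # The 'rabbit' scheme in the URL selects the
    # oslo_messaging._drivers.impl_rabbit:RabbitDriver entry point
    # declared under [entry_points] oslo.messaging.drivers above.
    transport = oslo_messaging.get_transport(
        cfg.CONF, 'rabbit://guest:guest@localhost:5672/')

    target = oslo_messaging.Target(topic='demo_topic', server='server1')
    client = oslo_messaging.RPCClient(transport, target)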
oslo.messaging-5.35.0/babel.cfg0000666000175100017510000000002013224676046016312 0ustar zuulzuul00000000000000
[python: **.py]

oslo.messaging-5.35.0/setup.py0000666000175100017510000000200613224676046016304 0ustar zuulzuul00000000000000
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)