srpc-0.10.1/.bazelignore
./third_party/snappy/third_party/benchmark
./third_party/snappy/third_party/googletest

srpc-0.10.1/.editorconfig
# top-most EditorConfig file
root = true
# all files
[*]
indent_style = tab
indent_size = 4

srpc-0.10.1/.github/workflows/ci.yml
name: ci build

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  ubuntu-cmake:
    name: ubuntu
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: install deps
        run: |
          sudo apt-get update
          sudo apt-get install -y libprotobuf-dev protobuf-compiler libgtest-dev valgrind
      - name: update submodules
        run: git submodule update --init --recursive
      - name: make
        run: make -j4
      - name: make tutorial
        run: make tutorial -j4
      - name: make check
        run: make check -j4
      - name: make install
        run: sudo make install

  fedora-cmake:
    name: fedora
    runs-on: ubuntu-latest
    steps:
      - name: Setup Podman
        run: |
          sudo apt update
          sudo apt-get -y install podman
          podman pull fedora:rawhide
      - name: Get source
        uses: actions/checkout@master
        with:
          path: 'workflow'
      - name: Create container and run tests
        run: |
          {
            echo 'FROM fedora:rawhide'
            echo 'RUN dnf -y update'
            echo 'RUN dnf -y install cmake gcc-c++ gtest-devel git make'
            echo 'RUN dnf -y install openssl-devel protobuf-devel'
            echo 'RUN dnf -y install lz4-devel snappy-devel'
            echo 'RUN dnf clean all'
            echo 'COPY workflow workflow'
            echo 'WORKDIR /workflow'
            echo 'RUN git submodule update --init --recursive'
            echo 'RUN cmake'
            echo 'RUN make'
            echo 'RUN make check'
            echo 'RUN make tutorial'
          } > podmanfile
          podman build --tag fedorarawhide -f ./podmanfile

  ubuntu-bazel:
    name: bazel
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: bazel build
        run: bazel build ...

srpc-0.10.1/.gitignore
*.a
*.bak
*.gz
*.zip
*.tar
*.la
*.lo
*.o
*.rpm
*.so
*.so.*
*.cmake
*.vcxproj
*.filters
*.sln
*.pb.h
*.pb.cc
*.log
*.srpc.h
*.thrift.h
*.pb_skeleton.h
*.pb_skeleton.cc
*.thrift_skeleton.h
*.thrift_skeleton.cc
_bin/
_include/
_lib/
.deps/
build/
build_pkg/
CMakeFiles/
Debug/
Release/
missing
SRCINFO
SRCNUMVER
SRCVERSION
CMakeCache.txt
Makefile
bazel-*

srpc-0.10.1/.gitmodules
[submodule "workflow"]
path = workflow
url = https://github.com/sogou/workflow.git
[submodule "third_party/snappy"]
path = third_party/snappy
url = https://github.com/google/snappy
branch = 1.1.9
[submodule "third_party/lz4"]
path = third_party/lz4
url = https://github.com/lz4/lz4
branch = v1.9.3

srpc-0.10.1/BUILD
load("@rules_cc//cc:defs.bzl", "cc_proto_library")
load("@rules_proto//proto:defs.bzl", "proto_library")
load(":srpc.bzl", "srpc_cc_library")
proto_library(
name = "message_proto",
srcs = [
"src/message/rpc_meta.proto",
"src/message/rpc_meta_brpc.proto",
"src/message/rpc_meta_trpc.proto",
],
strip_import_prefix = "src/message",
)
cc_proto_library(
name = "MessageProto",
deps = [":message_proto"],
)
proto_library(
name = "module_proto",
srcs = [
"src/module/proto/opentelemetry_common.proto",
"src/module/proto/opentelemetry_resource.proto",
"src/module/proto/opentelemetry_trace.proto",
"src/module/proto/opentelemetry_metrics.proto",
"src/module/proto/opentelemetry_metrics_service.proto",
],
strip_import_prefix = "src/module/proto",
)
cc_proto_library(
name = "ModuleProto",
deps = [":module_proto"],
)
cc_library(
name = "srpc_hdrs",
hdrs = glob(["src/include/srpc/*"]),
includes = ["src/include"],
visibility = ["//visibility:public"],
deps = [
"@workflow//:workflow_hdrs",
],
)
cc_library(
name = "srpc",
srcs = glob(["src/**/*.cc"]),
hdrs = glob([
"src/**/*.h",
"src/**/*.inl",
]),
includes = [
"src",
"src/compress",
"src/message",
"src/module",
"src/thrift",
"src/var",
],
visibility = ["//visibility:public"],
deps = [
":MessageProto",
":ModuleProto",
"@lz4",
"@snappy",
"@workflow//:http",
"@workflow//:redis",
"@workflow//:upstream",
],
)
cc_library(
name = "srpc_generator_lib",
srcs = glob(
[
"src/generator/*.cc",
],
exclude = [
"src/compiler.cc",
],
),
hdrs = glob([
"src/generator/*.h",
]),
includes = ["src/generator"],
visibility = ["//visibility:public"],
deps = [
":srpc",
],
)
cc_binary(
name = "srpc_generator",
srcs = ["src/generator/compiler.cc"],
visibility = ["//visibility:public"],
deps = [
":srpc",
":srpc_generator_lib",
],
)
proto_library(
name = "echo_pb_proto",
srcs = [
"tutorial/echo_pb.proto",
],
strip_import_prefix = "tutorial",
)
cc_proto_library(
name = "EchoProto",
deps = [":echo_pb_proto"],
)
srpc_cc_library(
name = "echo_pb_srpc",
srcs = ["tutorial/echo_pb.proto"],
deps = [":EchoProto"],
)
cc_binary(
name = "srpc_pb_server",
srcs = ["tutorial/tutorial-01-srpc_pb_server.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "srpc_pb_client",
srcs = ["tutorial/tutorial-02-srpc_pb_client.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
srpc_cc_library(
name = "echo_thrift_srpc",
srcs = ["tutorial/echo_thrift.thrift"],
type = "thrift",
)
cc_binary(
name = "srpc_thrift_server",
srcs = ["tutorial/tutorial-03-srpc_thrift_server.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_thrift_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "srpc_thrift_client",
srcs = ["tutorial/tutorial-04-srpc_thrift_client.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_thrift_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "brpc_pb_server",
srcs = ["tutorial/tutorial-05-brpc_pb_server.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "brpc_pb_client",
srcs = ["tutorial/tutorial-06-brpc_pb_client.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "thrift_thrift_server",
srcs = ["tutorial/tutorial-07-thrift_thrift_server.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_thrift_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "thrift_thrift_client",
srcs = ["tutorial/tutorial-08-thrift_thrift_client.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_thrift_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "client_task",
srcs = ["tutorial/tutorial-09-client_task.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "server_async",
srcs = ["tutorial/tutorial-10-server_async.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
proto_library(
name = "helloworld_proto",
srcs = [
"tutorial/helloworld.proto",
],
strip_import_prefix = "tutorial",
)
cc_proto_library(
name = "HelloworldProto",
deps = [":helloworld_proto"],
)
srpc_cc_library(
name = "helloworld_pb_srpc",
srcs = ["tutorial/helloworld.proto"],
deps = [":HelloworldProto"],
)
cc_binary(
name = "trpc_pb_server",
srcs = ["tutorial/tutorial-11-trpc_pb_server.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":helloworld_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "trpc_pb_client",
srcs = ["tutorial/tutorial-12-trpc_pb_client.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":helloworld_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "trpc_http_server",
srcs = ["tutorial/tutorial-13-trpc_http_server.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":helloworld_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "trpc_http_client",
srcs = ["tutorial/tutorial-14-trpc_http_client.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":helloworld_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "srpc_pb_proxy",
srcs = ["tutorial/tutorial-15-srpc_pb_proxy.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)
cc_binary(
name = "server_with_metrics",
srcs = ["tutorial/tutorial-16-server_with_metrics.cc"],
linkopts = [
"-lpthread",
"-lssl",
"-lcrypto",
],
deps = [
":echo_pb_srpc",
":srpc",
":srpc_hdrs",
],
)

srpc-0.10.1/CMakeLists.txt
cmake_minimum_required(VERSION 3.6)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "build type")
set(CMAKE_SKIP_RPATH TRUE)
project(srpc
VERSION 0.10.1
LANGUAGES C CXX)
###Options
if (WIN32)
option(SRPC_BUILD_STATIC_RUNTIME "Use static runtime" ON)
endif ()
#### CHECK
include(CheckIncludeFile)
include(CheckIncludeFileCXX)
set(THIRD_PARTY_FATAL_MESSAGE
" is neither installed nor found in third_party!
Suggestion to initialize third_party:
\"git submodule update --init\""
)
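# The checks below prefer the bundled sources under third_party/ (populated by
# "git submodule update --init"); otherwise they fall back to system-installed
# lz4/snappy/workflow, and configuration stops with the message above when
# neither is available.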
find_library(LZ4_LIBRARY NAMES lz4)
check_include_file("lz4.h" LZ4_INSTALLED)
if (NOT LZ4_INSTALLED AND ${LZ4_LIBRARY} STREQUAL "LZ4_LIBRARY-NOTFOUND")
if (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/third_party/lz4/lib/lz4.h")
message( FATAL_ERROR "\nLz4" ${THIRD_PARTY_FATAL_MESSAGE} )
endif ()
else ()
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/third_party/lz4/lib/lz4.h")
message("Lz4 third_party FOUND. Use for source code dependencies.")
set(LZ4_INSTALLED 0 CACHE INTERNAL "check_lz4_installed")
else ()
find_path(LZ4_INCLUDE_DIR NAMES "lz4.h")
include_directories(${LZ4_INCLUDE_DIR})
set(LZ4_INSTALLED 1 CACHE INTERNAL "check_lz4_installed")
endif ()
endif ()
find_package(Snappy)
check_include_file_cxx("snappy.h" SNAPPY_INSTALLED)
if (NOT SNAPPY_INSTALLED AND NOT ${Snappy_FOUND})
if (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/third_party/snappy/cmake")
message( FATAL_ERROR "\nSnappy" ${THIRD_PARTY_FATAL_MESSAGE} )
endif ()
else ()
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/third_party/snappy/cmake")
message("Snappy third_party FOUND. Use for source code dependencies.")
set(SNAPPY_INSTALLED 0 CACHE INTERNAL "check_snappy_installed")
else ()
find_path(Snappy_INCLUDE_DIR NAMES "snappy.h")
include_directories(${Snappy_INCLUDE_DIR})
set(SNAPPY_INSTALLED 1 CACHE INTERNAL "check_snappy_installed")
endif ()
endif ()
check_include_file_cxx("workflow/Workflow.h" WORKFLOW_INSTALLED)
if (NOT WORKFLOW_INSTALLED)
if (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/workflow/workflow-config.cmake.in")
message( FATAL_ERROR "\nWorkflow" ${THIRD_PARTY_FATAL_MESSAGE} )
endif ()
else ()
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/workflow/workflow-config.cmake.in")
message("Workflow third_party FOUND. Use for source code dependencies.")
set(WORKFLOW_INSTALLED 0)
endif ()
endif ()
find_program(PROTOC "protoc")
if(${PROTOC} STREQUAL "PROTOC-NOTFOUND")
message( FATAL_ERROR "Protobuf compiler is missing!")
endif()
#### PREPARE
set(INC_DIR ${PROJECT_SOURCE_DIR}/_include CACHE PATH "srpc inc")
set(LIB_DIR ${PROJECT_SOURCE_DIR}/_lib CACHE PATH "srpc lib")
set(BIN_DIR ${PROJECT_SOURCE_DIR}/_bin CACHE PATH "srpc bin")
include(GNUInstallDirs)
set(CMAKE_CONFIG_INSTALL_FILE ${PROJECT_BINARY_DIR}/config.toinstall.cmake)
set(CMAKE_CONFIG_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LIB_DIR})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LIB_DIR})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${BIN_DIR})
add_custom_target(
LINK_HEADERS ALL
COMMENT "link headers..."
)
INCLUDE(CMakeLists_Headers.txt)
macro(makeLink src dest target)
add_custom_command(
TARGET ${target} PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dest}
DEPENDS ${dest}
)
endmacro()
add_custom_command(
TARGET LINK_HEADERS PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${INC_DIR}/${PROJECT_NAME}
)
foreach(header_file ${INCLUDE_HEADERS})
string(REPLACE "/" ";" arr ${header_file})
list(GET arr -1 file_name)
makeLink(${PROJECT_SOURCE_DIR}/${header_file} ${INC_DIR}/${PROJECT_NAME}/${file_name} LINK_HEADERS)
endforeach()
if (WIN32)
if (SRPC_BUILD_STATIC_RUNTIME)
set(CompilerFlags
CMAKE_CXX_FLAGS
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_C_FLAGS
CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_MINSIZEREL
)
foreach(CompilerFlag ${CompilerFlags})
string(REPLACE "/MD" "/MT" ${CompilerFlag} "${${CompilerFlag}}")
endforeach ()
endif ()
endif ()
add_subdirectory(src)
#### CONFIG
include(CMakePackageConfigHelpers)
set(CONFIG_INC_DIR ${INC_DIR})
set(CONFIG_LIB_DIR ${LIB_DIR})
if (VCPKG_TOOLCHAIN AND EXISTS "${CMAKE_INSTALL_PREFIX}/tools/srpc")
set(CONFIG_BIN_DIR ${CMAKE_INSTALL_PREFIX}/tools/srpc)
set(WITH_VCPKG_TOOLCHAIN 1 CACHE INTERNAL "build_with_vcpkg_toolchain")
message("Install with VCPKG toolchain. Dir ${CMAKE_INSTALL_PREFIX}/tools/srpc.")
else()
set(CONFIG_BIN_DIR ${BIN_DIR})
set(WITH_VCPKG_TOOLCHAIN 0 CACHE INTERNAL "build_with_vcpkg_toolchain")
endif()
configure_package_config_file(
${PROJECT_NAME}-config.cmake.in
${PROJECT_SOURCE_DIR}/${PROJECT_NAME}-config.cmake
INSTALL_DESTINATION ${CMAKE_CONFIG_INSTALL_DIR}
PATH_VARS CONFIG_INC_DIR CONFIG_LIB_DIR CONFIG_BIN_DIR
)
set(CONFIG_INC_DIR ${CMAKE_INSTALL_INCLUDEDIR})
set(CONFIG_LIB_DIR ${CMAKE_INSTALL_LIBDIR})
set(CONFIG_BIN_DIR ${CMAKE_INSTALL_BINDIR})
configure_package_config_file(
${PROJECT_NAME}-config.cmake.in
${CMAKE_CONFIG_INSTALL_FILE}
INSTALL_DESTINATION ${CMAKE_CONFIG_INSTALL_DIR}
PATH_VARS CONFIG_INC_DIR CONFIG_LIB_DIR CONFIG_BIN_DIR
)
write_basic_package_version_file(
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake
VERSION ${WORKFLOW_VERSION}
COMPATIBILITY AnyNewerVersion
)
install(
FILES ${CMAKE_CONFIG_INSTALL_FILE}
DESTINATION ${CMAKE_CONFIG_INSTALL_DIR}
COMPONENT devel
RENAME ${PROJECT_NAME}-config.cmake
)
install(
FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake
DESTINATION ${CMAKE_CONFIG_INSTALL_DIR}
COMPONENT devel
)
install(
FILES ${INCLUDE_HEADERS}
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}
COMPONENT devel
)
install(
FILES README.md
DESTINATION "${CMAKE_INSTALL_DOCDIR}-${PROJECT_VERSION}"
COMPONENT devel
)

srpc-0.10.1/CMakeLists_Headers.txt
cmake_minimum_required(VERSION 3.6)
set(SRC_HEADERS
src/compress/rpc_compress.h
src/compress/rpc_compress_gzip.h
src/message/rpc_message.h
src/message/rpc_message_srpc.h
src/message/rpc_message_thrift.h
src/message/rpc_message_brpc.h
src/message/rpc_message_trpc.h
src/thrift/rpc_thrift_buffer.h
src/thrift/rpc_thrift_enum.h
src/thrift/rpc_thrift_idl.h
src/thrift/rpc_thrift_idl.inl
src/var/ckms_quantiles.h
src/var/time_window_quantiles.h
src/var/rpc_var.h
src/module/rpc_module.h
src/module/rpc_trace_module.h
src/module/rpc_metrics_module.h
src/module/rpc_filter.h
src/module/rpc_trace_filter.h
src/module/rpc_metrics_filter.h
src/rpc_basic.h
src/rpc_buffer.h
src/rpc_client.h
src/rpc_context.h
src/rpc_context.inl
src/rpc_global.h
src/rpc_options.h
src/rpc_server.h
src/rpc_service.h
src/rpc_task.inl
src/rpc_types.h
src/rpc_zero_copy_stream.h
src/rpc_define.h
)
if (NOT WIN32)
set(SRC_HEADERS
${SRC_HEADERS}
src/http/http_task.h
src/http/http_module.h
src/http/http_client.h
src/http/http_server.h
)
endif ()
if (NOT SNAPPY_INSTALLED)
set(SNAPPY_HEADERS
third_party/snappy/snappy.h
third_party/snappy/snappy-c.h
third_party/snappy/snappy-sinksource.h
third_party/snappy/snappy-stubs-public.h
)
endif ()
if (NOT LZ4_INSTALLED)
set(LZ4_HEADERS
third_party/lz4/lib/lz4.h
third_party/lz4/lib/lz4frame.h
)
endif ()
if (WITH_VCPKG_TOOLCHAIN)
set(INCLUDE_HEADERS ${SRC_HEADERS})
else()
set(INCLUDE_HEADERS ${SRC_HEADERS} ${SNAPPY_HEADERS} ${LZ4_HEADERS})
endif()

srpc-0.10.1/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others’ private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at liyingxin@sogou-inc.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

srpc-0.10.1/GNUmakefile
ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
ALL_TARGETS := all base check install preinstall package rpm clean tutorial example
MAKE_FILE := Makefile
DEFAULT_BUILD_DIR := build.cmake
BUILD_DIR := $(shell if [ -f $(MAKE_FILE) ]; then echo "."; else echo $(DEFAULT_BUILD_DIR); fi)
CMAKE3 := $(shell if which cmake3>/dev/null ; then echo cmake3; else echo cmake; fi;)
WORKFLOW := $(shell if [ -f "workflow/workflow-config.cmake.in" ]; then echo "Found"; else echo "NotFound"; fi)
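# Reuse an existing in-tree Makefile if one is present, otherwise configure
# out-of-tree into build.cmake; prefer cmake3 when available, and build the
# bundled workflow submodule first whenever it has been checked out.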
.PHONY: $(ALL_TARGETS)
all: base
make -C $(BUILD_DIR) -f Makefile
base:
ifeq ("$(WORKFLOW)","Found")
make -C workflow
endif
mkdir -p $(BUILD_DIR)
ifeq ($(DEBUG),y)
cd $(BUILD_DIR) && $(CMAKE3) -D CMAKE_BUILD_TYPE=Debug $(ROOT_DIR)
else ifneq ("${INSTALL_PREFIX}install_prefix", "install_prefix")
cd $(BUILD_DIR) && $(CMAKE3) -DCMAKE_INSTALL_PREFIX:STRING=${INSTALL_PREFIX} $(ROOT_DIR)
else
cd $(BUILD_DIR) && $(CMAKE3) $(ROOT_DIR)
endif
tutorial: all
make -C tutorial
check: all
make -C test check
install preinstall package: base
mkdir -p $(BUILD_DIR)
cd $(BUILD_DIR) && $(CMAKE3) $(ROOT_DIR)
make -C $(BUILD_DIR) -f Makefile $@
rpm: package
ifneq ($(BUILD_DIR),.)
mv $(BUILD_DIR)/*.rpm ./
endif
clean:
ifeq ("$(WORKFLOW)","Found")
-make -C workflow clean
endif
-make -C test clean
-make -C tutorial clean
-make -C benchmark clean
rm -rf $(DEFAULT_BUILD_DIR)
rm -rf _include
rm -rf _lib
rm -rf _bin
rm -f SRCINFO SRCNUMVER SRCVERSION
rm -f ./*.rpm
rm -f src/message/*.pb.h src/message/*.pb.cc
find . -name CMakeCache.txt | xargs rm -f
find . -name Makefile | xargs rm -f
find . -name "*.cmake" | xargs rm -f
find . -name CMakeFiles | xargs rm -rf

srpc-0.10.1/LICENSE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

srpc-0.10.1/README.md
[Chinese version](README_cn.md)
### NEW !!! 👉 [SRPC tools : build Workflow and SRPC projects easily.](/tools/README.md)
## Introduction
**SRPC is an enterprise-level RPC system used by almost all online services in Sogou. It handles tens of billions of requests every day, covering search, recommendation, advertising, and other types of services.**
Based on [Sogou C++ Workflow](https://github.com/sogou/workflow), it is an excellent choice for high-performance, low-latency, lightweight RPC systems. It also provides AOP (aspect-oriented) modules that can report Metrics and Traces to a variety of cloud-native systems, such as OpenTelemetry.
Its main features include:
* Support multiple RPC protocols: [`SRPC`](/tutorial/tutorial-01-srpc_pb_server.cc), [`bRPC`](/tutorial/tutorial-05-brpc_pb_server.cc), [`Thrift`](/tutorial/tutorial-07-thrift_thrift_server.cc), [`tRPC`](/tutorial/tutorial-11-trpc_pb_server.cc)
* Support multiple operating systems: `Linux`, `MacOS`, `Windows`
* Support several IDL formats: [`Protobuf`](/tutorial/echo_pb.proto), [`Thrift`](/tutorial/echo_thrift.thrift)
* Support several data formats transparently: `Json`, `Protobuf`, `Thrift Binary`
* Support several compression formats (`gzip`, `zlib`, `snappy`, `lz4`); the framework decompresses them automatically
* Support several communication protocols transparently: `tcp`, `http`, `ssl`, `https`
* With [HTTP+JSON](/docs/docs-07-srpc-http.md), you can communicate with clients or servers written in any language
* Use it together with [Workflow Series and Parallel](/docs/docs-06-workflow.md) to orchestrate computation and other asynchronous resources (see the sketch after this list)
* Perfectly compatible with all Workflow functions, such as name service, [upstream](docs/docs-06-workflow.md#3-upstream) and other components
* Report [Tracing](/docs/docs-08-tracing.md) to [OpenTelemetry](https://opentelemetry.io)
* Report [Metrics](/docs/docs-09-metrics.md) to [OpenTelemetry](https://opentelemetry.io) and [Prometheus](https://prometheus.io)
* [More features...](/docs/en/rpc.md)
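As a concrete illustration of the Workflow integration item above, here is a minimal client-side sketch. It is not code from this repository: it reuses the `example.srpc.h` generated in the Quick Start below and assumes that `RPCContext::get_series()` exposes the underlying Workflow series, as described in [docs-06-workflow.md](/docs/docs-06-workflow.md).
~~~cpp
#include <stdio.h>
#include "example.srpc.h"            // generated in the Quick Start below
#include "workflow/WFTaskFactory.h"

using namespace srpc;

int main()
{
    Example::SRPCClient client("127.0.0.1", 1412);
    EchoRequest req;
    req.set_message("Hello, srpc!");
    req.set_name("workflow");

    client.Echo(&req, [](EchoResponse *response, RPCContext *ctx) {
        if (!ctx->success())
            return;

        // Assumption: ctx->get_series() returns the Workflow series this RPC
        // runs in. Push a timer task onto it so the timer runs next, inside
        // the same task flow as the RPC.
        WFTimerTask *timer = WFTaskFactory::create_timer_task(1000000,
            [](WFTimerTask *) { printf("timer ran after the RPC finished\n"); });
        ctx->get_series()->push_back(timer);
    });

    getchar(); // press "Enter" to end.
    return 0;
}
~~~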
## Installation
srpc is packaged for Debian and Fedora, so you can install it either from source code or from the system package.
Reference: [Linux, MacOS, Windows Installation and Compilation Guide](docs/en/installation.md)
## Quick Start
Let's quickly learn how to use it in a few steps.
For more detailed usage, please refer to [Documents](/docs) and [Tutorial](/tutorial).
#### 1\. example.proto
~~~proto
syntax = "proto3";// You can use either proto2 or proto3. Both are supported by srpc
message EchoRequest {
string message = 1;
string name = 2;
};
message EchoResponse {
string message = 1;
};
service Example {
rpc Echo(EchoRequest) returns (EchoResponse);
};
~~~
#### 2\. generate code
~~~sh
protoc example.proto --cpp_out=./ --proto_path=./
srpc_generator protobuf ./example.proto ./
~~~
#### 3\. server.cc
~~~cpp
#include <signal.h>
#include <stdio.h>
#include "example.srpc.h"
using namespace srpc;
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
response->set_message("Hi, " + request->name());
printf("get_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(), response->DebugString().c_str());
}
};
void sig_handler(int signo) { }
int main()
{
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
SRPCServer server_tcp;
SRPCHttpServer server_http;
ExampleServiceImpl impl;
server_tcp.add_service(&impl);
server_http.add_service(&impl);
server_tcp.start(1412);
server_http.start(8811);
getchar(); // press "Enter" to end.
server_http.stop();
server_tcp.stop();
return 0;
}
~~~
#### 4\. client.cc
~~~cpp
#include <stdio.h>
#include "example.srpc.h"
using namespace srpc;
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
EchoRequest req;
req.set_message("Hello, srpc!");
req.set_name("workflow");
client.Echo(&req, [](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
});
getchar(); // press "Enter" to end.
return 0;
}
~~~
#### 5\. make
These compile commands are for Linux only. On other systems, using the complete cmake files in [tutorial](/tutorial) is recommended.
~~~sh
g++ -o server server.cc example.pb.cc -std=c++11 -lsrpc
g++ -o client client.cc example.pb.cc -std=c++11 -lsrpc
~~~
#### 6\. run
Terminal 1:
~~~sh
./server
~~~
Terminal 2:
~~~sh
./client
~~~
We can also use curl to post an HTTP request:
~~~sh
curl 127.0.0.1:8811/Example/Echo -H 'Content-Type: application/json' -d '{message:"from curl",name:"CURL"}'
~~~
Output of Terminal 1:
~~~sh
get_req:
message: "Hello, srpc!"
name: "workflow"
set_resp:
message: "Hi, workflow"
get_req:
message: "from curl"
name: "CURL"
set_resp:
message: "Hi, CURL"
~~~
Output of Terminal 2:
~~~sh
message: "Hi, workflow"
~~~
Output of CURL:
~~~sh
{"message":"Hi, CURL"}
~~~
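Because the HTTP endpoint speaks plain HTTP+JSON, any HTTP client can issue the same call. The sketch below is not part of the repository; it posts the same JSON using Workflow's generic `WFHttpTask`, with the URL and payload mirroring the curl command above.
~~~cpp
#include <stdio.h>
#include <string.h>
#include "workflow/WFTaskFactory.h"
#include "workflow/HttpMessage.h"

int main()
{
    const char *body = "{\"message\":\"from http task\",\"name\":\"WFHttpTask\"}";

    // Create a plain HTTP task; the callback prints the JSON reply body.
    WFHttpTask *task = WFTaskFactory::create_http_task(
        "http://127.0.0.1:8811/Example/Echo", 0, 0,
        [](WFHttpTask *task) {
            const void *resp_body;
            size_t len;
            if (task->get_state() == WFT_STATE_SUCCESS &&
                task->get_resp()->get_parsed_body(&resp_body, &len))
            {
                printf("%.*s\n", (int)len, (const char *)resp_body);
            }
        });

    protocol::HttpRequest *req = task->get_req();
    req->set_method("POST");
    req->add_header_pair("Content-Type", "application/json");
    req->append_output_body(body, strlen(body));
    task->start();

    getchar(); // press "Enter" to end.
    return 0;
}
~~~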
## Benchmark
* CPU 2-chip/8-core/32-processor Intel(R) Xeon(R) CPU E5-2630 v3 @2.40GHz
* Memory all 128G
* 10 Gigabit Ethernet
* BAIDU brpc-client in pooled (connection pool) mode
#### QPS at cross-machine single client→ single server under different concurrency
~~~
Client = 1
ClientThread = 64, 128, 256, 512, 1024
RequestSize = 32
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### QPS at cross-machine multi-client→ single server under different client processes
~~~
Client = 1, 2, 4, 8, 16
ClientThread = 32
RequestSize = 32
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### QPS at same-machine single client→ single server under different concurrency
~~~
Client = 1
ClientThread = 1, 2, 4, 8, 16, 32, 64, 128, 256
RequestSize = 1024
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### QPS at same-machine single client→ single server under different request sizes
~~~
Client = 1
ClientThread = 100
RequestSize = 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### Latency CDF for fixed QPS at same-machine single client→ single server
~~~
Client = 1
ClientThread = 50
ClientQPS = 10000
RequestSize = 1024
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
Outlier = 1%
~~~

#### Latency CDF for fixed QPS at cross-machine multi-client→ single server
~~~
Client = 32
ClientThread = 16
ClientQPS = 2500
RequestSize = 512
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
Outlier = 1%
~~~

## Contact
* **Email** - **[liyingxin@sogou-inc.com](mailto:liyingxin@sogou-inc.com)** - main author
* **Issue** - You are very welcome to post questions to [issues](https://github.com/sogou/srpc/issues) list.
* **QQ** - Group number: ``618773193``

srpc-0.10.1/README_cn.md
[English version](README.md) | [Wiki:SRPC架构介绍](/docs/wiki.md)
### NEW !!! 👉 [SRPC tools : 一个帮你快速构建Workflow和SRPC项目的小工具.](/tools/README_cn.md)
## Introduction
**SRPC是全搜狗业务线上使用的企业级RPC系统,目前每天承载上百亿的请求量,涵盖搜广推及其他类型业务。**
底层基于[Sogou C++ Workflow](https://github.com/sogou/workflow),是高性能、低延迟、轻量级RPC系统的极佳选择。且加入了AOP面向切面的模块,支持Metrics(监控指标)和Trace(链路追踪)功能,可上报到多种云原生系统,包括OpenTelemetry。
主要功能和示例:
* 支持多种RPC协议:[`SPRC`](/tutorial/tutorial-01-srpc_pb_server.cc)、[`bRPC`](/tutorial/tutorial-05-brpc_pb_server.cc)、[`Thrift`](/tutorial/tutorial-07-thrift_thrift_server.cc)、[`tRPC`](/tutorial/tutorial-11-trpc_pb_server.cc)
* 支持多种操作系统:`Linux` / `MacOS` / `Windows`
* 支持多种IDL格式:[`Protobuf`](/tutorial/echo_pb.proto)、[`Thrift`](/tutorial/echo_thrift.thrift)
* 支持多种数据布局,使用上完全透明:`Json`、`Protobuf`、`Thrift Binary`
* 支持多种压缩,框架自动解压:`gzip`、`zlib`、`snappy`、`lz4`
* 支持多种通信协议:`tcp`、`http`、`sctp`、`ssl`、`https`
* 可以通过[http+json实现跨语言](/docs/docs-07-srpc-http.md)
* 可以使用[Workflow串并联任务流](/docs/docs-06-workflow.md),打通计算及其他异步资源
* 完美兼容Workflow所有功能:命名服务体系、[upstream](docs/docs-06-workflow.md#3-upstream)、其他组件等
* 链路追踪功能:上报[Tracing](/docs/docs-08-tracing.md)到[OpenTelemetry](https://opentelemetry.io)
* 监控功能:上报[Metrics](/docs/docs-09-metrics.md)到OpenTelemetry和[Prometheus](https://prometheus.io)
## Installation
SRPC是Debian Linux和Fedora的自带安装包,因此可以通过源码安装,和系统自带安装包安装。
具体参考:[Linux、MacOS、Windows安装和编译指南](docs/installation.md)
## Quick Start
我们通过几个步骤快速了解如何使用。
更详细的用法可以参考[更多文档](/docs),和[官方教程](/tutorial)。
#### 1. example.proto
~~~proto
syntax = "proto3";//这里proto2和proto3都可以,srpc都支持
message EchoRequest {
string message = 1;
string name = 2;
};
message EchoResponse {
string message = 1;
};
service Example {
rpc Echo(EchoRequest) returns (EchoResponse);
};
~~~
#### 2. generate code
~~~sh
protoc example.proto --cpp_out=./ --proto_path=./
srpc_generator protobuf ./example.proto ./
~~~
#### 3. server.cc
~~~cpp
#include <signal.h>
#include <stdio.h>
#include "example.srpc.h"
using namespace srpc;
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
response->set_message("Hi, " + request->name());
printf("get_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(), response->DebugString().c_str());
}
};
void sig_handler(int signo) { }
int main()
{
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
SRPCServer server_tcp;
SRPCHttpServer server_http;
ExampleServiceImpl impl;
server_tcp.add_service(&impl);
server_http.add_service(&impl);
server_tcp.start(1412);
server_http.start(8811);
getchar(); // press "Enter" to end.
server_http.stop();
server_tcp.stop();
return 0;
}
~~~
#### 4. client.cc
~~~cpp
#include <stdio.h>
#include "example.srpc.h"
using namespace srpc;
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
EchoRequest req;
req.set_message("Hello, srpc!");
req.set_name("workflow");
client.Echo(&req, [](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
});
getchar(); // press "Enter" to end.
return 0;
}
~~~
#### 5. make
在Linux系统下的编译示例如下,其他平台建议到[tutorial](/tutorial)目录下使用完整的cmake文件协助解决编译依赖问题。
~~~sh
g++ -o server server.cc example.pb.cc -std=c++11 -lsrpc
g++ -o client client.cc example.pb.cc -std=c++11 -lsrpc
~~~
#### 6. run
终端1:
~~~sh
./server
~~~
终端2:
~~~sh
./client
~~~
也可以用CURL发送http请求:
~~~sh
curl 127.0.0.1:8811/Example/Echo -H 'Content-Type: application/json' -d '{message:"from curl",name:"CURL"}'
~~~
终端1输出:
~~~sh
get_req:
message: "Hello, srpc!"
name: "workflow"
set_resp:
message: "Hi, workflow"
get_req:
message: "from curl"
name: "CURL"
set_resp:
message: "Hi, CURL"
~~~
终端2输出:
~~~sh
message: "Hi, workflow"
~~~
CURL收到的回复:
~~~sh
{"message":"Hi, CURL"}
~~~
## Benchmark
* CPU 2-chip/8-core/32-processor Intel(R) Xeon(R) CPU E5-2630 v3 @2.40GHz
* Memory all 128G
* 10 Gigabit Ethernet
* BAIDU brpc-client使用连接池pooled模式
#### 跨机单client→单server在不同并发的QPS
~~~
Client = 1
ClientThread = 64, 128, 256, 512, 1024
RequestSize = 32
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### 跨机多client→单server在不同client进程数的QPS
~~~
Client = 1, 2, 4, 8, 16
ClientThread = 32
RequestSize = 32
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### 同机单client→单server在不同并发下的QPS
~~~
Client = 1
ClientThread = 1, 2, 4, 8, 16, 32, 64, 128, 256
RequestSize = 1024
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### 同机单client→单server在不同请求大小下的QPS
~~~
Client = 1
ClientThread = 100
RequestSize = 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
~~~

#### 同机单client→单server在固定QPS下的延时CDF
~~~
Client = 1
ClientThread = 50
ClientQPS = 10000
RequestSize = 1024
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
Outlier = 1%
~~~

#### 跨机多client→单server在固定QPS下的延时CDF
~~~
Client = 32
ClientThread = 16
ClientQPS = 2500
RequestSize = 512
Duration = 20s
Server = 1
ServerIOThread = 16
ServerHandlerThread = 16
Outlier = 1%
~~~

## 与我们联系
* **Email** - **[liyingxin@sogou-inc.com](mailto:liyingxin@sogou-inc.com)** - 主要作者
* **Issue** - 使用中的任何问题都欢迎到[issues](https://github.com/sogou/srpc/issues)进行交流。
* **QQ** - 群号: ``618773193``

srpc-0.10.1/WORKSPACE
workspace(name = "srpc")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "rules_proto",
sha256 = "d8992e6eeec276d49f1d4e63cfa05bbed6d4a26cfe6ca63c972827a0d141ea3b",
strip_prefix = "rules_proto-cfdc2fa31879c0aebe31ce7702b1a9c8a4be02d2",
urls = [
"https://github.com/bazelbuild/rules_proto/archive/cfdc2fa31879c0aebe31ce7702b1a9c8a4be02d2.tar.gz",
],
)
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
rules_proto_dependencies()
rules_proto_toolchains()
git_repository(
name = "workflow",
commit = "3a8c14ce6bf328978d8dca3b3bb29bf5fd02a122",
remote = "https://github.com/sogou/workflow.git")
new_git_repository(
name = "lz4",
build_file = "@//third_party:lz4.BUILD",
tag = "v1.9.3",
remote = "https://github.com/lz4/lz4.git")
new_git_repository(
name = "snappy",
build_file = "@//third_party:snappy.BUILD",
tag = "1.1.9",
remote = "https://github.com/google/snappy.git")

srpc-0.10.1/benchmark/CMakeLists.txt
cmake_minimum_required(VERSION 3.6)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "build type")
project(srpc_benchmark
LANGUAGES C CXX
)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR})
if (NOT "$ENV{LIBRARY_PATH}" STREQUAL "")
string(REPLACE ":" ";" LIBRARY_PATH $ENV{LIBRARY_PATH})
set(CMAKE_SYSTEM_LIBRARY_PATH ${LIBRARY_PATH};${CMAKE_SYSTEM_LIBRARY_PATH})
endif ()
if (NOT "$ENV{CPLUS_INCLUDE_PATH}" STREQUAL "")
string(REPLACE ":" ";" INCLUDE_PATH $ENV{CPLUS_INCLUDE_PATH})
set(CMAKE_SYSTEM_INCLUDE_PATH ${INCLUDE_PATH};${CMAKE_SYSTEM_INCLUDE_PATH})
endif ()
find_package(OpenSSL REQUIRED)
set(protobuf_MODULE_COMPATIBLE ON CACHE BOOL "")
if (WIN32)
find_package(Protobuf CONFIG REQUIRED)
find_library(LZ4_LIBRARY NAMES lz4)
find_package(Snappy CONFIG REQUIRED)
else ()
find_package(Protobuf REQUIRED)
endif ()
if (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../third_party/lz4/lib/lz4.h")
set(LZ4_LIB lz4)
endif ()
if (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../third_party/snappy/cmake")
set(SNAPPY_LIB snappy)
endif ()
if (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/workflow/workflow-config.cmake.in")
find_package(Workflow REQUIRED CONFIG HINTS ../workflow)
endif ()
find_package(srpc REQUIRED CONFIG HINTS ..)
include_directories(
${OPENSSL_INCLUDE_DIR}
${CMAKE_CURRENT_BINARY_DIR}
${Protobuf_INCLUDE_DIR}
${WORKFLOW_INCLUDE_DIR}
${SRPC_INCLUDE_DIR}
)
if (WIN32)
link_directories(${SRPC_LIB_DIR} ${WORKFLOW_LIB_DIR} ${Protobuf_LIB_DIR})
set(SRPC_GEN_PROGRAM ${SRPC_BIN_DIR}/Debug/srpc_generator.exe)
else ()
get_filename_component(Protobuf_LIB_DIR ${Protobuf_LIBRARY} DIRECTORY)
link_directories(${SRPC_LIB_DIR} ${WORKFLOW_LIB_DIR} ${Protobuf_LIB_DIR})
set(SRPC_GEN_PROGRAM ${SRPC_BIN_DIR}/srpc_generator)
endif ()
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS benchmark_pb.proto)
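# Generate the *.srpc.h wrappers for both IDL files before compiling; every
# benchmark binary below declares a dependency on this target.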
add_custom_target(
BENCHMARK_GEN ALL
COMMAND ${SRPC_GEN_PROGRAM} ${PROJECT_SOURCE_DIR}/benchmark_pb.proto ${PROJECT_SOURCE_DIR}
COMMAND ${SRPC_GEN_PROGRAM} ${PROJECT_SOURCE_DIR}/benchmark_thrift.thrift ${PROJECT_SOURCE_DIR}
COMMENT "srpc generator..."
)
if (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP /wd4200")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP /wd4200 /Zc:__cplusplus /std:c++14")
else ()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -fPIC -pipe -std=gnu90")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fPIC -pipe -std=c++11 -fno-exceptions")
endif ()
if (APPLE)
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a" ${CMAKE_FIND_LIBRARY_SUFFIXES})
find_library(Workflow_LIB workflow HINTS ../workflow/_lib)
find_library(Srpc_LIB srpc HINTS ../_lib)
set(SRPC_LIB
${Srpc_LIB}
${Workflow_LIB}
pthread
OpenSSL::SSL
OpenSSL::Crypto
protobuf
z
)
elseif (WIN32)
set(SRPC_LIB
srpc
workflow
ws2_32
wsock32
OpenSSL::SSL
OpenSSL::Crypto
protobuf::libprotobuf
ZLIB::ZLIB
Snappy::snappy
${LZ4_LIBRARY}
)
else ()
set(SRPC_LIB
srpc
workflow
pthread
OpenSSL::SSL
OpenSSL::Crypto
protobuf
z
${SNAPPY_LIB}
${LZ4_LIB}
)
endif ()
add_executable(server server.cc ${PROTO_SRCS} ${PROTO_HDRS})
target_link_libraries(server ${SRPC_LIB})
add_dependencies(server BENCHMARK_GEN)
add_executable(client client.cc ${PROTO_SRCS} ${PROTO_HDRS})
target_link_libraries(client ${SRPC_LIB})
add_dependencies(client BENCHMARK_GEN)
add_executable(client_cdf client_cdf.cc ${PROTO_SRCS} ${PROTO_HDRS})
target_link_libraries(client_cdf ${SRPC_LIB})
add_dependencies(client_cdf BENCHMARK_GEN)
add_executable(proxy proxy.cc ${PROTO_SRCS} ${PROTO_HDRS})
target_link_libraries(proxy ${SRPC_LIB})
add_dependencies(proxy BENCHMARK_GEN)

srpc-0.10.1/benchmark/GNUmakefile
ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
ALL_TARGETS := all clean
MAKE_FILE := Makefile
DEFAULT_BUILD_DIR := build.cmake
BUILD_DIR := $(shell if [ -f $(MAKE_FILE) ]; then echo "."; else echo $(DEFAULT_BUILD_DIR); fi)
CMAKE3 := $(shell if which cmake3>/dev/null ; then echo cmake3; else echo cmake; fi;)
.PHONY: $(ALL_TARGETS)
all:
mkdir -p $(BUILD_DIR)
ifeq ($(DEBUG),y)
cd $(BUILD_DIR) && $(CMAKE3) -D CMAKE_BUILD_TYPE=Debug $(ROOT_DIR)
else ifneq ("${Workflow_DIR}workflow", "workflow")
cd $(BUILD_DIR) && $(CMAKE3) -DWorkflow_DIR:STRING=${Workflow_DIR} $(ROOT_DIR)
else
cd $(BUILD_DIR) && $(CMAKE3) $(ROOT_DIR)
endif
make -C $(BUILD_DIR) -f Makefile
clean:
ifeq ($(MAKE_FILE), $(wildcard $(MAKE_FILE)))
-make -f Makefile clean
else ifeq ($(DEFAULT_BUILD_DIR), $(wildcard $(DEFAULT_BUILD_DIR)))
-make -C $(DEFAULT_BUILD_DIR) clean
endif
rm -rf $(DEFAULT_BUILD_DIR)
#g++ -o thrift_server thrift_server.cc gen-cpp/*.cpp -O2 -g -lthrift -lthriftnb -levent -lpthread -std=c++11

srpc-0.10.1/benchmark/benchmark_pb.proto
syntax="proto3";
message EmptyPBMsg { }
message FixLengthPBMsg { bytes msg = 1; }
service BenchmarkPB
{
rpc echo_pb(FixLengthPBMsg) returns (EmptyPBMsg);
rpc slow_pb(FixLengthPBMsg) returns (EmptyPBMsg);
}

srpc-0.10.1/benchmark/benchmark_thrift.thrift
service BenchmarkThrift
{
void echo_thrift(1:string msg);
void slow_thrift(1:string msg);
}

srpc-0.10.1/benchmark/client.cc
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <atomic>
#include <chrono>
#include <thread>
#include "benchmark_pb.srpc.h"
#include "benchmark_thrift.srpc.h"
using namespace srpc;
#define TEST_SECOND 20
#define GET_CURRENT_NS std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now().time_since_epoch()).count()
std::atomic<int> query_count(0);
std::atomic<int> success_count(0);
std::atomic<int> error_count(0);
std::atomic<int64_t> latency_sum(0);
volatile bool stop_flag = false;
int PARALLEL_NUMBER;
std::string request_msg;
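// Closed-loop echo: each call re-issues itself from its own callback until
// stop_flag is set, keeping PARALLEL_NUMBER requests in flight; latency is
// accumulated in nanoseconds and averaged at the end.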
template<class CLIENT>
static void do_echo_pb(CLIENT *client)
{
FixLengthPBMsg req;
req.set_msg(request_msg);
int64_t ns_st = GET_CURRENT_NS;
++query_count;
client->echo_pb(&req, [client, ns_st](EmptyPBMsg *response, RPCContext *ctx) {
if (ctx->success())
{
//printf("%s\n", ctx->get_remote_ip().c_str());
latency_sum += GET_CURRENT_NS - ns_st;
++success_count;
}
else
{
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
++error_count;
}
if (!stop_flag)
do_echo_pb(client);
//printf("echo done. seq_id=%d\n", ctx->get_task_seq());
});
}
template<class CLIENT>
static void do_echo_thrift(CLIENT *client)
{
BenchmarkThrift::echo_thriftRequest req;
req.msg = request_msg;
int64_t ns_st = GET_CURRENT_NS;
++query_count;
client->echo_thrift(&req, [client, ns_st](BenchmarkThrift::echo_thriftResponse *response, RPCContext *ctx) {
if (ctx->success())
{
//printf("%s\n", ctx->get_remote_ip().c_str());
latency_sum += GET_CURRENT_NS - ns_st;
++success_count;
}
else
{
printf("status[%d] error[%d] errmsg:%s \n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
++error_count;
}
if (!stop_flag)
do_echo_thrift(client);
//printf("echo done. seq_id=%d\n", ctx->get_task_seq());
});
}
int main(int argc, char* argv[])
{
GOOGLE_PROTOBUF_VERIFY_VERSION;
if (argc != 7)
{
fprintf(stderr, "Usage: %s \n", argv[0]);
abort();
}
WFGlobalSettings setting = GLOBAL_SETTINGS_DEFAULT;
setting.endpoint_params.max_connections = 2048;
setting.poller_threads = 16;
setting.handler_threads = 16;
WORKFLOW_library_init(&setting);
RPCClientParams client_params = RPC_CLIENT_PARAMS_DEFAULT;
client_params.task_params.keep_alive_timeout = -1;
client_params.host = argv[1];
client_params.port = atoi(argv[2]);
std::string server_type = argv[3];
std::string idl_type = argv[4];
PARALLEL_NUMBER = atoi(argv[5]);
int REQUEST_BYTES = atoi(argv[6]);
request_msg.resize(REQUEST_BYTES, 'r');
//for (int i = 0; i < REQUEST_BYTES; i++)
// request_msg[i] = (unsigned char)(rand() % 256);
int64_t start = GET_CURRENT_MS();
if (server_type == "srpc")
{
if (idl_type == "pb")
{
auto *client = new BenchmarkPB::SRPCClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
do_echo_pb(client);
}
else if (idl_type == "thrift")
{
auto *client = new BenchmarkThrift::SRPCClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
do_echo_thrift(client);
}
else
abort();
}
else if (server_type == "brpc")
{
auto *client = new BenchmarkPB::BRPCClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
{
if (idl_type == "pb")
do_echo_pb(client);
else if (idl_type == "thrift")
abort();
else
abort();
}
}
else if (server_type == "thrift")
{
auto *client = new BenchmarkThrift::ThriftClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
{
if (idl_type == "pb")
abort();
else if (idl_type == "thrift")
do_echo_thrift(client);
else
abort();
}
}
else if (server_type == "srpc_http")
{
if (idl_type == "pb")
{
auto *client = new BenchmarkPB::SRPCHttpClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
do_echo_pb(client);
}
else if (idl_type == "thrift")
{
auto *client = new BenchmarkThrift::SRPCHttpClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
do_echo_thrift(client);
}
else
abort();
}
else if (server_type == "thrift_http")
{
auto *client = new BenchmarkThrift::ThriftHttpClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
{
if (idl_type == "pb")
abort();
else if (idl_type == "thrift")
do_echo_thrift(client);
else
abort();
}
}
else
abort();
std::this_thread::sleep_for(std::chrono::seconds(TEST_SECOND));
stop_flag = true;
int64_t end = GET_CURRENT_MS();
int tot = query_count;
int s = success_count;
int e = error_count;
int64_t l = latency_sum;
fprintf(stderr, "\nquery\t%d\ttimes, %d success, %d error.\n", tot, s, e);
fprintf(stderr, "total\t%.3lf\tseconds\n", (end - start) / 1000.0);
fprintf(stderr, "qps=%.0lf\n", tot * 1000.0 / (end - start));
fprintf(stderr, "latency=%.0lfus\n", s > 0 ? l * 1.0 / s / 1000 : 0);
std::this_thread::sleep_for(std::chrono::seconds(1));
google::protobuf::ShutdownProtobufLibrary();
return 0;
}

srpc-0.10.1/benchmark/client_cdf.cc
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <atomic>
#include <chrono>
#include <thread>
#include <mutex>
#include "benchmark_thrift.srpc.h"
using namespace srpc;
#define TEST_SECOND 20
#define GET_CURRENT_NS std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now().time_since_epoch()).count()
std::atomic<int> query_count(0);
std::atomic<int> slow_count(0);
std::atomic<int> success_count(0);
std::atomic<int> error_count(0);
//std::atomic latency_sum(0);
std::vector<std::vector<int64_t>> latency_lists;
volatile bool stop_flag = false;
int PARALLEL_NUMBER;
std::string request_msg;
int QPS;
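// Open-loop load at a fixed rate: each of the PARALLEL_NUMBER threads sleeps
// usleep_gap between sends so the aggregate rate is roughly QPS; about 1 in
// 100 requests goes to the slow_* method, and per-thread latency lists are
// merged later to build the latency CDF.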
template<class CLIENT>
static void do_echo_pb(CLIENT *client, int idx)
{
std::mutex mutex;
auto& latency_list = latency_lists[idx];
FixLengthPBMsg req;
req.set_msg(request_msg);
int usleep_gap = 1000000 / QPS * PARALLEL_NUMBER;
while (!stop_flag)
{
int64_t ns_st = GET_CURRENT_NS;
if (++query_count % 100 > 0)
{
client->echo_pb(&req, [ns_st, &latency_list, &mutex](EmptyPBMsg *response, RPCContext *ctx) {
if (ctx->success())
{
//printf("%s\n", ctx->get_remote_ip().c_str());
++success_count;
//latency_sum += GET_CURRENT_NS - ns_st;
mutex.lock();
latency_list.emplace_back(GET_CURRENT_NS - ns_st);
mutex.unlock();
}
else
{
printf("status[%d] error[%d] errmsg:%s\n", ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
++error_count;
}
//printf("echo done. seq_id=%d\n", ctx->get_task_seq());
});
}
else
{
client->slow_pb(&req, [](EmptyPBMsg *response, RPCContext *ctx) {
slow_count++;
if (!ctx->success())
printf("status[%d] error[%d] errmsg:%s\n", ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
});
}
std::this_thread::sleep_for(std::chrono::microseconds(usleep_gap));
}
}
template<class CLIENT>
static void do_echo_thrift(CLIENT *client, int idx)
{
std::mutex mutex;
auto& latency_list = latency_lists[idx];
BenchmarkThrift::echo_thriftRequest req;
req.msg = request_msg;
BenchmarkThrift::slow_thriftRequest slow_req;
slow_req.msg = request_msg;
int usleep_gap = 1000000 / QPS * PARALLEL_NUMBER;
while (!stop_flag)
{
int64_t ns_st = GET_CURRENT_NS;
if (++query_count % 100 > 0)
{
client->echo_thrift(&req, [ns_st, &latency_list, &mutex](BenchmarkThrift::echo_thriftResponse *response, RPCContext *ctx) {
if (ctx->success())
{
//printf("%s\n", ctx->get_remote_ip().c_str());
++success_count;
//latency_sum += GET_CURRENT_NS - ns_st;
mutex.lock();
latency_list.emplace_back(GET_CURRENT_NS - ns_st);
mutex.unlock();
}
else
{
printf("status[%d] error[%d] errmsg:%s \n", ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
++error_count;
}
//printf("echo done. seq_id=%d\n", ctx->get_task_seq());
});
}
else
{
client->slow_thrift(&slow_req, [](BenchmarkThrift::slow_thriftResponse *response, RPCContext *ctx) {
slow_count++;
if (!ctx->success())
printf("status[%d] error[%d] errmsg:%s\n", ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
});
}
std::this_thread::sleep_for(std::chrono::microseconds(usleep_gap));
}
}
int main(int argc, char* argv[])
{
GOOGLE_PROTOBUF_VERIFY_VERSION;
if (argc != 8)
{
fprintf(stderr, "Usage: %s \n", argv[0]);
abort();
}
WFGlobalSettings setting = GLOBAL_SETTINGS_DEFAULT;
setting.endpoint_params.max_connections = 2048;
setting.poller_threads = 16;
setting.handler_threads = 16;
WORKFLOW_library_init(&setting);
RPCClientParams client_params = RPC_CLIENT_PARAMS_DEFAULT;
client_params.task_params.keep_alive_timeout = -1;
client_params.host = argv[1];
client_params.port = atoi(argv[2]);
std::string server_type = argv[3];
std::string idl_type = argv[4];
PARALLEL_NUMBER = atoi(argv[5]);
int REQUEST_BYTES = atoi(argv[6]);
QPS = atoi(argv[7]);
request_msg.resize(REQUEST_BYTES, 'r');
//for (int i = 0; i < REQUEST_BYTES; i++)
// request_msg[i] = (unsigned char)(rand() % 256);
latency_lists.resize(PARALLEL_NUMBER);
std::vector<std::thread *> th;
int64_t start = GET_CURRENT_MS();
if (server_type == "srpc")
{
if (idl_type == "pb")
{
auto *client = new BenchmarkPB::SRPCClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
th.push_back(new std::thread(do_echo_pb, client, i));
}
else if (idl_type == "thrift")
{
auto *client = new BenchmarkThrift::SRPCClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
th.push_back(new std::thread(do_echo_thrift, client, i));
}
else
abort();
}
else if (server_type == "brpc")
{
auto *client = new BenchmarkPB::BRPCClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
{
if (idl_type == "pb")
th.push_back(new std::thread(do_echo_pb, client, i));
else if (idl_type == "thrift")
abort();
else
abort();
}
}
else if (server_type == "thrift")
{
auto *client = new BenchmarkThrift::ThriftClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
{
if (idl_type == "pb")
abort();
else if (idl_type == "thrift")
th.push_back(new std::thread(do_echo_thrift, client, i));
else
abort();
}
}
else if (server_type == "srpc_http")
{
if (idl_type == "pb")
{
auto * client = new BenchmarkPB::SRPCHttpClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
th.push_back(new std::thread(do_echo_pb, client, i));
}
else if (idl_type == "thrift")
{
auto *client = new BenchmarkThrift::SRPCHttpClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
th.push_back(new std::thread(do_echo_thrift, client, i));
}
else
abort();
}
else if (server_type == "thrift_http")
{
auto *client = new BenchmarkThrift::ThriftHttpClient(&client_params);
for (int i = 0; i < PARALLEL_NUMBER; i++)
{
if (idl_type == "pb")
abort();
else if (idl_type == "thrift")
th.push_back(new std::thread(do_echo_thrift, client, i));
else
abort();
}
}
else
abort();
std::this_thread::sleep_for(std::chrono::seconds(TEST_SECOND));
stop_flag = true;
for (auto *t : th)
{
t->join();
delete t;
}
int64_t end = GET_CURRENT_MS();
int tot = query_count - slow_count;
int s = success_count;
int e = error_count;
int64_t l = 0;//latency_sum;
std::vector<int64_t> all_lc;
for (const auto& list : latency_lists)
{
for (auto v : list)
{
//fprintf(stderr, "%lld\n", (long long int)v);
l += v;
}
all_lc.insert(all_lc.end(), list.begin(), list.end());
}
sort(all_lc.begin(), all_lc.end());
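// Walk the tail of the sorted latency list: for each ratio r in [0.950, 0.999],
// idx is the smallest index with idx >= r * N, and all_lc[idx - 1] (nanoseconds)
// is printed below in microseconds as the r-quantile of the latency CDF.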
for (double r = 0.950; r <= 0.999; r += 0.001)
{
double d = r * all_lc.size();
int idx = (int)(d + 1.0e-8);
if (fabs(d - int(d)) > 1.0e-8)
idx++;
printf("%.3lf %lld\n", r, (long long int)all_lc[idx - 1]/1000);
}
//printf("%.3lf %lld\n", 1.0, (long long int)all_lc[all_lc.size() - 1]/1000);
fprintf(stderr, "\nquery\t%d\ttimes, %d success, %d error.\n", tot, s, e);
fprintf(stderr, "total\t%.3lf\tseconds\n", (end - start) / 1000.0);
fprintf(stderr, "qps=%.0lf\n", tot * 1000.0 / (end - start));
fprintf(stderr, "latency=%.0lfus\n", s > 0 ? l * 1.0 / s / 1000 : 0);
std::this_thread::sleep_for(std::chrono::seconds(1));
google::protobuf::ShutdownProtobufLibrary();
return 0;
}
srpc-0.10.1/benchmark/proxy.cc 0000664 0000000 0000000 00000012477 14545022514 0016177 0 ustar 00root root 0000000 0000000 #include
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "benchmark_pb.srpc.h"
#include "benchmark_thrift.srpc.h"
#include "workflow/WFFacilities.h"
using namespace srpc;
std::atomic<int> query_count(0); // per_second
std::atomic<int64_t> last_timestamp(0L);
//volatile bool stop_flag = false;
int max_qps = 0;
long long total_count = 0;
std::string remote_host;
unsigned short remote_port;
WFFacilities::WaitGroup wait_group(1);
inline void collect_qps()
{
int64_t ms_timestamp = GET_CURRENT_MS();
++query_count;
if (ms_timestamp / 1000 > last_timestamp)
{
last_timestamp = ms_timestamp / 1000;
int count = query_count;
query_count = 0;
total_count += count;
if (count > max_qps)
max_qps = count;
long long ts = ms_timestamp;
fprintf(stdout, "TIMESTAMP(ms) = %llu QPS = %d\n", ts, count);
}
}
template<class CLIENT>
class BenchmarkPBServiceImpl : public BenchmarkPB::Service
{
public:
void echo_pb(FixLengthPBMsg *request, EmptyPBMsg *response,
RPCContext *ctx) override
{
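/* Proxy pattern: create an asynchronous echo_pb task on the backend client
 * and push it into this server task's series, so the reply to our caller is
 * sent only after the forwarded request has finished. */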
auto *task = this->client->create_echo_pb_task(
[](EmptyPBMsg *remote_resp, srpc::RPCContext *remote_ctx) {
collect_qps();
});
task->user_data = response;
task->serialize_input(request);
ctx->get_series()->push_back(task);
}
void slow_pb(FixLengthPBMsg *request, EmptyPBMsg *response,
RPCContext *ctx) override
{
auto *task = WFTaskFactory::create_timer_task(15000, nullptr);
ctx->get_series()->push_back(task);
}
CLIENT *client;
};
template<class CLIENT>
class BenchmarkThriftServiceImpl : public BenchmarkThrift::Service
{
public:
void echo_thrift(BenchmarkThrift::echo_thriftRequest *request,
BenchmarkThrift::echo_thriftResponse *response,
RPCContext *ctx) override
{
auto *task = this->client->create_echo_thrift_task(
[](BenchmarkThrift::echo_thriftResponse *remote_resp,
srpc::RPCContext *remote_ctx) {
collect_qps();
});
task->user_data = response;
task->serialize_input(request);
ctx->get_series()->push_back(task);
}
void slow_thrift(BenchmarkThrift::slow_thriftRequest *request,
BenchmarkThrift::slow_thriftResponse *response,
RPCContext *ctx) override
{
auto *task = WFTaskFactory::create_timer_task(15000, nullptr);
ctx->get_series()->push_back(task);
}
CLIENT *client;
};
static void sig_handler(int signo)
{
wait_group.done();
}
template<template<class> class SERVICE, class CLIENT>
static void init_proxy_client(SERVICE<CLIENT>& service_impl)
{
RPCClientParams client_params = RPC_CLIENT_PARAMS_DEFAULT;
client_params.task_params.keep_alive_timeout = -1;
client_params.host = remote_host;
client_params.port = remote_port;
service_impl.client = new CLIENT(&client_params);
}
static void run_srpc_proxy(unsigned short port)
{
RPCServerParams params = RPC_SERVER_PARAMS_DEFAULT;
params.max_connections = 2048;
SRPCServer proxy_server(&params);
BenchmarkPBServiceImpl<BenchmarkPB::SRPCClient> pb_impl;
BenchmarkThriftServiceImpl<BenchmarkThrift::SRPCClient> thrift_impl;
init_proxy_client(pb_impl);
init_proxy_client(thrift_impl);
proxy_server.add_service(&pb_impl);
proxy_server.add_service(&thrift_impl);
if (proxy_server.start(port) == 0)
{
wait_group.wait();
proxy_server.stop();
}
else
perror("server start");
}
template<class SERVER, class CLIENT>
static void run_pb_proxy(unsigned short port)
{
RPCServerParams params = RPC_SERVER_PARAMS_DEFAULT;
params.max_connections = 2048;
SERVER server(&params);
BenchmarkPBServiceImpl<CLIENT> pb_impl;
init_proxy_client(pb_impl);
server.add_service(&pb_impl);
if (server.start(port) == 0)
{
wait_group.wait();
server.stop();
}
else
perror("server start");
}
template<class SERVER, class CLIENT>
static void run_thrift_proxy(unsigned short port)
{
RPCServerParams params = RPC_SERVER_PARAMS_DEFAULT;
params.max_connections = 2048;
SERVER server(&params);
BenchmarkThriftServiceImpl<CLIENT> thrift_impl;
init_proxy_client(thrift_impl);
server.add_service(&thrift_impl);
if (server.start(port) == 0)
{
wait_group.wait();
server.stop();
}
else
perror("server start");
}
int main(int argc, char* argv[])
{
GOOGLE_PROTOBUF_VERIFY_VERSION;
if (argc != 5)
{
fprintf(stderr, "Usage: %s "
" \n", argv[0]);
abort();
}
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
WFGlobalSettings my = GLOBAL_SETTINGS_DEFAULT;
my.poller_threads = 16;
my.handler_threads = 16;
WORKFLOW_library_init(&my);
unsigned short port = atoi(argv[1]);
remote_host = argv[2];
remote_port = atoi(argv[3]);
std::string server_type = argv[4];
if (server_type == "srpc")
run_srpc_proxy(port);
else if (server_type == "brpc")
run_pb_proxy(port);
else if (server_type == "thrift")
run_thrift_proxy(port);
else if (server_type == "srpc_http")
run_pb_proxy(port);
else if (server_type == "thrift_http")
run_thrift_proxy(port);
else
abort();
fprintf(stdout, "\nTotal query: %llu max QPS: %d\n", total_count, max_qps);
google::protobuf::ShutdownProtobufLibrary();
return 0;
}
srpc-0.10.1/benchmark/server.cc 0000664 0000000 0000000 00000012224 14545022514 0016312 0 ustar 00root root 0000000 0000000 #include
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "benchmark_pb.srpc.h"
#include "benchmark_thrift.srpc.h"
#include "workflow/WFFacilities.h"
#ifdef _WIN32
#include "workflow/PlatformSocket.h"
#else
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#endif
using namespace srpc;
std::atomic<int> query_count(0); // per_second
std::atomic<int64_t> last_timestamp(0L);
//volatile bool stop_flag = false;
int max_qps = 0;
long long total_count = 0;
WFFacilities::WaitGroup wait_group(1);
inline void collect_qps()
{
int64_t ms_timestamp = GET_CURRENT_MS();
++query_count;
if (ms_timestamp / 1000 > last_timestamp)
{
last_timestamp = ms_timestamp / 1000;
int count = query_count;
query_count = 0;
total_count += count;
if (count > max_qps)
max_qps = count;
long long ts = ms_timestamp;
fprintf(stdout, "TIMESTAMP(ms) = %llu QPS = %d\n", ts, count);
}
}
class BenchmarkPBServiceImpl : public BenchmarkPB::Service
{
public:
void echo_pb(FixLengthPBMsg *request, EmptyPBMsg *response,
RPCContext *ctx) override
{
collect_qps();
}
void slow_pb(FixLengthPBMsg *request, EmptyPBMsg *response,
RPCContext *ctx) override
{
auto *task = WFTaskFactory::create_timer_task(15000, nullptr);
ctx->get_series()->push_back(task);
}
};
class BenchmarkThriftServiceImpl : public BenchmarkThrift::Service
{
public:
void echo_thrift(BenchmarkThrift::echo_thriftRequest *request,
BenchmarkThrift::echo_thriftResponse *response,
RPCContext *ctx) override
{
collect_qps();
}
void slow_thrift(BenchmarkThrift::slow_thriftRequest *request,
BenchmarkThrift::slow_thriftResponse *response,
RPCContext *ctx) override
{
auto *task = WFTaskFactory::create_timer_task(15000, nullptr);
ctx->get_series()->push_back(task);
}
};
static void sig_handler(int signo)
{
wait_group.done();
}
static inline int create_bind_socket(unsigned short port)
{
int sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd >= 0)
{
struct sockaddr_in sin = { };
sin.sin_family = AF_INET;
sin.sin_port = htons(port);
sin.sin_addr.s_addr = htonl(INADDR_ANY);
if (bind(sockfd, (struct sockaddr *)&sin, sizeof sin) >= 0)
return sockfd;
close(sockfd);
}
return -1;
}
static void run_srpc_server(unsigned short port, int proc_num)
{
RPCServerParams params = RPC_SERVER_PARAMS_DEFAULT;
params.max_connections = 2048;
SRPCServer server(&params);
BenchmarkPBServiceImpl pb_impl;
BenchmarkThriftServiceImpl thrift_impl;
server.add_service(&pb_impl);
server.add_service(&thrift_impl);
int sockfd = create_bind_socket(port);
if (sockfd < 0)
{
perror("create socket");
exit(1);
}
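/* Share the listening socket created above among proc_num processes
 * (power of two): each iteration halves proc_num and doubles the
 * number of processes, so proc_num servers end up serving the port. */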
while ((proc_num /= 2) != 0)
fork();
if (server.serve(sockfd) == 0)
{
wait_group.wait();
server.stop();
}
else
perror("server start");
close(sockfd);
}
template<class SERVER>
static void run_pb_server(unsigned short port, int proc_num)
{
RPCServerParams params = RPC_SERVER_PARAMS_DEFAULT;
params.max_connections = 2048;
SERVER server(&params);
BenchmarkPBServiceImpl pb_impl;
server.add_service(&pb_impl);
int sockfd = create_bind_socket(port);
if (sockfd < 0)
{
perror("create socket");
exit(1);
}
while ((proc_num /= 2) != 0)
fork();
if (server.serve(sockfd) == 0)
{
wait_group.wait();
server.stop();
}
else
perror("server start");
close(sockfd);
}
template<class SERVER>
static void run_thrift_server(unsigned short port, int proc_num)
{
RPCServerParams params = RPC_SERVER_PARAMS_DEFAULT;
params.max_connections = 2048;
SERVER server(&params);
BenchmarkThriftServiceImpl thrift_impl;
server.add_service(&thrift_impl);
int sockfd = create_bind_socket(port);
if (sockfd < 0)
{
perror("create socket");
exit(1);
}
while ((proc_num /= 2) != 0)
fork();
if (server.serve(sockfd) == 0)
{
wait_group.wait();
server.stop();
}
else
perror("server start");
close(sockfd);
}
int main(int argc, char* argv[])
{
GOOGLE_PROTOBUF_VERIFY_VERSION;
int proc_num = 1;
if (argc == 4)
{
proc_num = atoi(argv[3]);
if (proc_num != 1 && proc_num != 2 && proc_num != 4 && proc_num != 8 && proc_num != 16)
{
fprintf(stderr, "Usage: %s [proc num (1/2/4/8/16)]\n", argv[0]);
abort();
}
}
else if (argc != 3)
{
fprintf(stderr, "Usage: %s [proc num (1/2/4/8/16)]\n", argv[0]);
abort();
}
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
WFGlobalSettings my = GLOBAL_SETTINGS_DEFAULT;
my.poller_threads = 16;
my.handler_threads = 16;
WORKFLOW_library_init(&my);
unsigned short port = atoi(argv[1]);
std::string server_type = argv[2];
if (server_type == "srpc")
run_srpc_server(port, proc_num);
else if (server_type == "brpc")
run_pb_server(port, proc_num);
else if (server_type == "thrift")
run_thrift_server(port, proc_num);
else if (server_type == "srpc_http")
run_pb_server(port, proc_num);
else if (server_type == "thrift_http")
run_thrift_server(port, proc_num);
else
abort();
fprintf(stdout, "\nTotal query: %llu max QPS: %d\n", total_count, max_qps);
google::protobuf::ShutdownProtobufLibrary();
return 0;
}
srpc-0.10.1/benchmark/test.py 0000664 0000000 0000000 00000001175 14545022514 0016031 0 ustar 00root root 0000000 0000000 import os
import time
#serverlist = [("srpc", "pb"), ("brpc", "pb"), ("thrift", "thrift")]
serverlist = [("thrift", "thrift")]
#reqlist = [16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768]
parlist = [1, 2, 4, 8, 16, 32, 64, 128, 256]
for server, idl in serverlist:
#os.system("nohup ./server 8811 %s &" % server)
for par in parlist:
#for reqsize in reqlist:
#cmd = "./echo_client %s" % reqsize
#cmd = "./client 127.0.0.1 8811 %s %s 100 %s" % (server, idl, reqsize)
cmd = "./client 127.0.0.1 8811 %s %s %s 1024" % (server, idl, par)
print(cmd)
os.system(cmd);
time.sleep(1);
#os.system("killall server")
srpc-0.10.1/benchmark/thrift_server.cc 0000664 0000000 0000000 00000002705 14545022514 0017675 0 ustar 00root root 0000000 0000000 #include "gen-cpp/BenchmarkThrift.h"
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/server/TNonblockingServer.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/concurrency/ThreadManager.h>
#include <thrift/concurrency/PosixThreadFactory.h>
#include <unistd.h>
using namespace ::apache::thrift;
using namespace ::apache::thrift::protocol;
using namespace ::apache::thrift::transport;
using namespace ::apache::thrift::server;
using namespace ::apache::thrift::concurrency;
using boost::shared_ptr;
class BenchmarkThriftHandler : virtual public BenchmarkThriftIf {
public:
void echo_thrift(const std::string& msg) { }
void slow_thrift(const std::string& msg) {
usleep(15000);
}
};
int main(int argc, char **argv) {
int port = 8811;
shared_ptr<BenchmarkThriftHandler> handler(new BenchmarkThriftHandler());
shared_ptr<TProcessor> processor(new BenchmarkThriftProcessor(handler));
shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());
boost::shared_ptr<ThreadManager> threadManager = ThreadManager::newSimpleThreadManager(16);
boost::shared_ptr<PosixThreadFactory> threadFactory = boost::shared_ptr<PosixThreadFactory>(new PosixThreadFactory());
TNonblockingServer server(processor, protocolFactory, port, threadManager);
server.setMaxConnections(2048);
server.setNumIOThreads(16);
threadManager->threadFactory(threadFactory);
threadManager->start();
server.serve();
return 0;
}
srpc-0.10.1/docs/ 0000775 0000000 0000000 00000000000 14545022514 0013472 5 ustar 00root root 0000000 0000000 srpc-0.10.1/docs/docs-01-idl.md 0000664 0000000 0000000 00000001310 14545022514 0015723 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-01-idl.md)
## 01 - RPC IDL
- 描述文件
- 前后兼容
- Protobuf/Thrift
### 示例
下面我们通过一个具体例子来呈现
- 我们拿pb举例,定义一个ServiceName为``Example``的``example.proto``文件
- rpc接口名为``Echo``,输入参数为``EchoRequest``,输出参数为``EchoResponse``
- ``EchoRequest``包括两个string:``message``和``name``
- ``EchoResponse``包括一个string:``message``
~~~proto
syntax="proto2";
message EchoRequest {
optional string message = 1;
optional string name = 2;
};
message EchoResponse {
optional string message = 1;
};
service Example {
rpc Echo(EchoRequest) returns (EchoResponse);
};
~~~
srpc-0.10.1/docs/docs-02-service.md 0000664 0000000 0000000 00000002227 14545022514 0016624 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-02-service.md)
## 02 - RPC Service
- 组成SRPC服务的基本单元
- 每一个Service一定由某一种IDL生成
- Service只与IDL有关,与网络通信具体协议无关
### 示例
下面我们通过一个具体例子来呈现
- 沿用上面的``example.proto``IDL描述文件
- 执行官方的``protoc example.proto --cpp_out=./ --proto_path=./``获得``example.pb.h``和``example.pb.cpp``两个文件
- 执行SRPC的``srpc_generator protobuf ./example.proto ./``获得``example.srpc.h``文件
- 我们派生``Example::Service``来实现具体的rpc业务逻辑,这就是一个RPC Service
- 注意这个Service没有任何网络、端口、通信协议等概念,仅仅负责完成实现从``EchoRequest``输入到输出``EchoResponse``的业务逻辑
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
response->set_message("Hi, " + request->name());
printf("get_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(),
response->DebugString().c_str());
}
};
~~~
srpc-0.10.1/docs/docs-03-server.md 0000664 0000000 0000000 00000004742 14545022514 0016477 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-03-server.md)
## 03 - RPC Server
- 每一个Server对应一个端口
- 每一个Server对应一个确定的网络通信协议
- 每一个Service可以添加到任意的Server里
- 每一个Server可以拥有任意的Service,但在当前Server里ServiceName必须唯一
- 不同IDL的Service是可以放进同一个Server中的
### 示例
下面我们通过一个具体例子来呈现
- 沿用上面的``ExampleServiceImpl``Service
- 首先,我们创建1个RPC Server、需要确定协议
- 然后,我们可以创建任意个数的Service实例、任意不同proto形成的Service,把这些Service通过``add_service()``接口添加到Server里
- 最后,通过Server的``start``或者``serve``开启服务,处理即将到来的rpc请求
- 想像一下,我们也可以从``Example::Service``派生更多的功能的rpc``Echo``不同实现的Service
- 想像一下,我们可以在N个不同的端口创建N个不同的RPC Server、代表着不同的协议
- 想像一下,我们可以把同一个ServiceIMPL实例``add_service``到不同的Server上,我们也可以把不同的ServiceIMPL实例``add_service``到同一个Server上
- 想像一下,我们可以用同一个``ExampleServiceImpl``,在三个不同端口、同时服务于BPRC-STD、SRPC-STD、SRPC-Http
- 甚至,我们可以将1个PB的``ExampleServiceImpl``和1个Thrift的``AnotherThriftServiceImpl``,``add_service``到同一个SRPC-STD Server,两种IDL在同一个端口上完美工作!
~~~cpp
int main()
{
SRPCServer server_srpc;
SRPCHttpServer server_srpc_http;
BRPCServer server_brpc;
ThriftServer server_thrift;
TRPCServer server_trpc;
TRPCHttpServer server_trpc_http;
ExampleServiceImpl impl_pb;
AnotherThriftServiceImpl impl_thrift;
server_srpc.add_service(&impl_pb);
server_srpc.add_service(&impl_thrift);
server_srpc_http.add_service(&impl_pb);
server_srpc_http.add_service(&impl_thrift);
server_brpc.add_service(&impl_pb);
server_thrift.add_service(&impl_thrift);
server_trpc.add_service(&impl_pb);
server_trpc_http.add_service(&impl_thrift);
server_srpc.start(1412);
server_srpc_http.start(8811);
server_brpc.start(2020);
server_thrift.start(9090);
server_trpc.start(2022);
server_trpc_http.start(8822);
getchar();
server_trpc_http.stop();
server_trpc.stop();
server_thrift.stop();
server_brpc.stop();
server_srpc_http.stop();
server_srpc.stop();
return 0;
}
~~~
srpc-0.10.1/docs/docs-04-client.md 0000664 0000000 0000000 00000004053 14545022514 0016443 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-04-client.md)
## 04 - RPC Client
- 每一个Client对应着一个确定的目标/一个确定的集群
- 每一个Client对应着一个确定的网络通信协议
- 每一个Client对应着一个确定的IDL
### 示例
下面我们通过一个具体例子来呈现
- 沿用上面的例子,client相对简单,直接调用即可
- 通过``Example::XXXClient``创建某种RPC的client实例,需要目标的ip+port或url
- 利用client实例直接调用rpc函数``Echo``即可,这是一次异步请求,请求完成后会进入回调函数
- 具体的RPC Context用法请看下一个段落
~~~cpp
#include <stdio.h>
#include "example.srpc.h"
#include "workflow/WFFacilities.h"
using namespace srpc;
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
EchoRequest req;
req.set_message("Hello!");
req.set_name("SRPCClient");
WFFacilities::WaitGroup wait_group(1);
client.Echo(&req, [&wait_group](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
wait_group.done();
});
wait_group.wait();
return 0;
}
~~~
### 启动参数
Client可以直接通过传入ip、port启动,或者通过参数启动。
上面的例子:
~~~cpp
Example::SRPCClient client("127.0.0.1", 1412);
~~~
等同于:
~~~cpp
struct RPCClientParams param = RPC_CLIENT_PARAMS_DEFAULT;
param.host = "127.0.0.1";
param.port = 1412;
Example::SRPCClient client(&param);
~~~
也等同于:
~~~cpp
struct RPCClientParams param = RPC_CLIENT_PARAMS_DEFAULT;
param.url = "srpc://127.0.0.1:1412";
Example::SRPCClient client(&param);
~~~
注意这里一定要使用`RPC_CLIENT_PARAMS_DEFAULT`去初始化我们的参数,里边包含了一个`RPCTaskParams`,包括默认的data_type、compress_type、重试次数和多种超时,具体结构可以参考[rpc_options.h](/src/rpc_options.h)。
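As a rough sketch (the field names such as `retry_max` follow [rpc_options.h](/src/rpc_options.h); treat them as assumptions and check that header before use), the per-task defaults inside `RPCTaskParams` can be tuned through the same parameter struct:
~~~cpp
struct RPCClientParams param = RPC_CLIENT_PARAMS_DEFAULT;
param.url = "srpc://127.0.0.1:1412";
param.task_params.retry_max = 2;                   // assumed field name; see rpc_options.h
param.task_params.keep_alive_timeout = -1;         // keep the connection alive
param.task_params.compress_type = RPCCompressGzip; // one of the compress types listed in docs-05
Example::SRPCClient client(&param);
~~~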
srpc-0.10.1/docs/docs-05-context.md 0000664 0000000 0000000 00000005374 14545022514 0016661 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-05-context.md)
## 05 - RPC Context
- RPCContext专门用来辅助异步接口,Service和Client通用
- 每一个异步接口都会提供Context,用来给用户提供更高级的功能,比如获取对方ip、获取连接seqid等
- Context上一些功能是Server或Client独有的,比如Server可以设置回复数据的压缩方式,Client可以获取请求成功或失败
- Context上可以通过get_series获得所在的series,与workflow的异步模式无缝结合
### RPCContext API - Common
#### ``long long get_seqid() const;``
请求+回复视为1次完整通信,获得当前socket连接上的通信sequence id,seqid=0代表第1次
#### ``std::string get_remote_ip() const;``
获得对方IP地址,支持ipv4/ipv6
#### ``int get_peer_addr(struct sockaddr *addr, socklen_t *addrlen) const;``
获得对方地址,in/out参数为更底层的数据结构sockaddr
#### ``const std::string& get_service_name() const;``
获取RPC Service Name
#### ``const std::string& get_method_name() const;``
获取RPC Methode Name
#### ``SeriesWork *get_series() const;``
获取当前ServerTask/ClientTask所在series
### RPCContext API - Only for client done
#### ``bool success() const;``
client专用。这次请求是否成功
#### ``int get_status_code() const;``
client专用。这次请求的rpc status code
#### ``const char *get_errmsg() const;``
client专用。这次请求的错误信息
#### ``int get_error() const;``
client专用。这次请求的错误码
#### ``void *get_user_data() const;``
client专用。获取ClientTask的user_data。如果用户通过create_xxx_task接口产生task,则可以通过user_data域记录上下文,在创建task时设置,在回调函数中拿回。
### RPCContext API - Only for server process
#### ``void set_data_type(RPCDataType type);``
Server专用。设置数据打包类型
- RPCDataProtobuf
- RPCDataThrift
- RPCDataJson
#### ``void set_compress_type(RPCCompressType type);``
Server专用。设置数据压缩类型(注:Client的压缩类型在Client或Task上设置)
- RPCCompressNone
- RPCCompressSnappy
- RPCCompressGzip
- RPCCompressZlib
- RPCCompressLz4
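A minimal sketch that combines the two setters above inside a Service implementation (reusing the `Echo` service from docs-02):
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
    void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
    {
        response->set_message("Hi, " + request->name());
        ctx->set_data_type(RPCDataJson);         // pack the reply as json
        ctx->set_compress_type(RPCCompressGzip); // and compress it with gzip
    }
};
~~~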
#### ``void set_attachment_nocopy(const char *attachment, size_t len);``
Server专用。设置attachment附件。
#### ``bool get_attachment(const char **attachment, size_t *len) const;``
Server专用。获取attachment附件。
#### ``void set_reply_callback(std::function<void (RPCContext *ctx)> cb);``
Server专用。设置reply callback,操作系统写入socket缓冲区成功后被调用。
#### ``void set_send_timeout(int timeout);``
Server专用。设置发送超时,单位毫秒。-1代表无限。
#### ``void set_keep_alive(int timeout);``
Server专用。设置连接保活时间,单位毫秒。-1代表无限。
srpc-0.10.1/docs/docs-06-workflow.md 0000664 0000000 0000000 00000012617 14545022514 0017046 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-06-workflow.md)
## 06 - 与workflow异步框架的结合
### 1. Server
下面我们通过一个具体例子来呈现
- Echo RPC在接收到请求时,向下游发起一次http请求
- 对下游请求完成后,我们将http response的body信息填充到response的message里,回复给客户端
- 我们不希望阻塞/占据着Handler的线程,所以对下游的请求一定是一次异步请求
- 首先,我们通过Workflow框架的工厂``WFTaskFactory::create_http_task``创建一个异步任务http_task
- 然后,我们利用RPCContext的``ctx->get_series()``获取到ServerTask所在的SeriesWork
- 最后,我们使用SeriesWork的``push_back``接口将http_task放到SeriesWork的后面
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
auto *http_task = WFTaskFactory::create_http_task("https://www.sogou.com", 0, 0,
[request, response](WFHttpTask *task) {
if (task->get_state() == WFT_STATE_SUCCESS)
{
const void *data;
size_t len;
task->get_resp()->get_parsed_body(&data, &len);
response->mutable_message()->assign((const char *)data, len);
}
else
response->set_message("Error: " + std::to_string(task->get_error()));
printf("Server Echo()\nget_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(),
response->DebugString().c_str());
});
ctx->get_series()->push_back(http_task);
}
};
~~~
### 2. Client
下面我们通过一个具体例子来呈现
- 我们并行发出两个请求,1个是rpc请求,1个是http请求
- 两个请求都结束后,我们再发起一次计算任务,计算两个数的平方和
- 首先,我们通过RPC Client的``create_Echo_task``创建一个rpc异步请求的网络任务rpc_task
- 然后,我们通过Workflow框架的工厂``WFTaskFactory::create_http_task``和``WFTaskFactory::create_go_task``分别创建异步网络任务http_task,和异步计算任务calc_task
- 最后,我们利用串并联流程图,乘号代表并行、大于号代表串行,将3个异步任务组合起来执行start
~~~cpp
void calc(int x, int y)
{
int z = x * x + y * y;
printf("calc result: %d\n", z);
}
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
auto *rpc_task = client.create_Echo_task([](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
});
auto *http_task = WFTaskFactory::create_http_task("https://www.sogou.com", 0, 0, [](WFHttpTask *task) {
if (task->get_state() == WFT_STATE_SUCCESS)
{
std::string body;
const void *data;
size_t len;
task->get_resp()->get_parsed_body(&data, &len);
body.assign((const char *)data, len);
printf("%s\n\n", body.c_str());
}
else
printf("Http request fail\n\n");
});
auto *calc_task = WFTaskFactory::create_go_task(calc, 3, 4);
EchoRequest req;
req.set_message("Hello!");
req.set_name("1412");
rpc_task->serialize_input(&req);
WFFacilities::WaitGroup wait_group(1);
SeriesWork *series = Workflow::create_series_work(http_task, [&wait_group](const SeriesWork *) {
wait_group.done();
});
series->push_back(rpc_task);
series->push_back(calc_task);
series->start();
wait_group.wait();
return 0;
}
~~~
### 3. Upstream
SRPC可以直接使用Workflow的任何组件,最常用的就是[Upstream](https://github.com/sogou/workflow/blob/master/docs/about-upstream.md),SRPC的任何一种client都可以使用Upstream。
我们通过参数来看看如何构造可以使用Upstream的client:
```cpp
#include "workflow/UpstreamManager.h"
int main()
{
// 1. 创建upstream并添加实例
UpstreamManager::upstream_create_weighted_random("echo_server", true);
UpstreamManager::upstream_add_server("echo_server", "127.0.0.1:1412");
UpstreamManager::upstream_add_server("echo_server", "192.168.10.10");
UpstreamManager::upstream_add_server("echo_server", "internal.host.com");
// 2. 构造参数,填上upstream的名字
RPCClientParams client_params = RPC_CLIENT_PARAMS_DEFAULT;
client_params.host = "echo_server";
client_params.port = 1412; // 这个port只用于upstream URI解析,不影响具体实例的选取
// 3. 用参数创建client,其他用法与示例类似
Example::SRPCClient client(&client_params);
...
```
如果使用了ConsistentHash或者Manual方式创建upstream,则我们往往需要对不同的task进行区分、以供选取算法使用。这时候可以使用client task上的`int set_uri_fragment(const std::string& fragment);`接口,设置请求级相关的信息。
这个域的是URI里的fragment,语义请参考[RFC3689 3.5-Fragment](https://datatracker.ietf.org/doc/html/rfc3986#section-3.5),任何需要用到fragment的功能(如其他选取策略里附带的其他信息),都可以利用这个域。
srpc-0.10.1/docs/docs-07-srpc-http.md 0000664 0000000 0000000 00000015313 14545022514 0017115 0 ustar 00root root 0000000 0000000 [English version](/docs/docs-07-srpc-http.md)
## 07 - 使用SPRC、TRPC、Thrift发送Http
**SRPC**支持**HTTP**协议,只要把**idl**的内容作填到**HTTP**的**body**中,并且在**header**里填上**idl**的类型(**json**/**protobuf**/**thrift**),就可以与其他框架通过**HTTP**协议互通,由此可以实现跨语言。
- 启动**SRPCHttpServer**/**TRPCHttpServer**/**ThriftHttpServer**,可以接收由任何语言实现的HTTP client发过来的请求;
- 启动**SRPCHttpClient**/**TRPCHttpClient**/**ThriftHttpClient**,也可以向任何语言实现的Http Server发送请求;
- **HTTP header**:`Content-Type`设置为`application/json`表示json,`application/x-protobuf`表示protobuf,`application/x-thrift`表示thrift;
- **HTTP body**: 如果body中涉及**bytes**类型,**json**中需要使用**base64**进行encode;
### 1. 示例
想实现**SRPCHttpClient**,可以把[tutorial-02-srpc_pb_client.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-02-srpc_pb_client.cc)或者[tutorial-09-client_task.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-09-client_task.cc)中的`SRPCClient`改成`SRPCHttpClient`即可。
在项目的[README.md](/docs//README_cn.md#6-run)中,我们演示了如何使用**curl**向**SRPCHttpServer**发送请求,下面我们给出例子演示如何使用**python**作为客户端,向**TRPCHttpServer**发送请求。
**proto文件:**
```proto
syntax="proto3"; // proto2 or proto3 are both supported
package trpc.test.helloworld;
message AddRequest {
string message = 1;
string name = 2;
bytes info = 3;
int32 error = 4;
};
message AddResponse {
string message = 1;
};
service Batch {
rpc Add(AddRequest) returns (AddResponse);
};
```
**python客户端:**
```py
import json
import requests
from base64 import b64encode
class Base64Encoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, bytes):
return b64encode(o).decode()
return json.JSONEncoder.default(self, o)
headers = {'Content-Type': 'application/json'}
req = {
'message': 'hello',
'name': 'k',
'info': b'i am binary'
}
print(json.dumps(req, cls=Base64Encoder))
ret = requests.post(url = "http://localhost:8800/trpc.test.helloworld.Batch/Add",
headers = headers, data = json.dumps(req, cls=Base64Encoder))
print(ret.json())
```
### 2. 请求路径拼接
[README.md](/docs//README_cn.md#6-run)中,我们可以看到,路径是由service名和rpc名拼接而成的。而对于以上带package名 `package trpc.test.helloworld;`的例子, package名也需要拼接到路径中,**SRPCHttp** 和 **TRPCHttp** 的拼接路径方式并不一样,而**ThriftHttp**由于SRPC的thrift不支持多个service所以无需拼接任何路径。
我们以**curl**为例子:
与**SRPCHttpServer**互通:
```sh
curl 127.0.0.1:8811/trpc/test/helloworld/Batch/Add -H 'Content-Type: application/json' -d '{...}'
```
与**TRPCHttpServer**互通:
```sh
curl 127.0.0.1:8811/trpc.test.helloworld.Batch/Add -H 'Content-Type: application/json' -d '{...}'
```
与**ThriftHttpServer**互通:
```sh
curl 127.0.0.1:8811 -H 'Content-Type: application/json' -d '{...}'
```
### 3. HTTP状态码
SRPC支持server在`process()`中设置状态码,接口为**RPCContext**上的`set_http_code(int code)`。只有在框架能够正确处理请求的情况下,该错误码才有效,否则会被设置为框架层级的错误码。
**用法:**
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
if (req->name() != "workflow")
ctx->set_http_code(404); // 设置HTTP状态码404
else
resp->set_message("Hi back");
}
};
~~~
**CURL命令:**
~~~sh
curl -i 127.0.0.1:1412/Example/Echo -H 'Content-Type: application/json' -d '{message:"from curl",name:"CURL"}'
~~~
**结果:**
~~~sh
HTTP/1.1 404 Not Found
SRPC-Status: 1
SRPC-Error: 0
Content-Type: application/json
Content-Encoding: identity
Content-Length: 21
Connection: Keep-Alive
~~~
**注意:**
我们依然可以通过返回结果的header中的`SRPC-Status: 1`来判断这个请求在框架层面是正确的,`404`是来自server的状态码。
### 4. HTTP Header
用户可以通过以下三个接口来设置/获取http header:
~~~cpp
bool get_http_header(const std::string& name, std::string& value) const;
bool set_http_header(const std::string& name, const std::string& value);
bool add_http_header(const std::string& name, const std::string& value);
~~~
对于**server**来说,这些接口在`RPCContext`上。
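A server-side sketch (reusing the `Echo` service; the header names mirror the client example below):
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
    void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
    {
        std::string value;
        if (ctx->get_http_header("client_key", value)) // header carried by the request
            resp->set_message("Hi back, " + value);
        else
            resp->set_message("Hi back");
        ctx->set_http_header("server_key", "server_value"); // header carried by the reply
    }
};
~~~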
对于**client**来说,需要通过`RPCClientTask`设置**请求上的http header**、并且在回调函数的`RPCContext`上**获取回复上的http header**,用法如下所示:
~~~cpp
int main()
{
Example::SRPCHttpClient client("127.0.0.1", 80);
EchoRequest req;
req.set_message("Hello, srpc!");
auto *task = client.create_Echo_task([](EchoResponse *resp, RPCContext *ctx) {
if (ctx->success())
{
std::string value;
ctx->get_http_header("server_key", value); // 获取回复中的header
}
});
task->serialize_input(&req);
task->set_http_header("client_key", "client_value"); // 设置请求中的header
task->start();
wait_group.wait();
return 0;
}
~~~
### 5. IDL传输格式问题
如果我们填写的是Protobuf且用的标准为proto3,每个域由于没有optional和required区分,所以都是带有默认值的。如果我们设置的值正好等于默认值,则proto3不能识别为被set过,就不能被序列化的时候发出。
在protobuf转json的过程中,SRPC在**RPCContext上**提供了几个接口,支持 [JsonPrintOptions](https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.util.json_util#JsonPrintOptions) 上的功能。具体接口与用法描述可以查看:[rpc_context.h](/src/rpc_context.h)
**示例:**
```cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
resp->set_message("Hi back");
resp->set_error(0); // 0是error类型int32在proto3中的默认值
ctx->set_json_always_print_primitive_fields(true); // 带上所有原始域
ctx->set_json_add_whitespace(true); // 增加json格式的空格
}
};
```
**原始输出:**
```sh
{"message":"Hi back"}
```
**通过RPCContext设置过json options之后的输出:**
```sh
{
"message": "Hi back",
"error": 0
}
```
srpc-0.10.1/docs/docs-08-tracing.md 0000664 0000000 0000000 00000011143 14545022514 0016616 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-08-tracing.md)
## 08 - 上报Tracing到OpenTelemetry
**SRPC**支持产生和上报链路信息trace和span,并且可以通过多种途径进行上报,其中包括本地导出数据和上报到[OpenTelemetry](https://opentelemetry.io).
**SRPC**遵循**OpenTelemetry**的[数据规范(data specification)](https://github.com/open-telemetry/opentelemetry-specification)以及[w3c的trace context](https://www.w3.org/TR/trace-context/),因此可以使用插件**RPCTraceOpenTelemetry**进行上报。
秉承着**Workflow**的风格,所有的上报都是异步任务模式,对RPC的请求和服务不会产生任何性能影响。
### 1. 用法
先构造插件`RPCTraceOpenTelemetry`,然后通过`add_filter()`把插件加到**server**或**client**中。
以[tutorial-02-srpc_pb_client.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-02-srpc_pb_client.cc)作为client的示例,我们如下加两行代码:
```cpp
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
RPCTraceOpenTelemetry span_otel("http://127.0.0.1:4318");
client.add_filter(&span_otel);
...
}
```
以[tutorial-01-srpc_pb_server.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-01-srpc_pb_server.cc)作为server的示例,也增加类似的两行。同时我们还增加一个本地插件,用于把本次请求的trace数据也在屏幕上打印:
```cpp
int main()
{
SRPCServer server;
RPCTraceOpenTelemetry span_otel("http://127.0.0.1:4318");
server.add_filter(&span_otel);
RPCTraceDefault span_log; // 这个插件会把本次请求的trace信息打到屏幕上
server.add_filter(&span_log);
...
}
```
执行命令`make tutorial`可以编译出示例程序,并且分别把server和client跑起来,我们可以在屏幕上看到一些tracing信息:
我们可以看到上图client这边的span_id: **04d070f537f17d00**,它在下图server这里变成了parent_span_id: **04d070f537f17d00**:
### 2. Traces上报到Jaeger
打开Jaeger的主页,我们可以找到我们名为**Example**的服务(service)和名为**Echo**的函数(method)。这一个tracing记录上有两个span节点,是由server和client分别上报的。
我们可以在Jaeger看到client所上报的span_id: **04d070f537f17d00**和server所上报的span_id: **00202cf737f17d00**,同时也是能对应上刚才在屏幕看到的id的。
### 3. 参数
多久收集一份trace信息、上报请求的重试次数、以及其他参数,都可以通过`RPCTraceOpenTelemetry`的构造函数指定。代码参考:[src/module/rpc_trace_filter.h](https://github.com/sogou/srpc/blob/master/src/module/rpc_trace_filter.h#L238)
默认每秒收集1000条trace信息,并且透传tracing信息等其他功能也已遵循上述规范实现。
### 4. Attributes
我们可以通过`add_attributes()`添加某些额外的信息,比如数据规范中的OTEL_RESOURCE_ATTRIBUTES。
注意我们的service名"Example"也是设置到attributes中的,key为`service.name`。如果用户也在OTEL_RESOURCE_ATTRIBUTES中使用了`service.name`这个key,则SRPC的service name优先级更高,参考:[OpenTelemetry#resource](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/src/OpenTelemetry#resource)
### 5. Log和Baggage
SRPC提供了`log()`和`baggage()`接口,用户可以添加需要通过链路透传的数据。
```cpp
void log(const RPCLogVector& fields);
void baggage(const std::string& key, const std::string& value);
```
作为Server,我们可以使用`RPCContext`上的接口来添加log:
```cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
resp->set_message("Hi back");
ctx->log({{"event", "info"}, {"message", "rpc server echo() end."}});
}
};
```
作为client,我们可以使用`RPCClientTask`上的接口添加log:
```cpp
srpc::SRPCClientTask *task = client.create_Echo_task(...);
task->log({{"event", "info"}, {"message", "log by rpc client echo()."}});
```
srpc-0.10.1/docs/docs-09-metrics.md 0000664 0000000 0000000 00000026411 14545022514 0016642 0 ustar 00root root 0000000 0000000 [English version](/docs/en/docs-09-metrics.md)
## 09 - 上报Metrics
**Metrics**(指标)是常用的监控需求,**SRPC**支持产生与统计Metrics,并通过多种途径上报,其中包括上报到[Prometheus](https://prometheus.io/)和[OpenTelemetry](https://opentelemetry.io)。
秉承着**Workflow**的风格,所有的上报都是异步任务推送或拉取模式,对RPC的请求和服务不会产生任何性能影响。
本文档会介绍Metrics的概念、结合tutorial-16对接口进行讲解、上报Prometheus的特点、上报OpenTelemetry的特点、以及介绍使用了thread local进行性能提速的Var模块。
### 1. Metrics概念介绍
**Prometheus**的数据类型可以参考官方文档:[Concepts - Metrics](https://prometheus.io/docs/concepts/metric_types/) 和 [Type of Metrics](https://prometheus.io/docs/tutorials/understanding_metric_types/)。
**OpenTelemetry**的可以参考[数据规范(data specification)](https://github.com/open-telemetry/opentelemetry-specification)以及[Metrics的datamodel.md](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md)。
其中四种基本指标的概念都是一致的,因此SRPC对这四种指标进行了最基本的支持,可分别对应到Prometheus和OpenTelemetry的上报数据中。参考下表:
|指标类型|Prometheus|OpenTelemetry| SRPC中的创建接口 | SRPC中的常用操作 |
|-------|----------|------------|-----------------|-------------------|
|单个数值| Gauge | Gauge | create_gauge(name, help); | void increase(); void decrease();|
|计数器 | Counter | Sum | create_counter(name, help); |GaugeVar *add(labels);|
|直方图 |Histogram | Histogram | create_histogram(name, help, buckets); |void observe(data);|
|采样 | Summary | Summary | create_summary(name, help, quantiles); |void observe(data);|
四种指标的大致描述:
1. **单个数值**:简单数值的度量,可以简单的增减。
2. **计数器**:累计的度量,可以通过添加若干**label**去区分不同label下的数值。
3. **直方图**:对观察observer()得到的接口进行分区间累加,因此需要传入**buckets**告知需要划分的区间;
比如传入的bucket分桶值是{ 1, 10, 100 }这3个数,则我们可以得到数据分布于{ 0 - 1, 0 - 10, 0 - 100, 0 - +Inf }4个区间的数据累计值,同时也会得到整体的总以及数据个数。
4. **采样**:与直方图类似,但传入的是**分位数quantiles**,比如{0.5, 0.9}以及它们的精度,主要用于事先不知道数据分布具体数值的场景(相比之下,直方图需要传入具体数值)。
采样是带有**时间窗口**的,可以通过接口指定**max_age统计窗口时长**以及**age_bucket内部分桶个数**,SRPC中默认的是60秒分5个桶切换时间窗口。
考虑到**采样Summary**本身的统计复杂性,一般来说建议优先使用**直方图Histogram**。
为了方便用户使用,目前SRPC里这四种指标都使用double类型进行统计。
### 2. 用法示例
我们结合[tutorial-16-server_with_metrics.cc](/tutorial/tutorial-16-server_with_metrics.cc)看看基本用法,本示例虽然是server,但client中的用法也是一样的。
#### (1) 创建插件
我们选择Prometheus作为我们的上报对象,因此需要使用**RPCMetricsPull**插件。
~~~cpp
#include "srpc/rpc_metrics_filter.h" // Metrics插件所在的头文件
int main()
{
SRPCServer server;
ExampleServiceImpl impl;
RPCMetricsPull filter; // 创建一个插件
filter.init(8080); // 配合Prometheus中填好的收集数据的端口
~~~
#### (2) 添加指标
这个**RPCMetricsPull**插件本身自带了统计部分常用指标,包括整体请求个数统计、按照service和method作为label的不同维度的请求个数统计以及请求耗时的分位数统计等。
用户可以自行增加想要统计的值,这里我们增加一个用于统计请求大小的直方图histogram,名字为"echo_request_size",上报时的指标描述信息是"Echo request size",数据bucket划分是 { 1, 10, 100 } 。
~~~cpp
filter.create_histogram("echo_request_size", "Echo request size", {1, 10, 100});
~~~
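Besides the histogram, a summary with explicit quantiles can be registered in the same place. A sketch (the quantile/error pairs here are illustrative, and the exact parameter type follows [rpc_metrics_filter.h](/src/module/rpc_metrics_filter.h)):
~~~cpp
// 0.5 and 0.9 quantiles, each tolerating 5% error, over the default time window
filter.create_summary("echo_request_delay", "Echo request delay",
                      {{0.5, 0.05}, {0.9, 0.05}});
~~~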
说明:
**添加指标的时机**
指标是可以随时添加的,即使server/client跑起来之后也可以。但**必须在操作这个指标之前添加**,否则获取指标的时候会获取到空指针,无法进行统计操作。
**指标的名字**
指标的名字是**全局唯一**的(无论是四种基本类型中的哪种),且**只能包含大小写字母和下划线,即a-z, A-Z, _** 。如果使用已经存在的名字创建指标,则会创建失败并返回NULL。
一旦创建成功,我们之后都会使用这同一个名字去操作这个指标。
**创建其他指标的接口**
可以查看刚才include的头文件[rpc_metrics_filter.h](/src/module/rpc_metrics_filter.h)中的:`class RPCMetricsFilter`。
#### (3) 把插件添加到server/client中
由于我们需要操作指标时,是需要调用这个插件上的接口的,因此我们在service中保留一下这个指针。这只是示例程序的用法,用户可以使用自己习惯的方式:
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override;
void set_filter(RPCMetricsPull *filter) { this->filter = filter; }
private:
RPCMetricsPull *filter; // 保留了插件的指针,并实现接口设置进去,这并非SRPC框架的接口
};
~~~
main函数中继续这样写:
~~~cpp
int main()
{
...
impl.set_filter(&filter); // 设置到我们刚才为service留的接口中
server.add_filter(&filter); // client也一样可以调用add_filter()
server.add_service(&impl);
filter.deinit();
return 0;
}
~~~
#### (4) 操作指标
我们在每次收到请求的时候,都把EchoRequest的大小统计到刚才创建的直方图指标上:
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
resp->set_message("Hi back");
this->filter->histogram("echo_request_size")->observe(req->ByteSizeLong());
}
~~~
可以看到,我们通过filter上的histogram()接口,就可以带着刚才的名字去到指标的指针,并且通过observe()填入数据大小。
四种基本类型的获取接口如下:
~~~cpp
class RpcMetricsFilter : public RPCFilter
{
GaugeVar *gauge(const std::string& name);
CounterVar *counter(const std::string& name);
HistogramVar *histogram(const std::string& name);
SummaryVar *summary(const std::string& name);
~~~
如果找到,会返回四种基本指标类型的指针,可以如示例进行下一步操作比如histogram的统计接口observe(),**注意:不存在这个名字则会返回空指针,因此要保证我们拿到的变量一定是成功创建过的**。
四种类型常用的操作接口如上文表格所示,具体可以参考[rpc_var.h](/src/var/rpc_var.h)。
值得说明一下Counter类型的接口:
~~~cpp
class CounterVar : public RPCVar
{
GaugeVar *add(const std::map& labels);
~~~
接口可以对Counter指标添加某个维度的Gauge值,各维度的统计是分开的,且labels为一个map,即可以通过多组label指定一个维度,比如:
~~~cpp
filter->counter("service_method_count")->add({{"service", "Example"}, {"method", "Echo"}})->increase();
~~~
就可以获得一个针对`{service="Example",method="Echo"}`统计出来的数值。
#### (5) 自动上报
SRPC的插件都是自动上报的,因此无需用户调用任何接口。我们尝试调用client发送请求产生一些统计数据,然后看看上报出来的数据是什么。
~~~sh
./srpc_pb_client
message: "Hi back"
message: "Hi back"
~~~
由于Prometheus是使用Pull模式拉取,即会通过我们注册到Prometheus的端口和/metrics进行拉取,也就是我们刚才初始化上报插件需要配对上端口的原因。通过与Prometheus相同的方式,我们可以本地访问一下,会看到这样的一些数据:
~~~sh
curl localhost:8080/metrics
# HELP total_request_count total request count
# TYPE total_request_count gauge
total_request_count 2.000000
# HELP total_request_method request method statistics
# TYPE total_request_method counter
total_request_method{method="Echo",service="Example"} 2.000000
# HELP total_request_latency request latency nano seconds
# TYPE total_request_latency summary
total_request_latency{quantile="0.500000"} 645078.500000
total_request_latency{quantile="0.900000"} 645078.500000
total_request_latency_sum 1290157.000000
total_request_latency_count 2
# HELP echo_request_size Echo request size
# TYPE echo_request_size histogram
echo_request_size_bucket{le="1.000000"}0
echo_request_size_bucket{le="10.000000"}0
echo_request_size_bucket{le="100.000000"}2
echo_request_size_bucket{le="+Inf"} 2
echo_request_size_sum 40.000000
echo_request_size_count 2
~~~
可以看到,我们针对tutorial-02的client产生的两个请求,分别获得的四种基本指标的统计数据。其中histogram是我们创建的,而gauge、counter、summary都是插件中自带的。插件中默认统计的数据还会陆续添加,以方便开发者。
### 3. 上报Prometheus的特点
上报Prometheus主要特点刚才已经大概描述过:
1. 使用Pull模式,定期被收集;
2. 需要指定我们被收集数据的端口;
3. 通过/metrics返回具体数据内容;
4. 数据内容为Prometheus所约定的string格式;
界面参考如下:

### 4. 上报OpenTelemetry的特点
上报OpenTelemetry的主要特点:
1. 使用推送模式,定期发送http请求;
2. 插件需要填入要上报的URL;
3. 内部默认通过/v1/metrics上报数据;
4. 数据内容为OpenTelemetry所约定的[protobuf](/src/module/proto/opentelemetry_metrics.proto);
基本接口参考:
~~~cpp
class RPCMetricsOTel : public RPCMetricsFilter
{
public:
RPCMetricsOTel(const std::string& url);
RPCMetricsOTel(const std::string& url, unsigned int redirect_max,
unsigned int retry_max, size_t report_threshold,
size_t report_interval);
~~~
用户可以指定累计多少个请求上报一次或者累计多久上报一次,默认为累计100个请求或1秒上报一次。
### 5. Var模块
上面可以看到,我们每个请求都会对全局唯一名称指定的变量进行操作,那么多线程调用时,对复杂的指标类型(比如直方图或者采样)操作会成为性能瓶颈吗?
答案是不会的,因为通过filter获取对应指标的接口是**thread local**的。
SRPC内部引入了线程安全的var结构,每次获取var时调用的接口,拿到的都是thread local的指标指针,每次统计也都是分别收集到本线程,因此多线程情况下的统计不会造成对全局的争抢。而上报是异步上报的,在上报被触发的时候,全局会通过expose()挨个把每个线程中相应的指标reduce()到一起,最后通过具体模块需要的格式进行上报。
srpc-0.10.1/docs/docs-10-http-with-modules.md 0000664 0000000 0000000 00000030145 14545022514 0020561 0 ustar 00root root 0000000 0000000
## 10 - 带生态插件的HttpServer和HttpClient
**srpc**提供带有插件功能的**HttpServer**和**HttpClient**,可以上报**trace**和**metrcis**,用法和功能完全兼容**Workflow**,添加插件的用法也和srpc目前的Server和Client一样,用于使用**Http功能**同时需**要采集trace、metrics等信息并上报**的场景。
此功能更加适用于原先已经使用Workflow收发Http的开发者,几乎不改动现有代码即可拥有生态上报功能。
(1) 补充插件基本介绍:
- **链路信息**:如何使用**trace**插件,并上报到[OpenTelemetry](https://opentelemetry.io)? 参考:[docs-08-tracing.md](https://github.com/sogou/srpc/blob/master/docs/docs-08-tracing.md)
- **监控指标**:如何使用**metrics**插件,并且上报到[Prometheus](https://prometheus.io/)和[OpenTelemetry](https://opentelemetry.io)?参考:[docs-09-metrics.md](https://github.com/sogou/srpc/blob/master/docs/docs-09-metrics.md)
(2) 补充Workflow中的Http用法:
- [Http客户端任务:wget](https://github.com/sogou/workflow/blob/master/docs/tutorial-01-wget.md)
- [Http服务器:http_echo_server](https://github.com/sogou/workflow/blob/master/docs/tutorial-04-http_echo_server.md)
(3) 补充SRPC中带插件上报功能的示例代码:
- [tutorial-17-http_server.cc](/tutorial/tutorial-17-http_server.cc)
- [tutorial-18-http_client.cc](/tutorial/tutorial-18-http_client.cc)
### 1. HttpServer用法
```cpp
int main()
{
// 1. 构造方式与Workflow类似,Workflow用法是:
// WFHttpServer server([](WFHttpTask *task){ /* process */ });
srpc::HttpServer server([](WFHttpTask *task){ /* process */ });
// 2. 插件与SRPC通用,添加方式一致
srpc::RPCTraceOpenTelemetry otel("http://127.0.0.1:4318");
server.add_filter(&otel);
// 3. server启动方式与Workflow/SRPC一样
if (server.start(1412) == 0)
{
...
}
}
```
由于这里直接收发Http请求,因此有几个注意点:
1. 操作的是`WFHttpTask`(与Workflow中的一致),而不是由Protobuf或者Thrift等IDL定义的结构体;
2. 接口也不再是RPC里定义的**Service**了,因此也无需派生ServiceImpl实现RPC函数,而是直接给Server传一个`process函数`;
3. `process函数`格式也与Workflow一致:`std::function<void (WFHttpTask *task)>`;
4. process函数里拿到的参数是task,通过task->get_req()和task->get_resp()可以拿到请求与回复,分别是`HttpRequest`和`HttpResponse`,而其他上下文也在task而非**RPCContext**上;
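Putting these notes together, a minimal `process` function might look like this (a sketch; the reply body matches the tutorial output shown later in this document):
```cpp
void process(WFHttpTask *task)
{
    protocol::HttpRequest *req = task->get_req();
    protocol::HttpResponse *resp = task->get_resp();

    fprintf(stderr, "http server get request_uri: %s\n", req->get_request_uri());
    resp->append_output_body("Hello from server!");
}
```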
### 2. HttpClient用法
```cpp
int main()
{
// 1. 先构造一个client(与Workflow用法不同,与SRPC用法类似)
srpc::HttpClient client;
// 2. 插件与SRPC通用,添加方式一致
srpc::RPCTraceDefault trace_log;
client.add_filter(&trace_log);
// 3. 发出Http请求的方式与Workflow类似,Workflow用法是:
// WFHttpTask *task = WFTaskFactory::create_http_task(...);
// 函数名、参数、返回值与Workflow用法一致
WFHttpTask *task = client.create_http_task("http://127.0.0.1:1412",
REDIRECT_MAX,
RETRY_MAX,
[](WFHttpTask *task){ /* callback */ });
task->start();
...
return 0;
}
```
同样几个注意点:
1. 操作的是`WFHttpTask`(与Workflow中的一致),而不是由Protobuf或者Thrift等IDL定义的结构;
2. 如果想拿到req,可以`task->get_req()`,拿到的是`HttpRequest`,而不是XXXRequest之类;
3. 和请求相关的上下文也不再需要**RPCContext**了,都在task上;
4. callback函数格式也与Workflow一致:`std::function<void (WFHttpTask *task)>`;
### 3. Http协议采集的数据
**OpenTelemetry的trace数据官方文档**:
https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/http/
SRPC框架的trace模块已经采集以下内容,并会通过各自的filter以不同的形式发出。
**1. 公共指标**
| 指标名 | 含义 | 类型 | 例子 | 备注 |
|-------|-----|-----|------|-----|
|task.state| 框架状态码 | int | 0 |(以下简称state) |
|task.error| 框架错误码 | int | 1 |state!=0时才有 |
|http.status_code| Http返回码 | string | 200 | state=0时才有 |
|http.method| Http请求方法 | string | GET | |
|http.scheme| scheme | string | https | |
|http.request_content_length | 请求大小 | int | 3840 | state=0时才有 |
|http.response_content_length| 回复大小 | int | 141 | state=0时才有 |
|net.sock.family| 协议地址族 | string | inet | |
|net.sock.peer.addr| 远程地址 | string | 10.xx.xx.xx | state=0时才有 |
|net.sock.peer.port| 远程端口 | int | 8080 | state=0时才有 |
**2. Client指标**
| 指标名 | 含义 | 类型 | 例子 | 备注 |
|-------|-----|-----|------|-----|
|srpc.timeout_reason| 超时原因 | int | 2 | state=WFT_STATE_SYS_ERROR(1)和error=ETIMEDOUT(116)时才有 |
|http.resend_count| 框架重试次数 | int | 0 | state=0时才有 |
|net.peer.addr| uri请求的地址 | string | 10.xx.xx.xx | |
|net.peer.port| uri请求的端口 | int | 80 | |
|* http.url| 请求的url | string | | |
state与error参考:[workflow/src/kernel/Communicator.h](workflow/src/kernel/Communicator.h)
timeout_reason参考:[workflow/src/kernel/CommRequest.h](workflow/src/kernel/CommRequest.h)
**3. Server指标**
| 指标名 | 含义 | 类型 | 例子 | 备注 |
|-------|-----|-----|------|-----|
|net.host.name| 服务器名称 | string | example.com | 虚拟主机名,来自比如对方header里发过来的Host信息等 |
|net.host.port| 监听的端口 | int | 80 | |
|http.target| 完整的请求目标 | string | /users/12314/?q=ddds | |
|http.client_ip| 原始client地址 | string | 10.x.x.x | 有时候会有,比如转发场景下,header中带有“X-Forwarded-For”等 |
**OpenTelemetry的metrics数据官方文档(SRPC正在支持中)**:
https://opentelemetry.io/docs/reference/specification/metrics/semantic_conventions/http-metrics/
### 4. 示例数据
以下通过client-server的调用,我们可以看到这样的链路图:
```
[trace_id : 005028aa52fb0000005028aa52fb0002] [client]->[server]
timeline: 1681380123462593000 1681380123462895000 1681380123463045000 1681380123463213000
[client][begin].........................................................................[end]
span_id : 0400fb52aa285000
[server][begin].....................[end]
span_id : 00305c54aa285000
parent_span_id : 0400fb52aa285000
```
先通过`make tutorial`命令可以把tutorial里的`http_server`和`http_client`编出来,其中tutorial-17-http_server.cc中可以把上报OpenTelemetry的插件打开:
```cpp
srpc::RPCTraceOpenTelemetry otel("http://127.0.0.1:4318");
srpc::HttpServer server(process);
server.add_filter(&otel);
...
```
分别按照如下执行,可以看到我们的client通过RPCTraceDefault插件本地打印的trace信息:
```sh
./http_client
```
```
callback. state = 0 error = 0
Hello from server!
finish print body. body_len = 31
[SPAN_LOG] trace_id: 005028aa52fb0000005028aa52fb0002 span_id: 0400fb52aa285000 start_time: 1681380123462593000 finish_time: 1681380123463213000 duration: 620000(ns) http.method: GET http.request_content_length: 123145504128464 http.resend_count: 0 http.response_content_length: 31 http.scheme: http http.status_code: 200 net.peer.name: 127.0.0.1 net.peer.port: 1412 net.sock.family: inet net.sock.peer.addr: 127.0.0.1 net.sock.peer.port: 1412 component: srpc.srpc span.kind: srpc.client state: 0
^C
```
我们把server对OpenTelemetry上报的Protobuf内容也同时打印出来:
```sh
./http_server
```
```sh
http server get request_uri: /
[SPAN_LOG] trace_id: 005028aa52fb0000005028aa52fb0002 span_id: 00305c54aa285000 parent_span_id: 0400fb52aa285000 start_time: 1681380123462895000 finish_time: 1681380123463045000 duration: 150000(ns) http.method: GET http.request_content_length: 0 http.response_content_length: 31 http.scheme: http http.status_code: 200 http.target: / net.host.name: 127.0.0.1:1412 net.host.port: 1412 net.sock.family: inet net.sock.peer.addr: 127.0.0.1 net.sock.peer.port: 56591 component: srpc.srpc span.kind: srpc.server state: 0
resource_spans {
resource {
}
instrumentation_library_spans {
spans {
trace_id: "\000P(\252R\373\000\000\000P(\252R\373\000\002"
span_id: "\0000\\T\252(P\000"
parent_span_id: "\004\000\373R\252(P\000"
name: "GET"
kind: SPAN_KIND_SERVER
start_time_unix_nano: 1681380123462895000
end_time_unix_nano: 1681380123463045000
attributes {
key: "http.method"
value {
string_value: "GET"
}
}
attributes {
key: "http.request_content_length"
value {
int_value: 0
}
}
attributes {
key: "http.response_content_length"
value {
int_value: 31
}
}
attributes {
key: "http.scheme"
value {
string_value: "http"
}
}
attributes {
key: "http.status_code"
value {
int_value: 200
}
}
attributes {
key: "http.target"
value {
string_value: "/"
}
}
attributes {
key: "net.host.name"
value {
string_value: "127.0.0.1:1412"
}
}
attributes {
key: "net.host.port"
value {
int_value: 1412
}
}
attributes {
key: "net.sock.family"
value {
string_value: "inet"
}
}
attributes {
key: "net.sock.peer.addr"
value {
string_value: "127.0.0.1"
}
}
attributes {
key: "net.sock.peer.port"
value {
int_value: 56591
}
}
status {
}
}
}
}
```
可以看到用法和SRPC框架默认的trace模块是一样的。
### 5. 其他
补充一些常见问题,帮助开发中更好地理解这个模块。
**Q1: 本次新增的HttpServer与HttpClient,与SRPCHttpServer/SRPCHttpClient有什么共同点/不同点?**
共同点:
- 应用层协议相同,都是Http协议进行网络收发;
- 采集的数据依据相同,都是从Http Header收集和透传出去;
不同点:
- 用法不同:本次新增的Http模块是延续Workflow风格的用法,比如WFHttpTask;而SRPCHttp/ThriftHttp/TRPCHttp的用法是RPC模式,且包括了同步/异步/半同步的使用方式;
- 接口不同:前者用url直接定位要发的请求,而server也是一个process函数作为处理请求的统一入口;而原先的模块对Http只是网络层面的收发,url中的路由信息是通过${service}和${method}进行拼接的,然后把Protobuf或者Thrift这个结构体作为Http协议的body发出;
- 开发者接触的请求/回复不同:前者从task上拿出HttpRequest和HttpResponse,后者是Protobuf/Thrift里定义的Message;
- 框架级state和error略有不同:前者是task.state和task.error,使用workflow的状态码,比如0表示成功;而后者是srpc.state和srpc.error,使用SRPC的[状态码](/src/rpc_basic.h),比如1表示成功;
**Q2: 和Workflow原生的Http协议是什么关系?**
srpc中新增的功能,开发者拿到的也是Workflow中定义的WFHttpTask,在实现上进行了行为派生,因此收发期间有几个切面可以进行一些模块化编程,这是和Workflow的Http相比更多的功能。
只要开发者把模块通过add_filter()加入到Server/Client中,通过产生的任务就是带有切面功能的,而服务开发者无需感知。
**Q3: 为什么放在SRPC项目中?**
目前生态插件所需要的几个功能都在SRPC项目中,包括:
- 收集数据的rpc_module,包括生成trace_id等通用功能;
- 上报信息的rpc_filter,包括使用Protobuf格式上报OpenTelemetry;
- 统计监控指标的rpc_var;
srpc-0.10.1/docs/en/ 0000775 0000000 0000000 00000000000 14545022514 0014074 5 ustar 00root root 0000000 0000000 srpc-0.10.1/docs/en/docs-01-idl.md 0000664 0000000 0000000 00000001437 14545022514 0016337 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-01-idl.md)
## 01 - RPC IDL
- Interface Description Languaue file
- Backward and forward compatibility
- Protobuf/Thrift
### Sample
You can follow the detailed example below:
- Take pb as an example. First, define an `example.proto` file with the ServiceName as `Example`.
- The name of the rpc interface is `Echo`, with the input parameter as `EchoRequest`, and the output parameter as `EchoResponse`.
- `EchoRequest` consists of two strings: `message` and `name`.
- `EchoResponse` consists of one string: `message`.
~~~proto
syntax="proto2";
message EchoRequest {
optional string message = 1;
optional string name = 2;
};
message EchoResponse {
optional string message = 1;
};
service Example {
rpc Echo(EchoRequest) returns (EchoResponse);
};
~~~
srpc-0.10.1/docs/en/docs-02-service.md 0000664 0000000 0000000 00000002335 14545022514 0017226 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-02-service.md)
## 02 - RPC Service
- It is the basic unit for SRPC services.
- Each service must be generated by one type of IDLs.
- Service is determined by IDL type, not by specific network communication protocol.
### Sample
You can follow the detailed example below:
- Use the same `example.proto` IDL above.
- Run the official `protoc example.proto --cpp_out=./ --proto_path=./` to get two files: `example.pb.h` and `example.pb.cpp`.
- Run the `srpc_generator protobuf ./example.proto ./` in SRPC to get `example.srpc.h`.
- Derive `Example::Service` to implement the rpc business logic, which is an RPC Service.
- Please note that this Service does not involve any concepts such as network, port, communication protocol, etc., and it is only responsible for completing the business logic that convert `EchoRequest` to `EchoResponse`.
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
response->set_message("Hi, " + request->name());
printf("get_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(),
response->DebugString().c_str());
}
};
~~~
srpc-0.10.1/docs/en/docs-03-server.md 0000664 0000000 0000000 00000005121 14545022514 0017071 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-03-server.md)
## 03 - RPC Server
- Each server corresponds to one port
- Each server corresponds to one specific network communication protocol
- One service may be added into multiple Servers
- One Server may have one or more Services, but the ServiceName must be unique within that Server
- Services from different IDLs can be added into the same Server
### Sample
You can follow the detailed example below:
- Follow the above `ExampleServiceImpl` Service
- First, create an RPC Server and determine the proto file.
- Then, create any number of Service instances and any number of Services for different protocols, and add these services to the Server through the `add_service()`interface.
- Finally, use `start()` or `serve()` to start the services in the Server and handle the upcoming rpc requests through the Server.
- Imagine that we can also derive more Serivce from `Example::Service`, which have different implementations of rpc `Echo`.
- Imagine that we can create N different RPC Servers on N different ports, serving on different network protocols.
- Imagine that we can use `add_service()` to add the same ServiceIMPL instance on different Servers, or we can use `add_service()` to add different ServiceIMPL instances on the same server.
- Imagine that we can use the same `ExampleServiceImpl`, serving BPRC-STD, SRPC-STD, SRPC-Http at three different ports at the same time.
- And we can use `add_service()` to add one `ExampleServiceImpl` related to Protobuf IDL and one `AnotherThriftServiceImpl` related to Thrift IDL to the same SRPC-STD Server, and the two IDLs work perfectly on the same port!
~~~cpp
int main()
{
SRPCServer server_srpc;
SRPCHttpServer server_srpc_http;
BRPCServer server_brpc;
ThriftServer server_thrift;
TRPCServer server_trpc;
TRPCHttpServer server_trpc_http;
ExampleServiceImpl impl_pb;
AnotherThriftServiceImpl impl_thrift;
server_srpc.add_service(&impl_pb);
server_srpc.add_service(&impl_thrift);
server_srpc_http.add_service(&impl_pb);
server_srpc_http.add_service(&impl_thrift);
server_brpc.add_service(&impl_pb);
server_thrift.add_service(&impl_thrift);
server_trpc.add_service(&impl_pb);
server_trpc_http.add_service(&impl_pb);
server_srpc.start(1412);
server_srpc_http.start(8811);
server_brpc.start(2020);
server_thrift.start(9090);
server_trpc.start(2022);
server_trpc_http.start(8822);
getchar();
server_trpc_http.stop();
server_trpc.stop();
server_thrift.stop();
server_brpc.stop();
server_srpc_http.stop();
server_srpc.stop();
return 0;
}
~~~
srpc-0.10.1/docs/en/docs-04-client.md 0000664 0000000 0000000 00000004376 14545022514 0017055 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-04-client.md)
## 04 - RPC Client
- Each Client corresponds to one specific target/one specific cluster
- Each Client corresponds to one specific network communication protocol
- Each Client corresponds to one specific IDL
### Sample
You can follow the detailed example below:
- Following the above example, the client is relatively simple and you can call the method directly.
- Use `Example::XXXClient` to create a client instance of some RPC. The IP+port or URL of the target is required.
- With the client instance, directly call the rpc function `Echo`. This is an asynchronous request, and the callback function will be invoked after the request is completed.
- For the usage of the RPC Context, please check [RPC Context](/docs/en/rpc.md#rpc-context).
~~~cpp
#include <stdio.h>
#include "example.srpc.h"
#include "workflow/WFFacilities.h"
using namespace srpc;
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
EchoRequest req;
req.set_message("Hello!");
req.set_name("SRPCClient");
WFFacilities::WaitGroup wait_group(1);
client.Echo(&req, [&wait_group](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
wait_group.done();
});
wait_group.wait();
return 0;
}
~~~
### Client startup parameters
Client can be started directly by passing in ip, port, or through client startup parameters.
The above example:
~~~cpp
Example::SRPCClient client("127.0.0.1", 1412);
~~~
is equivalent to:
~~~cpp
struct RPCClientParams param = RPC_CLIENT_PARAMS_DEFAULT;
param.host = "127.0.0.1";
param.port = 1412;
Example::SRPCClient client(&param);
~~~
also equivalent to:
~~~cpp
struct RPCClientParams param = RPC_CLIENT_PARAMS_DEFAULT;
param.url = "srpc://127.0.0.1:1412";
Example::SRPCClient client(&param);
~~~
Note that `RPC_CLIENT_PARAMS_DEFAULT` must be used to initialize the client's parameters; it contains an `RPCTaskParams`, which holds the default data_type, compress_type, retry_max and various timeouts. For the specific structs, refer to [rpc_options.h](/src/rpc_options.h).
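As a minimal sketch of tweaking those task-level defaults (the member names `task_params`, `retry_max`, `send_timeout` and `compress_type` follow the parameter tables in [rpc.md](/docs/en/rpc.md) and should be double-checked against rpc_options.h):
~~~cpp
struct RPCClientParams param = RPC_CLIENT_PARAMS_DEFAULT;
param.host = "127.0.0.1";
param.port = 1412;

// task_params is the embedded RPCTaskParams applied to every task of this client
param.task_params.retry_max = 2;                    // retry at most twice on failure
param.task_params.send_timeout = 5 * 1000;          // 5 seconds to send one message
param.task_params.compress_type = RPCCompressGzip;  // compress requests with gzip

Example::SRPCClient client(&param);
~~~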
srpc-0.10.1/docs/en/docs-05-context.md 0000664 0000000 0000000 00000006402 14545022514 0017254 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-05-context.md)
## 05 - RPC Context
- RPCContext is used specially to assist asynchronous interfaces, and can be used in both Service and Client.
- Each asynchronous interface will provide a Context, which offers higher-level functions, such as obtaining the remote IP, the connection seqid, and so on.
- Some functions on Context are unique to Server or Client. For example, you can set the compression mode of the response data on Server, and you can obtain the success or failure status of a request on Client.
- On the Context, you can use ``get_series()`` to obtain the SeriesWork, which is seamlessly integrated with the asynchronous mode of Workflow.
### RPCContext API - Common
#### `long long get_seqid() const;`
One complete communication consists of request+response. The sequence id of the communication on the current socket connection can be obtained, and seqid=0 indicates the first communication.
#### `std::string get_remote_ip() const;`
Get the remote IP address. IPv4/IPv6 is supported.
#### `int get_peer_addr(struct sockaddr *addr, socklen_t *addrlen) const;`
Get the remote address. The in/out parameter is the lower-level data structure sockaddr.
#### `const std::string& get_service_name() const;`
Get RPC Service Name.
#### `const std::string& get_method_name() const;`
Get RPC Method Name.
#### `SeriesWork *get_series() const;`
Get the SeriesWork of the current ServerTask/ClientTask.
### RPCContext API - Only for client done
#### `bool success() const;`
For client only. The success or failure of the request.
#### `int get_status_code() const;`
For client only. The rpc status code of the request.
#### `const char *get_errmsg() const;`
For client only. The error info of the request.
#### `int get_error() const;`
For client only. The error code of the request.
#### `void *get_user_data() const;`
For client only. Get the user\_data of the ClientTask. If a user generates a task through the ``create_xxx_task()`` interface, the context can be recorded in the user_data field. You can set that field when creating the task, and retrieve it in the callback function.
### RPCContext API - Only for server process
#### `void set_data_type(RPCDataType type);`
For Server only. Set the data packaging type
- RPCDataProtobuf
- RPCDataThrift
- RPCDataJson
#### `void set_compress_type(RPCCompressType type);`
For Server only. Set the data compression type (note: the compression type for the Client is set on Client or Task)
- RPCCompressNone
- RPCCompressSnappy
- RPCCompressGzip
- RPCCompressZlib
- RPCCompressLz4
#### `void set_attachment_nocopy(const char *attachment, size_t len);`
For Server only. Set the attachment.
#### `bool get_attachment(const char **attachment, size_t *len) const;`
For Server only. Get the attachment.
#### `void set_reply_callback(std::function<void (RPCContext *ctx)> cb);`
For Server only. Set reply callback, which is called after the operating system successfully writes the data into the socket buffer.
#### `void set_send_timeout(int timeout);`
For Server only. Set the maximum time for sending the message, in milliseconds. -1 indicates unlimited time.
#### `void set_keep_alive(int timeout);`
For Server only. Set the maximum connection keep-alive time, in milliseconds. -1 indicates unlimited time.
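As a rough illustration of how the server-side interfaces above combine, here is a minimal sketch based on the `ExampleServiceImpl` used throughout these docs (the callback signature assumes an `RPCContext *` argument; since the attachment is set without copying, the buffer must stay valid until the reply is sent):
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
    void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
    {
        response->set_message("Hi, " + request->name());

        ctx->set_compress_type(RPCCompressGzip);       // compress this response
        ctx->set_attachment_nocopy("extra-data", 10);  // string literal: stays valid, no copy made
        ctx->set_send_timeout(5 * 1000);               // at most 5 seconds to send the reply
        ctx->set_reply_callback([](RPCContext *ctx) {
            printf("reply to %s written to the socket buffer\n",
                   ctx->get_remote_ip().c_str());
        });
    }
};
~~~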
srpc-0.10.1/docs/en/docs-06-workflow.md 0000664 0000000 0000000 00000013506 14545022514 0017446 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-06-workflow.md)
## 06 - Integrating with the asynchronous Workflow framework
### 1. Server
You can follow the detailed example below:
- Echo RPC sends an HTTP request to the upstream modules when it receives the request.
- After the request to the upstream modules is completed, the server populates the body of HTTP response into the message of the response and send a reply to the client.
- We don't want to block/occupy the handler thread, so the request to the upstream must be asynchronous.
- First, we can use `WFTaskFactory::create_http_task()` of the factory of Workflow to create an asynchronous http_task.
- Then, we use `ctx->get_series()` of the RPCContext to get the SeriesWork of the current ServerTask.
- Finally, we use the `push_back()` interface of the SeriesWork to append the http\_task to the SeriesWork.
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
auto *http_task = WFTaskFactory::create_http_task("https://www.sogou.com", 0, 0,
[request, response](WFHttpTask *task) {
if (task->get_state() == WFT_STATE_SUCCESS)
{
const void *data;
size_t len;
task->get_resp()->get_parsed_body(&data, &len);
response->mutable_message()->assign((const char *)data, len);
}
else
response->set_message("Error: " + std::to_string(task->get_error()));
printf("Server Echo()\nget_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(),
response->DebugString().c_str());
});
ctx->get_series()->push_back(http_task);
}
};
~~~
### 2. Client
You can follow the detailed example below:
- We send two requests in parallel. One is an RPC request and the other is an HTTP request.
- After both requests are finished, we initiate a calculation task again to calculate the sum of the squares of the two numbers.
- First, use `create_Echo_task()` of the RPC Client to create an rpc\_task, which is an asynchronous RPC network request.
- Then, use `WFTaskFactory::create_http_task` and `WFTaskFactory::create_go_task` in the factory of Workflow to create an asynchronous network task http\_task and an asynchronous computing task calc\_task respectively.
- Finally, use the serial-parallel graph to organize three asynchronous tasks, in which the multiplication sign indicates parallel tasks and the greater than sign indicates serial tasks and then execute ``start()``.
~~~cpp
void calc(int x, int y)
{
int z = x * x + y * y;
printf("calc result: %d\n", z);
}
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
auto *rpc_task = client.create_Echo_task([](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
});
auto *http_task = WFTaskFactory::create_http_task("https://www.sogou.com", 0, 0, [](WFHttpTask *task) {
if (task->get_state() == WFT_STATE_SUCCESS)
{
std::string body;
const void *data;
size_t len;
task->get_resp()->get_parsed_body(&data, &len);
body.assign((const char *)data, len);
printf("%s\n\n", body.c_str());
}
else
printf("Http request fail\n\n");
});
auto *calc_task = WFTaskFactory::create_go_task(calc, 3, 4);
EchoRequest req;
req.set_message("Hello!");
req.set_name("1412");
rpc_task->serialize_input(&req);
WFFacilities::WaitGroup wait_group(1);
SeriesWork *series = Workflow::create_series_work(http_task, [&wait_group](const SeriesWork *) {
wait_group.done();
});
series->push_back(rpc_task);
series->push_back(calc_task);
series->start();
wait_group.wait();
return 0;
}
~~~
### 3. Upstream
SRPC can directly use any component of Workflow; the most commonly used is [Upstream](https://github.com/sogou/workflow/blob/master/docs/en/about-upstream.md), and any kind of SRPC client can use Upstream.
You may use the example below to construct a client that can use Upstream through parameters:
```cpp
#include "workflow/UpstreamManager.h"
int main()
{
// 1. create upstream and add server instances
UpstreamManager::upstream_create_weighted_random("echo_server", true);
UpstreamManager::upstream_add_server("echo_server", "127.0.0.1:1412");
UpstreamManager::upstream_add_server("echo_server", "192.168.10.10");
UpstreamManager::upstream_add_server("echo_server", "internal.host.com");
// 2. create params and fill upstream name
RPCClientParams client_params = RPC_CLIENT_PARAMS_DEFAULT;
client_params.host = "echo_server";
client_params.port = 1412; // this port is only used for upstream URI parsing and will not affect the selection of instances
// 3. construct client by params, the rest of usage is similar as other tutorials
Example::SRPCClient client(&client_params);
...
```
If we use the **ConsistentHash** or **Manual** upstream, we often need to distinguish different tasks for the selection algorithm. At this time, we may use the `int set_uri_fragment(const std::string& fragment);` interface on the client task to set request-level related information.
This field is the fragment in the URI. For the semantics, please refer to [RFC 3986 3.5-Fragment](https://datatracker.ietf.org/doc/html/rfc3986#section-3.5); any information that needs to use the fragment (such as information required by some other selection policy) may use this field as well.
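A hedged sketch of that combination, assuming a consistent-hash upstream built with Workflow's `UpstreamManager` (the upstream name, server addresses and fragment value are illustrative):
```cpp
#include "workflow/UpstreamManager.h"
int main()
{
    // 1. create a consistent-hash upstream; nullptr selects the default hash function
    UpstreamManager::upstream_create_consistent_hash("echo_server", nullptr);
    UpstreamManager::upstream_add_server("echo_server", "127.0.0.1:1412");
    UpstreamManager::upstream_add_server("echo_server", "127.0.0.1:1413");

    // 2. construct the client against the upstream name, as in the example above
    RPCClientParams client_params = RPC_CLIENT_PARAMS_DEFAULT;
    client_params.host = "echo_server";
    client_params.port = 1412;
    Example::SRPCClient client(&client_params);

    // 3. set the URI fragment on the task so the ConsistentHash policy can use it
    auto *task = client.create_Echo_task([](EchoResponse *response, RPCContext *ctx) {
        /* handle the response as usual */
    });
    task->set_uri_fragment("user-12345"); // illustrative key fed into the hash
    task->start();
    ...
}
```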
srpc-0.10.1/docs/en/docs-07-srpc-http.md 0000664 0000000 0000000 00000016222 14545022514 0017517 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-07-srpc-http.md)
## 07 - Use Http with SRPC, TRPC and Thrift
**srpc** supports the **HTTP** protocol, which makes it convenient to communicate with other languages. Just fill the **IDL** content into the **HTTP body** and put the **IDL** type (**json**/**protobuf**/**thrift**) into the **HTTP header**, and we can communicate with other frameworks through the **HTTP** protocol.
- **SRPCHttpServer**, **TRPCHttpServer** and **ThriftHttpServer** can receive HTTP requests from client implemented by any language.
- **SRPCHttpClient**, **TRPCHttpClient** and **ThriftHttpClient** can send HTTP requests to servers implemented by any language.
- **HTTP header**: `Content-Type` needs to be set as `application/json` when body is json, or set as `application/x-protobuf` when body is protobuf, or set as `application/x-thrift` when body is thrift;
- **HTTP body**: If there is **bytes** type in body, **json** needs to be encoded with **base64**.
### 1. Example
Implementing an **SRPCHttpClient** is simple: just refer to [tutorial-02-srpc_pb_client.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-02-srpc_pb_client.cc) or [tutorial-09-client_task.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-09-client_task.cc) and change `SRPCClient` into `SRPCHttpClient`.
[README.md](/docs/en/README.md#6-run) demonstrates how to use **curl** to send a request to **SRPCHttpServer**, and below we give an example to demonstrate how to use **python** as a client to send a request to **TRPCHttpServer**.
**proto file :**
```proto
syntax="proto3"; // proto2 or proto3 are both supported
package trpc.test.helloworld;
message AddRequest {
string message = 1;
string name = 2;
bytes info = 3;
int32 error = 4;
};
message AddResponse {
string message = 1;
};
service Batch {
rpc Add(AddRequest) returns (AddResponse);
};
```
**python client :**
```py
import json
import requests
from base64 import b64encode
class Base64Encoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, bytes):
return b64encode(o).decode()
return json.JSONEncoder.default(self, o)
headers = {'Content-Type': 'application/json'}
req = {
'message': 'hello',
'name': 'k',
'info': b'i am binary'
}
print(json.dumps(req, cls=Base64Encoder))
ret = requests.post(url = "http://localhost:8800/trpc.test.helloworld.Batch/Add",
headers = headers, data = json.dumps(req, cls=Base64Encoder))
print(ret.json())
```
### 2. Splicing the path of HTTP request
[README.md](/docs/en/README.md#6-run) shows that the path is concatenated from the service name and the rpc name. Moreover, for the above proto file with the package name `package trpc.test.helloworld;`, the package name also needs to be spliced into the path. The splicing paths of **SRPCHttp** and **TRPCHttp** are different. For **ThriftHttp**, multiple services are not supported by SRPC Thrift, so no path is required.
Let's take **curl** as an example:
Request to **SRPCHttpServer**:
```sh
curl 127.0.0.1:8811/trpc/test/helloworld/Batch/Add -H 'Content-Type: application/json' -d '{...}'
```
Request to **TRPCHttpServer**:
```sh
curl 127.0.0.1:8811/trpc.test.helloworld.Batch/Add -H 'Content-Type: application/json' -d '{...}'
```
Request to **ThriftHttpServer**:
```sh
curl 127.0.0.1:8811 -H 'Content-Type: application/json' -d '{...}'
```
### 3. HTTP status code
SRPC allows the server to set the HTTP status code in `process()`; the interface is `set_http_code(int code)` on **RPCContext**. This status code is only valid when the framework can handle the request correctly; otherwise the SRPC framework-level error code will be set.
**Usage :**
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
if (req->name() != "workflow")
ctx->set_http_code(404); // set HTTP status code as 404
else
resp->set_message("Hi back");
}
};
~~~
**CURL command :**
~~~sh
curl -i 127.0.0.1:1412/Example/Echo -H 'Content-Type: application/json' -d '{message:"from curl",name:"CURL"}'
~~~
**Result :**
~~~sh
HTTP/1.1 404 Not Found
SRPC-Status: 1
SRPC-Error: 0
Content-Type: application/json
Content-Encoding: identity
Content-Length: 21
Connection: Keep-Alive
~~~
**Notice :**
We can still tell from `SRPC-Status: 1` in the response header that this request was handled correctly at the framework level, while `404` is the status code set by the server.
### 4. HTTP Header
Use the following three interfaces to get or set HTTP header:
~~~cpp
bool get_http_header(const std::string& name, std::string& value) const;
bool set_http_header(const std::string& name, const std::string& value);
bool add_http_header(const std::string& name, const std::string& value);
~~~
For **server**, these interfaces are on `RPCContext`.
For **client**, we need to set the **HTTP header on the request** through `RPCClientTask`, and get the **HTTP header on the response** on the `RPCContext` of the callback function. The usage is as follows:
~~~cpp
int main()
{
    Example::SRPCHttpClient client("127.0.0.1", 80);
    WFFacilities::WaitGroup wait_group(1);

    EchoRequest req;
    req.set_message("Hello, srpc!");
    auto *task = client.create_Echo_task([&wait_group](EchoResponse *resp, RPCContext *ctx) {
        if (ctx->success())
        {
            std::string value;
            ctx->get_http_header("server_key", value); // get the HTTP header on the response
        }
        wait_group.done();
    });
    task->serialize_input(&req);
    task->set_http_header("client_key", "client_value"); // set the HTTP header on the request
    task->start();
    wait_group.wait();
    return 0;
}
~~~
### 5. proto3 transport format problem
When the IDL is protobuf, proto3 has no **optional**/**required** distinction, so a primitive field holding its default value (for example, an int32 field set to 0) cannot be recognized as having been set and is omitted from the JSON output by default. JsonPrintOptions provides a flag to override this behavior and print primitive fields regardless of their values.
SRPC provides some interfaces on **RPCContext** to set this and the other flags of [JsonPrintOptions](https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.util.json_util#JsonPrintOptions). The specific interfaces and usage descriptions can be found in [rpc_context.h](/src/rpc_context.h).
**Example :**
```cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
resp->set_message("Hi back");
resp->set_error(0); // the type of error is int32 and 0 is the default value of int32
ctx->set_json_always_print_primitive_fields(true); // with all primitive fields
ctx->set_json_add_whitespace(true); // add spaces, line breaks and indentation
}
};
```
**Original output :**
```sh
{"message":"Hi back"}
```
**After set json options on RPCContext:**
```sh
{
"message": "Hi back",
"error": 0
}
```
srpc-0.10.1/docs/en/docs-08-tracing.md 0000664 0000000 0000000 00000011133 14545022514 0017217 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-08-tracing.md)
## 08 - Report Tracing to OpenTelemetry
**SRPC** supports generating and reporting tracing and spans, which can be reported in multiple ways, including exporting data locally or to [OpenTelemetry](https://opentelemetry.io).
Since **SRPC** follows the [data specification](https://github.com/open-telemetry/opentelemetry-specification) of **OpenTelemetry** and the specification of [w3c trace context](https://www.w3.org/TR/trace-context/), now we can use **RPCTraceOpenTelemetry** as the reporting plugin.
The report conforms to the **Workflow** style, which is pure asynchronous task and therefore has no performance impact on the RPC requests and services.
### 1. Usage
After the plugin `RPCTraceOpenTelemetry` is constructed, we can use `add_filter()` to add it into **server** or **client**.
For [tutorial-02-srpc_pb_client.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-02-srpc_pb_client.cc), add 2 lines like the following :
```cpp
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
RPCTraceOpenTelemetry span_otel("http://127.0.0.1:4318");
client.add_filter(&span_otel);
...
}
```
For [tutorial-01-srpc_pb_server.cc](https://github.com/sogou/srpc/blob/master/tutorial/tutorial-01-srpc_pb_server.cc), add the similar 2 lines. We also add the local plugin to print the reported data on the screen :
```cpp
int main()
{
SRPCServer server;
RPCTraceOpenTelemetry span_otel("http://127.0.0.1:4318");
server.add_filter(&span_otel);
RPCTraceDefault span_log; // this plugin will print the tracing info on the screen
server.add_filter(&span_log);
...
}
```
Make the tutorial and run both server and client; some tracing information will be printed on the screen.
We can find that the span_id **04d070f537f17d00** in the client becomes the parent_span_id **04d070f537f17d00** in the server.
### 2. Traces on Jaeger
Open the Jaeger UI, and we can find our service name **Example** and method name **Echo**. There are two span nodes, which were reported by the server and the client respectively.
As we saw on the screen, the client reported span_id **04d070f537f17d00** and the server reported span_id **00202cf737f17d00**; these spans and the correlated tracing information can be found on Jaeger, too.
### 3. About Parameters
How often a trace is collected, the number of report retries and other parameters can be specified through the constructor parameters of `RPCTraceOpenTelemetry`. Code reference: [src/module/rpc_trace_filter.h](https://github.com/sogou/srpc/blob/master/src/module/rpc_trace_filter.h#L238)
By default at most 1000 traces are collected per second. Features such as transparently passing tracing information through the srpc framework have also been implemented, and they conform to the specifications as well.
### 4. Attributes
We can also use `add_attributes()` to add other information as OTEL_RESOURCE_ATTRIBUTES.
Please notice that our service name "Example" is also set through this attribute, whose key is `service.name`. If users also provide `service.name` in OTEL_RESOURCE_ATTRIBUTES, the srpc service name takes precedence. Refer to: [OpenTelemetry#resource](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/src/OpenTelemetry#resource)
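A hedged sketch, assuming `add_attributes()` takes a key and a value (see rpc_trace_filter.h for the exact signature); the attribute key and value here are illustrative:
```cpp
RPCTraceOpenTelemetry span_otel("http://127.0.0.1:4318");
// reported among OTEL_RESOURCE_ATTRIBUTES together with service.name
span_otel.add_attributes("tenant.name", "example_tenant");
server.add_filter(&span_otel);
```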
### 5. Log and Baggage
SRPC provides `log()` and `baggage()` to carry some user data through span.
API :
```cpp
void log(const RPCLogVector& fields);
void baggage(const std::string& key, const std::string& value);
```
As a server, we can use `RPCContext` to add log annotation:
```cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
resp->set_message("Hi back");
ctx->log({{"event", "info"}, {"message", "rpc server echo() end."}});
}
};
```
As a client, we can use `RPCClientTask` to add log on span:
```cpp
srpc::SRPCClientTask *task = client.create_Echo_task(...);
task->log({{"event", "info"}, {"message", "log by rpc client echo()."}});
```
srpc-0.10.1/docs/en/docs-09-metrics.md 0000664 0000000 0000000 00000027610 14545022514 0017246 0 ustar 00root root 0000000 0000000 [中文版](/docs/docs-09-metrics.md)
## 09 - Report Metrics to OpenTelemetry / Prometheus
**Metrics** are a common monitoring requirement. **SRPC** supports generating and collecting metrics and reporting them in various ways, including reporting to [Prometheus](https://prometheus.io/) and [OpenTelemetry](https://opentelemetry.io).
The report conforms to the **Workflow** style, which is pure asynchronous task or pull mode, and therefore has no performance impact on the RPC requests and services.
This document introduces the concept of Metrics, explains the interfaces used in [tutorial-16](/tutorial/tutorial-16-server_with_metrics.cc), shows how to report to Prometheus and to OpenTelemetry, and introduces the Var module, which uses thread-local storage to speed up performance.
### 1. Introduction
The metrics types of **Prometheus** can be found in the official documentation: [Concepts - Metrics](https://prometheus.io/docs/concepts/metric_types/) and [Types of Metrics](https://prometheus.io/docs/tutorials/understanding_metric_types/).
The basic metrics concepts of **OpenTelemetry** can be found in the [data specification](https://github.com/open-telemetry/opentelemetry-specification) and the metrics [datamodel.md](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md).
The concepts of the four basic metrics are the same, so SRPC provides the most basic support for these four metrics, which can correspond to the reported data of Prometheus and OpenTelemetry respectively. Please refer to the table below:
|Metrics type|Prometheus|OpenTelemetry| create api in SRPC | some operations in SRPC |
|-------|----------|------------|-----------------|-------------------|
|single value| Gauge | Gauge | create_gauge(name, help); | void increase(); void decrease();|
|counter| Counter | Sum | create_counter(name, help); |GaugeVar *add(labels);|
|histogram |Histogram | Histogram | create_histogram(name, help, buckets); |void observe(data);|
|samples | Summary | Summary | create_summary(name, help, quantiles); |void observe(data);|
Basic description of the four types of metrics:
1. **Single value**: A simple value that can be increased or decreased.
2. **Counter**: A counter is a cumulative metrics, which can be added several **labels** to distinguish the values under different labels.
3. **Histogram**: A histogram samples observations (usually things like request durations or response sizes) and counts them in configurable buckets, so it is necessary to pass in **buckets** to inform the interval to be divided.
For example, if the input bucket value is { 1, 10, 100 }, then we can get the data distributed in these four intervals { 0 - 1, 0 - 10, 0 - 100, 0 - +Inf }. It also provides a sum and the total count of all observed values.
4. **Samples**: Similar to a histogram, but **quantiles** are passed in, such as {0.5, 0.9}, together with their precision. It is mainly used in scenarios where the distribution of the data is not known in advance (in contrast, histograms require specific bucket boundaries to be passed in).
Sampling comes with a **time window**. You may specify the statistics window by **max_age** and the number of buckets by **age_bucket**. The default in SRPC is 5 buckets over a 60-second window.
Considering the statistical complexity of the sampling **Summary**, **Histogram** is recommended instead.
For convenience, these four type of metrics in SRPC are of type **double**.
### 2. Usage
The example shows the usage via [tutorial-16-server_with_metrics.cc](/tutorial/tutorial-16-server_with_metrics.cc). Although this example is for the server, the usage on the client is the same.
#### (1) create filter
Here we report to Prometheus, so we need the filter **RPCMetricsPull** as the plugin.
~~~cpp
#include "srpc/rpc_metrics_filter.h" // metrics filter header file
int main()
{
SRPCServer server;
ExampleServiceImpl impl;
RPCMetricsPull filter; // create a filter
filter.init(8080); // the port for Prometheus to pull metrics data
~~~
#### (2) add metrics
By default, there are some common metrics in the filter **RPCMetricsPull**, including the overall request count, the request count per service and method as labels, and the latency quantiles.
Users can add any metrics they want. Here we add a histogram named "echo_request_size" to count the size of requests. This metric describes itself as "Echo request size", and its bucket division is { 1, 10, 100 }.
~~~cpp
filter.create_histogram("echo_request_size", "Echo request size", {1, 10, 100});
~~~
More illustrations:
**Timing to add metrics**
Metrics can be added at any time, even after the server/client has started running. But a metric **must be added before we use it**, otherwise a NULL pointer will be obtained when we acquire the metric by name.
**The name of the metrics**
The name of a metric is **globally unique** (no matter which of the four basic types it is), and **can only contain uppercase and lowercase letters and underscores, i.e. a~z, A~Z, _**. Creation will fail and return NULL if we create a metric with an existing metric's name.
We will use this unique name to operate on the metric after it has been created.
**Other APIs to create metrics**
You can check this header file [rpc_metrics_filter.h](/src/module/rpc_metrics_filter.h): `class RPCMetricsFilter`.
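As a rough sketch of the other creation interfaces from the table in section 1 (the metric names are illustrative, and the `{quantile, error}` form of the summary argument is an assumption to be checked against the header):
~~~cpp
// single value: moved up and down later via gauge(name)->increase()/decrease()
filter.create_gauge("active_session_count", "currently active sessions");

// counter: per-label statistics are added later via counter(name)->add(labels)
filter.create_counter("service_method_count", "request count per service and method");

// summary: quantiles with an error tolerance, e.g. p50 within 5%, p90 within 1%
filter.create_summary("echo_latency_quantiles", "Echo latency quantiles",
                      {{0.5, 0.05}, {0.9, 0.01}});
~~~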
#### (3) add filter into server / client
The following example keeps the pointer to this filter in ExampleServiceImpl because we may need its APIs to operate on our metrics. This is just one sample usage; users may hold the filter in whatever way they prefer.
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override;
void set_filter(RPCMetricsPull *filter) { this->filter = filter; }
private:
RPCMetricsPull *filter; // keep the pointer of filter and add set_filter(), notice that it's not APIs of SRPC
};
~~~
The usage in main:
~~~cpp
int main()
{
...
impl.set_filter(&filter); // keep the filter through the service API we added above
server.add_filter(&filter); // client can also call add_filter()
server.add_service(&impl);
filter.deinit();
return 0;
}
~~~
#### (4) operation with the metrics
Every time we receive a request, we accumulate the size of the EchoRequest into the histogram we just created.
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *req, EchoResponse *resp, RPCContext *ctx) override
{
resp->set_message("Hi back");
this->filter->histogram("echo_request_size")->observe(req->ByteSizeLong());
}
~~~
As shown, we use the `histogram()` API on the filter to get the histogram metric by its name, and then use `observe()` to record the data size.
Let's take a look at the APIs for acquiring the four types of metrics.
~~~cpp
class RpcMetricsFilter : public RPCFilter
{
GaugeVar *gauge(const std::string& name);
CounterVar *counter(const std::string& name);
HistogramVar *histogram(const std::string& name);
SummaryVar *summary(const std::string& name);
~~~
If a metric with this name is found, a pointer of the relevant type is returned and we may use it for the next step, such as calling observe() on the histogram as we did above. **Notice: if the metric name does not exist, NULL will be returned, so we must ensure that the metric we want has been successfully created.**
Common APIs can be found in the table above. For more details please refer to [rpc_var.h](/src/var/rpc_var.h).
It's worth taking a look at the API on Counter:
~~~cpp
class CounterVar : public RPCVar
{
GaugeVar *add(const std::map<std::string, std::string>& labels);
~~~
This adds a Gauge value for a certain dimension into this Counter. The statistics of each dimension are kept separately. The labels are a map, that is, one dimension can be specified through multiple label key-value pairs, for example:
~~~cpp
filter->counter("service_method_count")->add({"service", "Example"}, {"method", "Echo"}})->increase();
~~~
And we can get the statistics calculated per label set `{service="Example",method="Echo"}`.
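Gauge operations are the simplest: supposing a gauge named "active_session_count" was created as sketched earlier, it can be moved up and down wherever the filter pointer is available:
~~~cpp
filter->gauge("active_session_count")->increase();  // e.g. when a session starts
/* ... serve the session ... */
filter->gauge("active_session_count")->decrease();  // and when it ends
~~~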
#### (5) Reporting
Reporting in SRPC filters is automatic, so users don't need to do anything. Next we will use a client to make some requests and check the format of data which will be reported.
~~~sh
./srpc_pb_client
message: "Hi back"
message: "Hi back"
~~~
Prometheus uses pull mode: it pulls through the port we set before, with /metrics as the URL path. By pulling in the same way Prometheus does, we can get the data with a simple local request.
~~~sh
curl localhost:8080/metrics
# HELP total_request_count total request count
# TYPE total_request_count gauge
total_request_count 2.000000
# HELP total_request_method request method statistics
# TYPE total_request_method counter
total_request_method{method="Echo",service="Example"} 2.000000
# HELP total_request_latency request latency nano seconds
# TYPE total_request_latency summary
total_request_latency{quantile="0.500000"} 645078.500000
total_request_latency{quantile="0.900000"} 645078.500000
total_request_latency_sum 1290157.000000
total_request_latency_count 2
# HELP echo_request_size Echo request size
# TYPE echo_request_size histogram
echo_request_size_bucket{le="1.000000"}0
echo_request_size_bucket{le="10.000000"}0
echo_request_size_bucket{le="100.000000"}2
echo_request_size_bucket{le="+Inf"} 2
echo_request_size_sum 40.000000
echo_request_size_count 2
~~~
It can be seen that we obtained four types of metrics by sending 2 requests with the tutorial-02 client, among which the histogram was created by us, while the gauge, counter and summary were created by the filter. More default metrics will be added over time for the convenience of users.
### 3. Features of reporting to Prometheus
The main features of reporting to Prometheus have just been described:
1. Uses the pull mode and is collected regularly;
2. A port is required for pulling;
3. Returns the data content through /metrics;
4. The data content is in the string format defined by Prometheus.
The interface reference is as follows:

### 4. Features of reporting OpenTelemetry
The main features of reporting to OpenTelemetry:
1. Uses the push mode and sends HTTP requests regularly;
2. A URL is required for reporting;
3. Reports data through /v1/metrics;
4. The data content is the [protobuf](/src/module/proto/opentelemetry_metrics.proto) agreed by OpenTelemetry.
Basic APIs references:
~~~cpp
class RPCMetricsOTel : public RPCMetricsFilter
{
public:
RPCMetricsOTel(const std::string& url);
RPCMetricsOTel(const std::string& url, unsigned int redirect_max,
unsigned int retry_max, size_t report_threshold,
size_t report_interval);
~~~
The reporting interval and the number of requests that trigger a report can be specified. By default, the filter reports after accumulating 100 records or once every second.
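A hedged usage sketch of the second constructor above (the interval is assumed to be in milliseconds; the values are illustrative):
~~~cpp
// report when 200 records accumulate, or every 2 seconds, whichever comes first
RPCMetricsOTel otel_filter("http://127.0.0.1:4318",
                           /*redirect_max*/ 0,
                           /*retry_max*/ 2,
                           /*report_threshold*/ 200,
                           /*report_interval*/ 2000);
server.add_filter(&otel_filter); // or client.add_filter(&otel_filter)
~~~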
### 5. Var
Would it become a performance bottleneck that every request uses the globally unique name to look up its metric, especially for multi-threaded operations on the more complicated metrics (histogram or summary)?
The answer is no, because the APIs that get metrics through the filter are **thread-local**.
SRPC builds a thread-local var system. Every time we get a var, we get its thread-local pointer, so each calculation is collected only by the current thread and multiple threads never contend on a global mutex. Moreover, since reporting is always asynchronous, a global API expose() gathers the vars from all threads and reduces them together at report time; finally the data is reported in the format of the specific filter.
srpc-0.10.1/docs/en/installation.md 0000664 0000000 0000000 00000007564 14545022514 0017133 0 ustar 00root root 0000000 0000000 # Installation
## 1. Install from source code on Linux
SRPC depends on the following modules: **CMake** (>= v3.6.0 required), **OpenSSL** (>= v1.1.0 recommended), **Protobuf** (>= v3.5.0 required)
Building will produce the following output:
1. static library: libsrpc.a
2. dynamic library: libsrpc.so (or dylib/dll)
3. tool for generating code: srpc_generator
- **cmake**
~~~sh
git clone --recursive https://github.com/sogou/srpc.git
cd srpc
make
make install
# compile tutorial
cd tutorial
make
~~~
- **bazel**
~~~sh
git clone --recursive https://github.com/sogou/srpc.git
cd srpc
bazel build ...
# compiles the libraries, srpc_generator and all tutorials into bazel-bin/
~~~
In addition, we can use srpc_tools to generate and deploy a skeleton project. Refer to the usage: [srpc/tools/README.md](srpc/tools/README.md)
Workflow, snappy and lz4 can also be found as packages installed in the system. If the submodule dependencies are not pulled into third\_party by '--recursive', they will be searched for in the system's default installation paths. snappy v1.1.6 or above is required.
If you need to install **Protobuf** from source code, refer to the command:
~~~sh
git clone -b 3.20.x https://github.com/protocolbuffers/protobuf.git protobuf.3.20
cd protobuf.3.20
sh autogen.sh
./configure
make -j4
make install
~~~
## 2. Debian Linux
SRPC has been packaged for Debian. It is currently in Debian sid (unstable) but will eventually be placed into the stable repository.
In order to access the unstable repository, you will need to edit your /etc/apt/sources.list file.
sources.list has the format: `deb <repository url> <branch> <sub branches>`
Simply add the 'unstable' sub branch to your repo:
~~~~sh
deb http://deb.debian.org/ main contrib non-free
-->
deb http://deb.debian.org/ unstable main contrib non-free
~~~~
Once that is added, update your repo list and then you should be able to install it:
~~~~sh
sudo apt-get update
~~~~
To install the srpc library for development purposes:
~~~~sh
sudo apt-get install libsrpc-dev
~~~~
To install the srpc library for deployment:
~~~~sh
sudo apt-get install libsrpc
~~~~
## 3. Fedora Linux
SRPC has been packaged for Fedora.
To install the srpc library for development purposes:
~~~~sh
sudo dnf install srpc-devel
~~~~
To install the srpc library for deployment:
~~~~sh
sudo dnf install srpc
~~~~
## 4. Windows
The srpc code itself is no different under Windows, but users need to use the [windows branch](https://github.com/sogou/workflow/tree/windows) of Workflow.
Moreover, srpc_tools is not supported on Windows.
## 5. MacOS
- Install dependency `OpenSSL`
```
brew install openssl
```
- Install `CMake`
```
brew install cmake
```
- Specify the `OpenSSL` environment variable
Brew does not automatically create the soft links for OpenSSL after installation, because LibreSSL is provided by default on macOS. We need to manually configure the execution path, compilation paths, and the find_package path as environment variables for cmake. You can execute `brew info openssl` to view the relevant information, or configure it as follows:
```
echo 'export PATH="/usr/local/opt/openssl@1.1/bin:$PATH"' >> ~/.bash_profile
echo 'export LDFLAGS="-L/usr/local/opt/openssl@1.1/lib"' >> ~/.bash_profile
echo 'export CPPFLAGS="-I/usr/local/opt/openssl@1.1/include"' >> ~/.bash_profile
echo 'export PKG_CONFIG_PATH="/usr/local/opt/openssl@1.1/lib/pkgconfig"' >> ~/.bash_profile
echo 'export OPENSSL_ROOT_DIR=/usr/local/opt/openssl' >> ~/.bash_profile
echo 'export OPENSSL_LIBRARIES=/usr/local/opt/openssl/lib' >> ~/.bash_profile
```
If you use `zsh`, you need one more step to load the bash configuration:
```
echo 'test -f ~/.bash_profile && source ~/.bash_profile' >> ~/.zshrc
source ~/.zshrc
```
The remaining steps are no different from compiling on Linux.
srpc-0.10.1/docs/en/rpc.md 0000664 0000000 0000000 00000061411 14545022514 0015205 0 ustar 00root root 0000000 0000000 [中文版](/docs/rpc.md)
## Comparison of basic features
| RPC | IDL | Communication | Network data | Compression | Attachment | Semi-synchronous | Asynchronous | Streaming |
| --------------------------- | --------- | ------------- | ------------ | -------------------- | ------------- | ---------------- | ------------- | ------------- |
| Thrift Binary Framed | Thrift | TCP | Binary | Not supported | Not supported | Supported | Not supported | Not supported |
| Thrift Binary HttpTransport | Thrift | HTTP | Binary | Not supported | Not supported | Supported | Not supported | Not supported |
| GRPC | PB | HTTP2 | Binary | gzip/zlib/lz4/snappy | Supported | Not supported | Supported | Supported |
| BRPC Std | PB | TCP | Binary | gzip/zlib/lz4/snappy | Supported | Not supported | Supported | Supported |
| SRPC Std | PB/Thrift | TCP | Binary/JSON | gzip/zlib/lz4/snappy | Supported | Supported | Supported | Not supported |
| SRPC Std HTTP | PB/Thrift | HTTP | Binary/JSON | gzip/zlib/lz4/snappy | Supported | Supported | Supported | Not supported |
## Basic concepts
- Communication layer: TCP/TCP\_SSL/HTTP/HTTPS/HTTP2
- Protocol layer: Thrift-binary/BRPC-std/SRPC-std/SRPC-http/tRPC-std/tRPC-http
- Compression layer: no compression/gzip/zlib/lz4/snappy
- Data layer: PB binary/Thrift binary/JSON string
- IDL serialization layer: PB/Thrift serialization
- RPC invocation layer: Service/Client IMPL
## RPC Global
- Get the srpc version via `srpc::SRPCGlobal::get_instance()->get_srpc_version()`, as in the sketch below
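A minimal sketch, assuming the generated `example.srpc.h` (used in the samples below) pulls in the SRPCGlobal declaration and that the returned version is printable:
~~~cpp
#include <iostream>
#include "example.srpc.h"

int main()
{
    std::cout << "srpc version: "
              << srpc::SRPCGlobal::get_instance()->get_srpc_version() << std::endl;
    return 0;
}
~~~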
## RPC Status Code
| Name | Value | Description |
| ----------------------------------- | ----- | -------------------------------------------------------- |
| RPCStatusUndefined | 0 | Undefined |
| RPCStatusOK | 1 | Correct/Success |
| RPCStatusServiceNotFound | 2 | Cannot find the RPC service name |
| RPCStatusMethodNotFound | 3 | Cannot find the RPC function name |
| RPCStatusMetaError | 4 | Meta error/ parsing failed |
| RPCStatusReqCompressSizeInvalid | 5 | Incorrect size for the request when compressing |
| RPCStatusReqDecompressSizeInvalid | 6 | Incorrect size for the request when decompressing |
| RPCStatusReqCompressNotSupported | 7 | The compression type for the request is not supported |
| RPCStatusReqDecompressNotSupported | 8 | The decompression type for the request is not supported |
| RPCStatusReqCompressError | 9 | Failed to compress the request |
| RPCStatusReqDecompressError | 10 | Failed to decompress the request |
| RPCStatusReqSerializeError | 11 | Failed to serialize the request by IDL |
| RPCStatusReqDeserializeError | 12 | Failed to deserialize the request by IDL |
| RPCStatusRespCompressSizeInvalid | 13 | Incorrect size for the response when compressing |
| RPCStatusRespDecompressSizeInvalid  | 14    | Incorrect size for the response when decompressing       |
| RPCStatusRespCompressNotSupported | 15 | The compression type for the response is not supported |
| RPCStatusRespDecompressNotSupported | 16    | The decompression type for the response is not supported |
| RPCStatusRespCompressError | 17 | Failed to compress the response |
| RPCStatusRespDecompressError | 18 | Failed to decompress the response |
| RPCStatusRespSerializeError | 19 | Failed to serialize the response by IDL |
| RPCStatusRespDeserializeError | 20 | Failed to deserialize the response by IDL |
| RPCStatusIDLSerializeNotSupported | 21 | IDL serialization type is not supported |
| RPCStatusIDLDeserializeNotSupported | 22 | IDL deserialization type is not supported |
| RPCStatusURIInvalid | 30 | Illegal URI |
| RPCStatusUpstreamFailed             | 31    | Upstream failed                                           |
| RPCStatusSystemError | 100 | System error |
| RPCStatusSSLError | 101 | SSL error |
| RPCStatusDNSError | 102 | DNS error |
| RPCStatusProcessTerminated | 103 | Program exited&terminated |
## RPC IDL
- Interface Description Language file
- Backward and forward compatibility
- Protobuf/Thrift
### Sample
You can follow the detailed example below:
- Take pb as an example. First, define an `example.proto` file with the ServiceName as `Example`.
- The name of the rpc interface is `Echo`, with the input parameter as `EchoRequest`, and the output parameter as `EchoResponse`.
- `EchoRequest` consists of two strings: `message` and `name`.
- `EchoResponse` consists of one string: `message`.
~~~proto
syntax="proto2";
message EchoRequest {
optional string message = 1;
optional string name = 2;
};
message EchoResponse {
optional string message = 1;
};
service Example {
rpc Echo(EchoRequest) returns (EchoResponse);
};
~~~
## RPC Service
- It is the basic unit for SRPC services.
- Each service must be generated by one type of IDLs.
- Service is determined by IDL type, not by specific network communication protocol.
### Sample
You can follow the detailed example below:
- Use the same `example.proto` IDL above.
- Run the official `protoc example.proto --cpp_out=./ --proto_path=./` to get two files: `example.pb.h` and `example.pb.cc`.
- Run the `srpc_generator protobuf ./example.proto ./` in SRPC to get `example.srpc.h`.
- Derive `Example::Service` to implement the rpc business logic, which is an RPC Service.
- Please note that this Service does not involve any concepts such as network, port, communication protocol, etc., and it is only responsible for completing the business logic that convert `EchoRequest` to `EchoResponse`.
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
response->set_message("Hi, " + request->name());
printf("get_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(),
response->DebugString().c_str());
}
};
~~~
## RPC Server
- Each server corresponds to one port
- Each server corresponds to one specific network communication protocol
- One service may be added into multiple Servers
- One Server may have one or more Services, but the ServiceName must be unique within that Server
- Services from different IDLs can be added into the same Server
### Sample
You can follow the detailed example below:
- Follow the above `ExampleServiceImpl` Service
- First, create an RPC Server and determine the proto file.
- Then, create any number of Service instances, including Services generated from different proto files, and add these services to the Server through the `add_service()` interface.
- Finally, use `start()` or `serve()` to start the services in the Server and handle the upcoming rpc requests through the Server.
- Imagine that we can also derive more Services from `Example::Service`, which have different implementations of rpc `Echo`.
- Imagine that we can create N different RPC Servers on N different ports, serving on different network protocols.
- Imagine that we can use `add_service()` to add the same ServiceIMPL instance on different Servers, or we can use `add_service()` to add different ServiceIMPL instances on the same server.
- Imagine that we can use the same `ExampleServiceImpl`, serving BRPC-STD, SRPC-STD, SRPC-Http at three different ports at the same time.
- And we can use `add_service()` to add one `ExampleServiceImpl` related to Protobuf IDL and one `AnotherThriftServiceImpl` related to Thrift IDL to the same SRPC-STD Server, and the two IDLs work perfectly on the same port!
~~~cpp
int main()
{
SRPCServer server_srpc;
SRPCHttpServer server_srpc_http;
BRPCServer server_brpc;
ThriftServer server_thrift;
TRPCServer server_trpc;
TRPCHttpServer server_trpc_http;
ExampleServiceImpl impl_pb;
AnotherThriftServiceImpl impl_thrift;
server_srpc.add_service(&impl_pb);
server_srpc.add_service(&impl_thrift);
server_srpc_http.add_service(&impl_pb);
server_srpc_http.add_service(&impl_thrift);
server_brpc.add_service(&impl_pb);
server_thrift.add_service(&impl_thrift);
server_trpc.add_service(&impl_pb);
server_trpc_http.add_service(&impl_pb);
server_srpc.start(1412);
server_srpc_http.start(8811);
server_brpc.start(2020);
server_thrift.start(9090);
server_trpc.start(2022);
server_trpc_http.start(8822);
getchar();
server_trpc_http.stop();
server_trpc.stop();
server_thrift.stop();
server_brpc.stop();
server_srpc_http.stop();
server_srpc.stop();
return 0;
}
~~~
## RPC Client
- Each Client corresponds to one specific target/one specific cluster
- Each Client corresponds to one specific network communication protocol
- Each Client corresponds to one specific IDL
### Sample
You can follow the detailed example below:
- Following the above example, the client is relatively simple and you can call the method directly.
- Use `Example::XXXClient` to create a client instance of some RPC. The IP+port or URL of the target is required.
- With the client instance, directly call the rpc function `Echo`. This is an asynchronous request, and the callback function will be invoked after the request is completed.
- For the usage of the RPC Context, please check [RPC Context](/docs/en/rpc.md#rpc-context).
~~~cpp
#include <stdio.h>
#include "example.srpc.h"
#include "workflow/WFFacilities.h"
using namespace srpc;
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
EchoRequest req;
req.set_message("Hello!");
req.set_name("SRPCClient");
WFFacilities::WaitGroup wait_group(1);
client.Echo(&req, [&wait_group](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
wait_group.done();
});
wait_group.wait();
return 0;
}
~~~
## RPC Context
- RPCContext is used specially to assist asynchronous interfaces, and can be used in both Service and Client.
- Each asynchronous interface will provide a Context, which offers higher-level functions, such as obtaining the remote IP, the connection seqid, and so on.
- Some functions on Context are unique to Server or Client. For example, you can set the compression mode of the response data on Server, and you can obtain the success or failure status of a request on Client.
- On the Context, you can use ``get_series()`` to obtain the SeriesWork, which is seamlessly integrated with the asynchronous mode of Workflow.
### RPCContext API - Common
#### `long long get_seqid() const;`
One complete communication consists of request+response. The sequence id of the communication on the current socket connection can be obtained, and seqid=0 indicates the first communication.
#### `std::string get_remote_ip() const;`
Get the remote IP address. IPv4/IPv6 is supported.
#### `int get_peer_addr(struct sockaddr *addr, socklen_t *addrlen) const;`
Get the remote address. The in/out parameter is the lower-level data structure sockaddr.
#### `const std::string& get_service_name() const;`
Get RPC Service Name.
#### `const std::string& get_method_name() const;`
Get RPC Method Name.
#### `SeriesWork *get_series() const;`
Get the SeriesWork of the current ServerTask/ClientTask.
#### `bool get_http_header(const std::string& name, std::string& value);`
If using the HTTP protocol, get the value in the HTTP header according to the name
### RPCContext API - Only for client done
#### `bool success() const;`
For client only. The success or failure of the request.
#### `int get_status_code() const;`
For client only. The rpc status code of the request.
#### `const char *get_errmsg() const;`
For client only. The error info of the request.
#### `int get_error() const;`
For client only. The error code of the request.
#### `void *get_user_data() const;`
For client only. Get the user\_data of the ClientTask. If a user generates a task through the ``create_xxx_task()`` interface, the context can be recorded in the user_data field. You can set that field when creating the task, and retrieve it in the callback function.
### RPCContext API - Only for server process
#### `void set_data_type(RPCDataType type);`
For Server only. Set the data packaging type
- RPCDataProtobuf
- RPCDataThrift
- RPCDataJson
#### `void set_compress_type(RPCCompressType type);`
For Server only. Set the data compression type (note: the compression type for the Client is set on Client or Task)
- RPCCompressNone
- RPCCompressSnappy
- RPCCompressGzip
- RPCCompressZlib
- RPCCompressLz4
#### `void set_attachment_nocopy(const char *attachment, size_t len);`
For Server only. Set the attachment.
#### `bool get_attachment(const char **attachment, size_t *len) const;`
For Server only. Get the attachment.
#### `void set_reply_callback(std::function<void (RPCContext *ctx)> cb);`
For Server only. Set reply callback, which is called after the operating system successfully writes the data into the socket buffer.
#### `void set_send_timeout(int timeout);`
For Server only. Set the maximum time for sending the message, in milliseconds. -1 indicates unlimited time.
#### `void set_keep_alive(int timeout);`
For Server only. Set the maximum connection keep-alive time, in milliseconds. -1 indicates unlimited time.
#### `bool set_http_code(int code);`
For Server only. If using the HTTP protocol, set the HTTP status code. It only takes effect when the srpc framework handles the request correctly.
#### `bool set_http_header(const std::string& name, const std::string& value);`
For Server only. If using the HTTP protocol, set into http header. If the name is set, old value will be overwritten.
#### `bool add_http_header(const std::string& name, const std::string& value);`
For Server only. If using the HTTP protocol, set into http header. Will keep multiple values if the name is set.
#### `void log(const RPCLogVector& fields);`
For Server only. For transparent data transmission, please refer to the log semantics in OpenTelemetry.
#### `void baggage(const std::string& key, const std::string& value);`
For Server only. For transparent data transmission, please refer to the baggage semantics in OpenTelemetry.
#### `void set_json_add_whitespace(bool on);`
For Server only. For JsonPrintOptions, whether to add white space and so on to make JSON output easy to read.
#### `void set_json_always_print_enums_as_ints(bool flag);`
For Server only. For JsonPrintOptions, whether to always print enums as ints.
#### `void set_json_preserve_proto_field_names(bool flag);`
For Server only. For JsonPrintOptions, whether to preserve proto field names.
#### `void set_json_always_print_primitive_fields(bool flag);`
For Server only. For JsonPrintOptions, whether to always print primitive fields.
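A rough sketch combining several of the HTTP-related server interfaces above; it assumes the service is added to an HTTP-capable server (e.g. SRPCHttpServer) so that the HTTP header and status calls take effect:
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
    void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
    {
        std::string agent;
        if (ctx->get_http_header("User-Agent", agent))
            ctx->log({{"event", "info"}, {"user_agent", agent}});

        response->set_message("Hi, " + request->name());

        ctx->set_http_header("server_key", "server_value"); // overwrite if already set
        ctx->add_http_header("extra_key", "extra_value");   // may keep multiple values
        ctx->set_http_code(200);
        ctx->set_json_add_whitespace(true); // pretty-print when the reply is JSON
    }
};
~~~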
## RPC Options
### Server Parameters
| Name | Default value | Description |
| ------------------------ | --------------------------- | ------------------------------------------------------------ |
| max\_connections | 2000 | The maximum number of connections for the Server. The default value is 2000. |
| peer\_response\_timeout | 10 \* 1000 | The maximum time for each read IO. The default value is 10 seconds. |
| receive\_timeout | -1 | The maximum read time of each complete message. -1 indicates unlimited time. |
| keep\_alive\_timeout | 60 \* 1000 | Maximum keep-alive time for an idle connection. -1 indicates no disconnection. 0 indicates a short connection. The default keep-alive time for a long connection is 60 seconds. |
| request\_size\_limit | 2LL \* 1024 \* 1024 \* 1024 | Request packet size limit. Maximum: 2 GB |
| ssl\_accept\_timeout | 10 \* 1000 | SSL connection timeout. The default value is 10 seconds. |
### Client Parameters
| Name | Default value | Description |
| ------------ | ----------------------------------- | ------------------------------------------------------------ |
| host | "" | Target host, which can be an IP address or a domain name |
| port | 1412 | Destination port number. The default value is 1412 |
| is\_ssl | false | SSL switch. The default value is off |
| url | "" | The URL is valid only when the host is empty. URL will override three items: host/port/is\_ssl |
| task\_params | The default configuration for tasks | See the configuration items below |
### Task Parameters
| Name | Default value | Description |
| -------------------- | ---------------- | ------------------------------------------------------------ |
| send\_timeout | -1 | Maximum time for sending the message. The default value is unlimited time. |
| receive\_timeout | -1 | Maximum time for receiving a response. The default value is unlimited time. |
| watch\_timeout | 0 | The maximum time of the first reply from remote. The default value is 0, indicating unlimited time. |
| keep\_alive\_timeout | 30 \* 1000 | The maximum keep-alive time for idle connections. -1 indicates no disconnection. The default value is 30 seconds. |
| retry\_max | 0 | Maximum number of retries. The default value is 0, indicating no retry. |
| compress\_type | RPCCompressNone | Compression type. The default value is no compression. |
| data\_type | RPCDataUndefined | The data type of network packets. The default value is consistent with the default value for RPC. For SRPC-Http protocol, it is JSON, and for others, they are the corresponding IDL types. |
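A hedged sketch of overriding a few of the server parameters above; the struct name `RPCServerParams` and the default macro `RPC_SERVER_PARAMS_DEFAULT` are assumptions modeled on the client-side naming and should be checked against [rpc_options.h](/src/rpc_options.h):
~~~cpp
struct RPCServerParams server_params = RPC_SERVER_PARAMS_DEFAULT; // assumed default macro
server_params.max_connections = 4096;                  // allow more concurrent connections
server_params.keep_alive_timeout = 30 * 1000;          // 30-second keep-alive for idle connections
server_params.request_size_limit = 64LL * 1024 * 1024; // cap request packets at 64 MB

SRPCServer server(&server_params);
~~~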
## Integrating with the asynchronous Workflow framework
### 1. Server
You can follow the detailed example below:
- Echo RPC sends an HTTP request to the upstream modules when it receives the request.
- After the request to the upstream modules is completed, the server populates the body of HTTP response into the message of the response and send a reply to the client.
- We don't want to block/occupy the handler thread, so the request to the upstream must be asynchronous.
- First, we can use `WFTaskFactory::create_http_task()` of the factory of Workflow to create an asynchronous http_task.
- Then, we use `ctx->get_series()` of the RPCContext to get the SeriesWork of the current ServerTask.
- Finally, we use the `push_back()` interface of the SeriesWork to append the http\_task to the SeriesWork.
~~~cpp
class ExampleServiceImpl : public Example::Service
{
public:
void Echo(EchoRequest *request, EchoResponse *response, RPCContext *ctx) override
{
auto *http_task = WFTaskFactory::create_http_task("https://www.sogou.com", 0, 0,
[request, response](WFHttpTask *task) {
if (task->get_state() == WFT_STATE_SUCCESS)
{
const void *data;
size_t len;
task->get_resp()->get_parsed_body(&data, &len);
response->mutable_message()->assign((const char *)data, len);
}
else
response->set_message("Error: " + std::to_string(task->get_error()));
printf("Server Echo()\nget_req:\n%s\nset_resp:\n%s\n",
request->DebugString().c_str(),
response->DebugString().c_str());
});
ctx->get_series()->push_back(http_task);
}
};
~~~
### 2. Client
You can follow the detailed example below:
- We send two requests in parallel. One is an RPC request and the other is an HTTP request.
- After both requests are finished, we initiate a calculation task again to calculate the sum of the squares of the two numbers.
- First, use `create_Echo_task()` of the RPC Client to create an rpc\_task, which is an asynchronous RPC network request.
- Then, use `WFTaskFactory::create_http_task` and `WFTaskFactory::create_go_task` in the factory of Workflow to create an asynchronous network task http\_task and an asynchronous computing task calc\_task respectively.
- Finally, use the serial-parallel graph to organize three asynchronous tasks, in which the multiplication sign indicates parallel tasks and the greater than sign indicates serial tasks and then execute ``start()``.
~~~cpp
void calc(int x, int y)
{
int z = x * x + y * y;
printf("calc result: %d\n", z);
}
int main()
{
Example::SRPCClient client("127.0.0.1", 1412);
auto *rpc_task = client.create_Echo_task([](EchoResponse *response, RPCContext *ctx) {
if (ctx->success())
printf("%s\n", response->DebugString().c_str());
else
printf("status[%d] error[%d] errmsg:%s\n",
ctx->get_status_code(), ctx->get_error(), ctx->get_errmsg());
});
auto *http_task = WFTaskFactory::create_http_task("https://www.sogou.com", 0, 0, [](WFHttpTask *task) {
if (task->get_state() == WFT_STATE_SUCCESS)
{
std::string body;
const void *data;
size_t len;
task->get_resp()->get_parsed_body(&data, &len);
body.assign((const char *)data, len);
printf("%s\n\n", body.c_str());
}
else
printf("Http request fail\n\n");
});
auto *calc_task = WFTaskFactory::create_go_task(calc, 3, 4);
EchoRequest req;
req.set_message("Hello!");
req.set_name("1412");
rpc_task->serialize_input(&req);
WFFacilities::WaitGroup wait_group(1);
SeriesWork *series = Workflow::create_series_work(http_task, [&wait_group](const SeriesWork *) {
wait_group.done();
});
series->push_back(rpc_task);
series->push_back(calc_task);
series->start();
wait_group.wait();
return 0;
}
~~~
### 3. Upstream
SRPC can directly use any component of Workflow; the most commonly used is [Upstream](https://github.com/sogou/workflow/blob/master/docs/en/about-upstream.md), and any kind of SRPC client can use Upstream.
You may use the example below to construct a client that can use Upstream through parameters:
```cpp
#include "workflow/UpstreamManager.h"
int main()
{
// 1. create upstream and add server instances
UpstreamManager::upstream_create_weighted_random("echo_server", true);
UpstreamManager::upstream_add_server("echo_server", "127.0.0.1:1412");
UpstreamManager::upstream_add_server("echo_server", "192.168.10.10");
UpstreamManager::upstream_add_server("echo_server", "internal.host.com");
// 2. create params and fill upstream name
RPCClientParams client_params = RPC_CLIENT_PARAMS_DEFAULT;
client_params.host = "srpc::echo_server"; // this scheme only used when upstream URI parsing
client_params.port = 1412; // this port only used when upstream URI parsing and will not affect the select of instances
// 3. construct client by params, the rest of usage is similar as other tutorials
Example::SRPCClient client(&client_params);
...
```
If we use the **ConsistentHash** or **Manual** upstream, we often need to distinguish different tasks for the selection algorithm. At this time, we may use the `int set_uri_fragment(const std::string& fragment);` interface on the client task to set request-level related information.
This field is the fragment in the URI. For the semantics, please refer to [RFC 3986 3.5-Fragment](https://datatracker.ietf.org/doc/html/rfc3986#section-3.5); any information that needs to use the fragment (such as information required by some other selection policy) may use this field as well.
srpc-0.10.1/docs/images/ 0000775 0000000 0000000 00000000000 14545022514 0014737 5 ustar 00root root 0000000 0000000 srpc-0.10.1/docs/images/benchmark1.png 0000664 0000000 0000000 00000116366 14545022514 0017475 0 ustar 00root root 0000000 0000000 [binary PNG image data: benchmark1.png]