diff --git a/wide_n_deep_serving_client/wide_n_deep_serving_client.cpp b/wide_n_deep_serving_client/wide_n_deep_serving_client.cpp
new file mode 100644
index 0000000..e6d8343
--- /dev/null
+++ b/wide_n_deep_serving_client/wide_n_deep_serving_client.cpp
@@ -0,0 +1,112 @@
+/*
+ * wide_n_deep_serving_client.cpp
+ *
+ *  Created on: Oct 28, 2017
+ *      Author: lambdaji
+ */
+
+#include "wide_n_deep_serving_client.h"
+#include "google/protobuf/map.h"
+#include "tensorflow/core/example/example.pb.h"
+#include "tensorflow/core/example/feature.pb.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+#include <iostream>
+#include <string>
+
+typedef google::protobuf::Map<tensorflow::string, tensorflow::TensorProto> OutMap;
+
+using grpc::Channel;
+using grpc::ClientContext;
+using grpc::Status;
+
+using tensorflow::serving::PredictRequest;
+using tensorflow::serving::PredictResponse;
+
+int ServingClient::callPredict(const std::string& model_name, const std::string& model_signature_name, std::map<std::string, std::string>& result)
+{
+    PredictRequest predictRequest;
+    PredictResponse response;
+    ClientContext context;
+    tensorflow::TensorProto req_tp;
+    tensorflow::Example example;
+    //int64_t iBegin = TNOWMS;
+    //int64_t iEnd   = TNOWMS;
+
+    predictRequest.mutable_model_spec()->set_name(model_name);
+    predictRequest.mutable_model_spec()->set_signature_name(model_signature_name); //serving_default
+
+    //iBegin = TNOWMS;
+    google::protobuf::Map<tensorflow::string, tensorflow::TensorProto>& inputs = *predictRequest.mutable_inputs();
+    google::protobuf::Map<tensorflow::string, tensorflow::Feature>& feature_dict = *example.mutable_features()->mutable_feature();
+
+    //fill the features of one sample into a tf.Example
+    feature_dict["age"].mutable_float_list()->add_value(25);
+    feature_dict["capital_gain"].mutable_float_list()->add_value(0);
+    feature_dict["capital_loss"].mutable_float_list()->add_value(0);
+    feature_dict["education"].mutable_bytes_list()->add_value("11th");
+    feature_dict["education_num"].mutable_float_list()->add_value(7);
+    feature_dict["gender"].mutable_bytes_list()->add_value("Male");
+    feature_dict["hours_per_week"].mutable_float_list()->add_value(40);
+    feature_dict["native_country"].mutable_bytes_list()->add_value("United-States");
+    feature_dict["occupation"].mutable_bytes_list()->add_value("Machine-op-inspct");
+    feature_dict["relationship"].mutable_bytes_list()->add_value("Own-child");
+    feature_dict["workclass"].mutable_bytes_list()->add_value("Private");
+
+    //serialize the Example into the request's string input tensor
+    std::string serialized;
+    example.SerializeToString(&serialized);
+    req_tp.mutable_tensor_shape()->add_dim()->set_size(1); //set_size(5) for batch predicting
+    req_tp.set_dtype(tensorflow::DataType::DT_STRING);
+    req_tp.add_string_val(serialized); //1st
+    //req_tp.add_string_val(serialized); //2nd
+    //req_tp.add_string_val(serialized); //3rd
+    //req_tp.add_string_val(serialized); //4th
+    //req_tp.add_string_val(serialized); //5th
+    inputs["inputs"] = req_tp;
+
+    //iEnd = TNOWMS;
+    //TLOGDEBUG("ServingClient::callPredict sample_to_tfrequest timecost(ms):" << (iEnd - iBegin) << endl);
+
+    //predict
+    //iBegin = TNOWMS;
+    Status status = _stub->Predict(&context, predictRequest, &response);
+    //iEnd = TNOWMS;
+    //TLOGDEBUG("ServingClient::callPredict _stub->Predict timecost(ms):" << (iEnd - iBegin) << endl);
+
+    if (status.ok())
+    {
+        //TLOGDEBUG("ServingClient::callPredict call predict ok" << endl);
+        //TLOGDEBUG("ServingClient::callPredict outputs size is " << response.outputs_size() << endl);
+
+        OutMap& map_outputs = *response.mutable_outputs();
+        OutMap::iterator iter;
+        int output_index = 0;
+
+        for (iter = map_outputs.begin(); iter != map_outputs.end(); ++iter)
+        {
+            tensorflow::TensorProto& result_tensor_proto = iter->second;
+            tensorflow::Tensor tensor;
+            bool converted = tensor.FromProto(result_tensor_proto);
+            if (converted)
+            {
+                //TLOGDEBUG("ServingClient::callPredict the result tensor[" << output_index << "] is:" << tensor.SummarizeValue(10) << endl);
+                result[iter->first] = tensor.SummarizeValue(10);
+            }
+            else
+            {
+                //TLOGDEBUG("ServingClient::callPredict the result tensor[" << output_index << "] convert failed." << endl);
+            }
+            ++output_index;
+        }
+
+        return 0;
+    }
+    else
+    {
+        //TLOGDEBUG("ServingClient::callPredict gRPC call return code: " << status.error_code() << ": " << status.error_message() << endl);
+        return -1;
+    }
+}
+
diff --git a/wide_n_deep_serving_client/wide_n_deep_serving_client.h b/wide_n_deep_serving_client/wide_n_deep_serving_client.h
new file mode 100644
index 0000000..6f8078c
--- /dev/null
+++ b/wide_n_deep_serving_client/wide_n_deep_serving_client.h
@@ -0,0 +1,41 @@
+/*
+ * wide_n_deep_serving_client.h
+ *
+ *  Created on: Oct 28, 2017
+ *      Author: lambdaji
+ */
+
+#ifndef WIDE_N_DEEP_SERVING_CLIENT_H_
+#define WIDE_N_DEEP_SERVING_CLIENT_H_
+
+#include <iostream>
+#include <fstream>
+#include <memory>
+#include <string>
+#include <map>
+#include <vector>
+
+#include "grpc++/create_channel.h"
+#include "grpc++/security/credentials.h"
+#include "tensorflow_serving/apis/prediction_service.grpc.pb.h"
+
+using tensorflow::serving::PredictionService;
+
+class ServingClient
+{
+public:
+    static std::shared_ptr<ServingClient> createClient(const std::string& sServerPort){
+        std::shared_ptr<ServingClient> p = std::make_shared<ServingClient>(grpc::CreateChannel(sServerPort, grpc::InsecureChannelCredentials()));
+        return p;
+    }
+public:
+    ServingClient(const std::shared_ptr<grpc::Channel>& channel) : _stub(PredictionService::NewStub(channel)) { }
+    int callPredict(const std::string& model_name, const std::string& model_signature_name, std::map<std::string, std::string>& result);
+
+private:
+    std::unique_ptr<PredictionService::Stub> _stub;
+};
+
+
+#endif /* WIDE_N_DEEP_SERVING_CLIENT_H_ */
diff --git a/wide_n_deep_serving_client/wide_n_deep_serving_client.md b/wide_n_deep_serving_client/wide_n_deep_serving_client.md
new file mode 100644
index 0000000..a8fed30
--- /dev/null
+++ b/wide_n_deep_serving_client/wide_n_deep_serving_client.md
@@ -0,0 +1,12 @@
+Serving a TensorFlow Wide & Deep Model (tf.estimator) with TF Serving in C++
+
+This tutorial builds on the code developed in the TensorFlow Wide & Deep Learning Tutorial, so if you haven't completed that tutorial yet, please take a look at it first.
+
+This tutorial references a Python version from https://github.com/MtDersvan/tf_playground/tree/master/wide_and_deep_tutorial.
+
+You may want to take a look at the following proto files first:
+ - tensorflow/core/example/example.proto
+ - tensorflow/core/example/feature.proto
+ - tensorflow/core/framework/tensor.proto
+ - tensorflow_serving/apis/predict.proto
+ - tensorflow_serving/apis/model.proto
\ No newline at end of file
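
For completeness, a minimal sketch of a driver that exercises the client added in this diff. This main.cpp is not part of the diff; the server address ("localhost:9000"), model name ("wide_deep") and signature name ("serving_default") are assumptions that must match how the exported model is actually served by tensorflow_model_server.

// main.cpp -- hypothetical driver, not included in this diff
#include <iostream>
#include <map>
#include <memory>
#include <string>

#include "wide_n_deep_serving_client.h"

int main()
{
    // Assumed endpoint; adjust to your tensorflow_model_server host:port.
    std::shared_ptr<ServingClient> client = ServingClient::createClient("localhost:9000");

    // Assumed model/signature names; they depend on how the estimator was exported.
    std::map<std::string, std::string> result;
    int ret = client->callPredict("wide_deep", "serving_default", result);
    if (ret != 0)
    {
        std::cout << "predict failed" << std::endl;
        return -1;
    }

    // Each entry maps an output tensor name to the summary string
    // produced by tensor.SummarizeValue(10) inside callPredict().
    for (const auto& kv : result)
    {
        std::cout << kv.first << ": " << kv.second << std::endl;
    }
    return 0;
}

Note that the values returned this way are human-readable summaries rather than raw tensors; a production caller would read the TensorProto fields of the response directly instead of going through SummarizeValue().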