Sunday, 4 June 2017

gRPC Error 9 trying to serve custom model via TensorFlow Serving

In my attempt to better understand TF Serving, I have a simple custom TF model that computes a simple function of two numbers (z = 3*x + y) and should return the result. I have exported the model, and it seems to run successfully on localhost at port 9000. I am running the client from Node.js, and when I make the request, I receive the following error code 9, which seems to translate to "FailedPrecondition":

Error: {"code":9,"metadata":{"_internal_repr":{}}}

I have no clue where I am going wrong and was hoping someone may be able to help me out here.

I created and exported the model with the below code:

-- Simple Model --

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None))
y = tf.placeholder(tf.float32, shape=(None))
three = tf.Variable(3, dtype=tf.float32)
z = tf.scalar_mul(three, x) + y  # z = 3*x + y
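
For reference, the graph can be checked directly in a session before involving Serving at all; with x=5.0 and y=6.0 it should return 3*5 + 6 = 21.0:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # feed the same values the client will later send
    print(sess.run(z, feed_dict={x: 5.0, y: 6.0}))  # 21.0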

-- Export --

import os

model_version = 1
path = os.path.join("calc_test", str(model_version))
builder = tf.saved_model.builder.SavedModelBuilder(path)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    builder.add_meta_graph_and_variables(
        sess,
        tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            "my_predict": tf.saved_model.signature_def_utils.predict_signature_def(
                inputs={"xval": x, "yval": y},
                outputs={"spam": z})
        })
    builder.save()
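
After exporting, the SavedModel can be loaded back in Python to confirm the signature was actually written (a quick check using the TF 1.x loader API; the path and tag match the export above):

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], "calc_test/1")
    # expect to see "my_predict" with inputs xval/yval and output spam
    print(meta_graph.signature_def.keys())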

Finally, I set up a mnistclient.js file, which I run under Node.js. It contains the code below:

var grpc = require('grpc')
var PROTO_PATH = __dirname + '/../../protos/mnisttest.proto';

module.exports = (connection) => {
    var tensorflow_serving = grpc.load(PROTO_PATH).mnisttest;//.serving;
    console.log(tensorflow_serving);

    // the loaded proto package exposes PredictionService (see output below)
    var client = new tensorflow_serving.PredictionService(
        connection, grpc.credentials.createInsecure()
    );

    return { 

        predict: (buffer, fn) => {
            // building PredictRequest proto message
            const msg = {
                model_spec: {name: "saved_model"},
                inputs: {"xval":5.0, "yval":6.0}
            };

            console.log(client);

            client.predict(msg, (err, response) => {
                if(err){
                    console.log("Error: ",JSON.stringify(err));
                    console.log("Resp: ", response);
                    //return fn(err);
                    return;
                }

                console.log('Got message ', response);
            });

        } //End of predict
    } //End of return
};

function main() {
    var cli = module.exports('localhost:9000');
    cli.predict();
}

if( require.main === module){
    main();
}
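
For comparison, here is a minimal Python client sketch for the same request, using the tensorflow_serving gRPC stubs from the Serving tutorials. This is an assumption-laden sketch, not my actual client: it assumes the server was started with --model_name=saved_model (matching model_spec.name above), and unlike my Node.js code it sets signature_name explicitly and wraps the inputs as TensorProtos:

from grpc.beta import implementations
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2

channel = implementations.insecure_channel('localhost', 9000)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'saved_model'           # must match the served model name
request.model_spec.signature_name = 'my_predict'  # the key used in signature_def_map
request.inputs['xval'].CopyFrom(
    tf.contrib.util.make_tensor_proto(5.0, dtype=tf.float32))
request.inputs['yval'].CopyFrom(
    tf.contrib.util.make_tensor_proto(6.0, dtype=tf.float32))

result = stub.Predict(request, 10.0)  # 10 second timeout
print(result)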

The result I get from the console.logs above is this:

{ ModelSpec:
   { [Function: Message]
     encode: [Function],
     decode: [Function],
     decodeDelimited: [Function],
     decode64: [Function],
     decodeHex: [Function],
     decodeJSON: [Function] },
  PredictRequest:
   { [Function: Message]
     encode: [Function],
     decode: [Function],
     decodeDelimited: [Function],
     decode64: [Function],
     decodeHex: [Function],
     decodeJSON: [Function] },
  PredictResponse:
   { [Function: Message]
     encode: [Function],
     decode: [Function],
     decodeDelimited: [Function],
     decode64: [Function],
     decodeHex: [Function],
     decodeJSON: [Function] },
  PredictionService: { [Function: Client] service: { predict: [Object] } } }
Client { '$channel': Channel {} }
Error:  {"code":9,"metadata":{"_internal_repr":{}}}



via xtr33me
