Hi, I'm still new to TensorFlow. I followed a tutorial to create a translation model, and I want to deploy it on a Raspberry Pi device. First, I need to convert the model to a TensorFlow Lite model, which I do with this code:
import tensorflow as tf
import tensorflow_text as text  # noqa: F401 -- importing registers the TF-Text custom ops the SavedModel uses

# Directory produced by the tutorial's export step.
SAVED_MODEL_DIR = "models/translator_id-en"

# Build a converter directly from the SavedModel directory.
converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_DIR)

# The model graph contains ops (tokenizer, hash-table lookups) that have no
# builtin TFLite kernels, so allow selected TensorFlow ops as well; running
# the result requires linking the Flex delegate in the interpreter.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,
    tf.lite.OpsSet.SELECT_TF_OPS,
]
converter._experimental_lower_tensor_list_ops = False
converter.allow_custom_ops = True

# Convert and write the flatbuffer to disk.
tflite_model = converter.convert()
with open("models/model.tflite", "wb") as f:
    f.write(tflite_model)
When I run it, I get these messages in the terminal:
Resource ops: HashTableV2, LookupTableExportV2, LookupTableFindV2, LookupTableSizeV2, WordpieceTokenizeWithOffsets
Details:
tf.HashTableV2() -> (tensor<!tf_type.resource>) : {container = "", device = "", key_dtype = !tf_type.string, shared_name = "hash_table_en_vocab.txt_-2_-1_load_26_656", use_node_name_sharing = true, value_dtype = i64}
tf.HashTableV2() -> (tensor<!tf_type.resource>) : {container = "", device = "", key_dtype = !tf_type.string, shared_name = "hash_table_id_vocab.txt_-2_-1_load_26_1238", use_node_name_sharing = true, value_dtype = i64}
tf.LookupTableExportV2(tensor<!tf_type.resource>) -> (tensor<*x!tf_type.string>, tensor<*xi64>) : {device = ""}
tf.LookupTableFindV2(tensor<!tf_type.resource>, tensor<?x!tf_type.string>, tensor<i64>) -> (tensor<*xi64>) : {device = ""}
tf.LookupTableSizeV2(tensor<!tf_type.resource>) -> (tensor<i64>) : {device = ""}
tf.WordpieceTokenizeWithOffsets(tensor<0x!tf_type.string>, tensor<!tf_type.resource>) -> (tensor<?x!tf_type.string>, tensor<1xi64>, tensor<?xi64>, tensor<?xi64>) : {device = "", max_bytes_per_word = 100 : i64, max_chars_per_token = 0 : i64, output_row_partition_type = "row_splits", split_unknown_characters = false, suffix_indicator = "##", unknown_token = "[UNK]", use_unknown_token = true}
tf.WordpieceTokenizeWithOffsets(tensor<?x!tf_type.string>, tensor<!tf_type.resource>) -> (tensor<?x!tf_type.string>, tensor<?xi64>, tensor<?xi64>, tensor<?xi64>) : {device = "", max_bytes_per_word = 100 : i64, max_chars_per_token = 0 : i64, output_row_partition_type = "row_splits", split_unknown_characters = false, suffix_indicator = "##", unknown_token = "[UNK]", use_unknown_token = true}
2024-05-08 18:49:15.387749: W tensorflow/compiler/mlir/lite/flatbuffer_export.cc:1918] TFLite interpreter needs to link Flex delegate in order to run the model since it contains the following Select TFop(s):
Flex ops: FlexBincount, FlexCaseFoldUTF8, FlexConcatV2, FlexEnsureShape, FlexHashTableV2, FlexLookupTableExportV2, FlexLookupTableFindV2, FlexLookupTableSizeV2, FlexMatrixBandPart, FlexNormalizeUTF8, FlexRaggedGather, FlexRaggedTensorToTensor, FlexRange, FlexRegexSplitWithOffsets, FlexStaticRegexFullMatch, FlexStaticRegexReplace, FlexStridedSlice, FlexStringSplitV2, FlexStringToHashBucketFast, FlexTensorListLength, FlexTensorListReserve, FlexTensorListResize, FlexTensorListSetItem, FlexTensorListStack, FlexUnsortedSegmentJoin, FlexUnsortedSegmentSum, FlexWordpieceTokenizeWithOffsets
Did I miss something? Also, I have no idea how to perform translation with the TFLite model. With my regular SavedModel I use something like this:
# Force CPU-only execution; this must be set before TensorFlow initializes.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

import re

import tensorflow as tf
import tensorflow_text as text  # noqa: F401 -- registers the TF-Text ops the SavedModel needs

# Load the exported translator once, at module import time.
reloaded = tf.saved_model.load("models/translator_id-en")
def translate(ind_text):
    """Translate an Indonesian sentence to English with the loaded SavedModel.

    Args:
        ind_text: Source sentence as a plain Python string.

    Returns:
        The translated sentence as a string, with the whitespace that the
        model emits on both sides of each punctuation character stripped
        (e.g. "hello , world" -> "hello,world").
    """
    decoded = reloaded(tf.constant(ind_text)).numpy().decode("utf-8")
    return re.sub(r"\s*([^\s\w\d])\s*", r"\1", decoded)
# Quick sanity check: echo the source sentence, then its translation.
source_sentence = "berapa harga tiket ini?"
print("og: " + source_sentence)
# Two-argument print keeps the original "tr: " + sep spacing intact.
print("tr: ", translate(source_sentence))
Thanks in advance.