Hi, I'm trying to build a motion-tracking application with MoveNet in React Native.
I've confirmed that keypoints are detected and printed to the console, but I'm having trouble enabling the tracker.
How can I enable the built-in keypoint tracker in MoveNet?
My source code is attached below.
import React, { useState, useEffect, useCallback, useMemo } from 'react';
import { View, StyleSheet, Platform, TouchableOpacity, Text } from 'react-native';
import Icon from 'react-native-vector-icons/Ionicons';
import { Colors } from 'react-native-paper';
import { Camera } from 'expo-camera';
import * as tf from '@tensorflow/tfjs';
import { cameraWithTensors } from '@tensorflow/tfjs-react-native';
import * as poseDetection from '@tensorflow-models/pose-detection';
import '@tensorflow/tfjs-backend-webgl';
import '@mediapipe/pose';
// Latest detected keypoints as [x, y, name] triples; [] when no pose is found.
// BUG FIX: the original line was truncated (`let coords =`), a syntax error.
let coords = [];
export const CameraView = () => {
const [hasPermission, setHasPermission] = useState(null);
const [poseDetector, setPoseDetector] = useState(null);
const [frameworkReady, setFrameworkReady] = useState(false);
const backCamera = Camera.Constants.Type.back
const frontCamera = Camera.Constants.Type.front
const [camType, setCamType] = useState(backCamera)
const TensorCamera = cameraWithTensors(Camera);
let requestAnimationFrameId = 0;
const textureDims = Platform.OS === "ios"? { width: 1080, height: 1920 } : { width: 1600, height: 1200 };
const tensorDims = { width: 152, height: 200 };
const iconPressed = useCallback(() => camType === backCamera? setCamType(frontCamera):setCamType(backCamera),[camType])
const model = poseDetection.SupportedModels.MoveNet;
const detectorConfig = {
modelType: poseDetection.movenet.modelType.MULTIPOSE_LIGHTNING,
enableTracking: true,
trackerType: poseDetection.TrackerType.Keypoint,
trackerConfig: {maxTracks: 4,
maxAge: 1000,
minSimilarity: 1,
keypointTrackerParams:{
keypointConfidenceThreshold: 1,
keypointFalloff: [],
minNumberOfKeypoints: 4
}
}
}
const detectPose = async (tensor) =>{
if(!tensor) return
const poses = await poseDetector.estimatePoses(tensor)
if (poses[0] !== undefined) {
const points = poses[0].keypoints.map(point => [point.x,point.y,point.name])
console.log(points)
coords = points
} else {
coords = []
}
///console.log(coords)
}
const handleCameraStream = (imageAsTensors) => {
const loop = async () => {
const nextImageTensor = await imageAsTensors.next().value;
await detectPose(nextImageTensor);
requestAnimationFrameId = requestAnimationFrame(loop);
};
if (true) loop();
}
useEffect(() => {
if(!frameworkReady) {
;(async () => {
const { status } = await Camera.requestPermissionsAsync();
console.log(`permissions status: ${status}`);
setHasPermission(status === 'granted');
await tf.ready();
setPoseDetector(await poseDetection.createDetector(model, detectorConfig))
setFrameworkReady(true);
})();
}
}, []);
useEffect(() => {
return () => {
cancelAnimationFrame(requestAnimationFrameId);
};
}, [requestAnimationFrameId]);
return(
<View style={styles.cameraView}>
<TensorCamera
style={styles.camera}
type={camType}
zoom={0}
cameraTextureHeight={textureDims.height}
cameraTextureWidth={textureDims.width}
resizeHeight={tensorDims.height}
resizeWidth={tensorDims.width}
resizeDepth={3}
onReady={(imageAsTensors) => handleCameraStream(imageAsTensors)}
autorender={true}
>
</TensorCamera>
<TouchableOpacity style={[styles.absoluteView]} activeOpacity={0.1}>
<Icon name="camera-reverse-outline" size={40} color="white" onPress={iconPressed}/>
</TouchableOpacity>
</View>
)
}
// Shared styles for CameraView.
const styles = StyleSheet.create({
  // Full-screen container for the camera preview.
  cameraView: {
    flex: 1,
  },
  // The camera fills its container.
  camera: {
    flex: 1,
  },
  // Floating camera-flip button anchored to the bottom-right corner.
  absoluteView: {
    position: 'absolute',
    right: 30,
    bottom: Platform.select({ ios: 40, android: 30 }),
    padding: 10,
  },
  // Small blue dot intended to mark a tracked keypoint.
  tracker: {
    position: 'absolute',
    width: 10,
    height: 10,
    borderRadius: 5,
    backgroundColor: Colors.blue500,
  },
});