I have recently tried to generate an application with the pythonapp template from the GitHub project robot-jumpstarter (https://github.com/aldebaran/robot-jumpstarter) in order to localize Pepper. My basic idea is to combine the LandmarkDetector module with the generated app "Lokalisierung" (German for "localization").
You can read the whole code of "LandmarkDetector.py", "main.py" and "MainLandmarkDetection.py" here:
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-

"""Example: Demonstrates a way to localize the robot with ALLandMarkDetection"""

import qi
import time
import sys
import argparse
import math
import almath


class LandmarkDetector(object):
    """
    We first instantiate a proxy to the ALLandMarkDetection module.
    Note that this module should be loaded on the robot's naoqi.
    The module outputs its results in ALMemory in a variable called
    "LandmarkDetected". We then read this ALMemory value and check
    whether we get interesting things. After that we get the position
    of the landmark relative to the robot.
    """

    def __init__(self, app):
        """
        Initialisation of qi framework and event detection.
        """
        super(LandmarkDetector, self).__init__()
        app.start()
        session = app.session
        # Get the service ALMemory.
        self.memory = session.service("ALMemory")
        # Get the services ALMotion & ALRobotPosture.
        self.motion_service = session.service("ALMotion")
        self.posture_service = session.service("ALRobotPosture")
        # Connect the event callback.
        self.subscriber = self.memory.subscriber("LandmarkDetected")
        print "self.subscriber = self.memory.subscriber(LandmarkDetected)"
        self.subscriber.signal.connect(self.on_landmark_detected)
        print "self.subscriber.signal.connect(self.on_landmark_detected)"
        # Get the services ALTextToSpeech and ALLandMarkDetection.
        self.tts = session.service("ALTextToSpeech")
        self.landmark_detection = session.service("ALLandMarkDetection")
        # print "self.landmark_detection is", repr(self.landmark_detection)
        self.landmark_detection.subscribe("LandmarkDetector", 500, 0.0)
        print "self.landmark_detection.subscribe(LandmarkDetector, 500, 0.0 )"
        self.got_landmark = False
        # Set here the size of the landmark in meters (0.05 or 0.06?).
        self.landmarkTheoreticalSize = 0.06
        # Set here the current camera ("CameraTop" or "CameraBottom").
        self.currentCamera = "CameraTop"

    def on_landmark_detected(self, markData):
        """
        Callback for event LandmarkDetected.
        """
        while markData == []:  # empty value when the landmark disappears
            self.got_landmark = False
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
        if not self.got_landmark:  # only speak the first time a landmark appears
            self.got_landmark = True
            # stop.motion_service.moveTo
            print "Ich sehe eine Landmarke! "
            self.tts.say("Ich sehe eine Landmarke! ")
            # Retrieve landmark center position in radians.
            wzCamera = markData[1][0][0][1]
            wyCamera = markData[1][0][0][2]
            # Retrieve landmark angular size in radians.
            angularSize = markData[1][0][0][3]
            # Compute distance to landmark.
            distanceFromCameraToLandmark = self.landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))
            # Get current camera position in NAO space.
            transform = self.motion_service.getTransform(self.currentCamera, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)
            # Compute the rotation to point towards the landmark.
            cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
            # Compute the translation to reach the landmark.
            cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
            # Combine all transformations to get the landmark position in NAO space.
            robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
            # robotTurnAroundAngle = almath.rotationFromAngleDirection(0, 1, 1, 1)
            # print "robotTurnAroundAngle = ", robotTurnAroundAngle
            print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
            print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
            print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

    def run(self):
        """
        Loop on, wait for events until manual interruption.
        """
        # Wake up robot
        self.motion_service.wakeUp()
        # Send robot to Pose Init
        self.posture_service.goToPosture("StandInit", 0.5)
        # Example showing how to get a simplified robot position in world.
        useSensorValues = False
        result = self.motion_service.getRobotPosition(useSensorValues)
        print "Robot Position", result
        # Example showing how to use this information to know the robot's displacement.
        useSensorValues = False
        # initRobotPosition = almath.Pose2D(self.motion_service.getRobotPosition(useSensorValues))
        # Make the robot move
        for i in range(1, 12, 1):
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
            print "self.motion_service.moveTo(0, 0, (0.1)*math.pi)"
        print "Starting LandmarkDetector"
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print "Interrupted by user, stopping LandmarkDetector"
            self.landmark_detection.unsubscribe("LandmarkDetector")
            # stop
            sys.exit(0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="10.0.0.10",
                        help="Robot IP address. On robot or Local Naoqi: use '10.0.0.10'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    try:
        # Initialize qi framework.
        connection_url = "tcp://" + args.ip + ":" + str(args.port)
        app = qi.Application(["LandmarkDetector", "--qi-url=" + connection_url])
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) + ".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    landmark_detector = LandmarkDetector(app)
    landmark_detector.run()
""" A sample showing how to make a Python script as an app. """
version = "0.0.8"
copyright = "Copyright 2015, Aldebaran Robotics" author = 'YOURNAME' email = 'YOUREMAIL@aldebaran.com'
import stk.runner
import stk.events
import stk.services
import stk.logging
class Activity(object):
"A sample standalone app, that demonstrates simple Python usage" APP_ID = "com.aldebaran.lokalisierung"
def __init__(self, qiapp):
self.qiapp = qiapp
self.events = stk.events.EventHelper(qiapp.session)
self.s = stk.services.ServiceCache(qiapp.session)
self.logger = stk.logging.get_logger(qiapp.session, self.APP_ID)
def on_touched(self, *args):
"Callback for tablet touched."
if args:
self.events.disconnect("ALTabletService.onTouchDown")
self.logger.info("Tablet touched: " + str(args))
self.s.ALTextToSpeech.say("Yay!")
self.stop()
def on_start(self):
"Ask to be touched, waits, and exits."
# Two ways of waiting for events
# 1) block until it's called
self.s.ALTextToSpeech.say("Touch my forehead.")
self.logger.warning("Listening for touch...")
while not self.events.wait_for("FrontTactilTouched"):
pass
# 2) explicitly connect a callback
if self.s.ALTabletService:
self.events.connect("ALTabletService.onTouchDown", self.on_touched)
self.s.ALTextToSpeech.say("okay, now touch my tablet.")
# (this allows to simltaneously speak and watch an event)
else:
self.s.ALTextToSpeech.say("touch my tablet ... oh. " + \
"I don't haave one.")
self.stop()
def stop(self):
"Standard way of stopping the application."
self.qiapp.stop()
def on_stop(self):
"Cleanup"
self.logger.info("Application finished.")
self.events.clear()
if __name__ == "__main__":
stk.runner.run_activity(Activity)
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-

"""A sample showing how to make a Python script as an app to localize
the robot with ALLandMarkDetection"""

version = "0.0.8"
copyright = "Copyright 2015, Aldebaran Robotics"
author = 'YOURNAME'
email = 'YOUREMAIL@aldebaran.com'

import stk.runner
import stk.events
import stk.services
import stk.logging
import time
import sys
import math
import almath


class Activity(object):
    "A sample standalone app, that demonstrates simple Python usage"
    APP_ID = "com.aldebaran.lokalisierung"

    def __init__(self, qiapp):
        self.qiapp = qiapp
        self.events = stk.events.EventHelper(qiapp.session)
        self.s = stk.services.ServiceCache(qiapp.session)
        self.logger = stk.logging.get_logger(qiapp.session, self.APP_ID)
        self.qiapp.start()
        session = qiapp.session
        # Get the service ALMemory.
        self.memory = session.service("ALMemory")
        # Get the services ALMotion & ALRobotPosture.
        self.motion_service = session.service("ALMotion")
        self.posture_service = session.service("ALRobotPosture")
        # Connect the event callback.
        self.subscriber = self.memory.subscriber("LandmarkDetected")
        print "self.subscriber = self.memory.subscriber(LandmarkDetected)"
        self.subscriber.signal.connect(self.on_landmark_detected)
        print "self.subscriber.signal.connect(self.on_landmark_detected)"
        # Get the services ALTextToSpeech and ALLandMarkDetection.
        self.tts = session.service("ALTextToSpeech")
        self.landmark_detection = session.service("ALLandMarkDetection")
        # print "self.landmark_detection is", repr(self.landmark_detection)
        self.landmark_detection.subscribe("Activity", 500, 0.0)
        print "self.landmark_detection.subscribe(Activity, 500, 0.0 )"
        self.got_landmark = False
        # Set here the size of the landmark in meters (0.05 or 0.06?).
        self.landmarkTheoreticalSize = 0.06
        # Set here the current camera ("CameraTop" or "CameraBottom").
        self.currentCamera = "CameraTop"

    def on_landmark_detected(self, markData):
        """
        Callback for event LandmarkDetected.
        """
        while markData == []:  # empty value when the landmark disappears
            self.got_landmark = False
            # self.motion_service.moveTo(0, 0, 0.1 * math.pi)
        if not self.got_landmark:  # only speak the first time a landmark appears
            self.got_landmark = True
            # stop.motion_service.moveTo
            print "Ich sehe eine Landmarke! "
            # self.tts.say("Ich sehe eine Landmarke! ")
            # Retrieve landmark center position in radians.
            wzCamera = markData[1][0][0][1]
            wyCamera = markData[1][0][0][2]
            # Retrieve landmark angular size in radians.
            angularSize = markData[1][0][0][3]
            # Compute distance to landmark.
            distanceFromCameraToLandmark = self.landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))
            # Get current camera position in NAO space.
            transform = self.motion_service.getTransform(self.currentCamera, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)
            # Compute the rotation to point towards the landmark.
            cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
            # Compute the translation to reach the landmark.
            cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
            # Combine all transformations to get the landmark position in NAO space.
            robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
            # robotTurnAroundAngle = almath.rotationFromAngleDirection(0, 1, 1, 1)
            # print "robotTurnAroundAngle = ", robotTurnAroundAngle
            print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
            print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
            print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

    def run(self):
        """
        Loop on, wait for events until manual interruption.
        """
        # Wake up robot
        self.motion_service.wakeUp()
        # Send robot to Pose Init
        self.posture_service.goToPosture("StandInit", 0.5)
        # Example showing how to get a simplified robot position in world.
        useSensorValues = False
        result = self.motion_service.getRobotPosition(useSensorValues)
        print "Robot Position", result
        # Example showing how to use this information to know the robot's displacement.
        useSensorValues = False
        # initRobotPosition = almath.Pose2D(self.motion_service.getRobotPosition(useSensorValues))
        # Make the robot move
        for i in range(1, 20, 1):
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
            print "self.motion_service.moveTo(0, 0, (0.1)*math.pi)"
        print "Starting Activity"
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print "Interrupted by user, stopping Activity"
            self.landmark_detection.unsubscribe("Activity")
            # stop
            sys.exit(0)

    def stop(self):
        "Standard way of stopping the application."
        self.qiapp.stop()

    def on_stop(self):
        "Cleanup"
        self.logger.info("Application finished.")
        self.events.clear()


if __name__ == "__main__":
    stk.runner.run_activity(Activity)
    landmark_detector = Activity()
    landmark_detector.run()
And I have a question about the parameter I should pass in

landmark_detector = Activity()

in line 157, almost at the end of the program, because of the error shown in the image. After reading the answers to the similar Stack Overflow question Python: TypeError: __init__() takes exactly 2 arguments (1 given), I am still confused. I think it should be a value that gets passed to "qiapp", but what value?
Best regards, Frederik
Actually, when you call
stk.runner.run_activity(Activity)
... it will instantiate that activity object for you, with the right parameters; you don't need the landmark_detector = Activity() lines at the end of MainLandmarkDetection.py.
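For illustration, with run_activity doing the instantiation, the end of MainLandmarkDetection.py only needs to be (a minimal sketch, keeping the rest of the file as posted):

if __name__ == "__main__":
    stk.runner.run_activity(Activity)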
And if this object has a method called on_start, that method will be called once everything is ready (so you may just need to rename your run() method to on_start()).
Note also that instead of calling sys.exit(0), you can just call self.stop() or self.qiapp.stop(), which is a bit cleaner in terms of letting the cleanup code in on_stop be called, if you need to unsubscribe from things etc.
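Putting those two points together, the tail of your Activity class could look roughly like this (a sketch; the movement and detection code from your run() stays as it is and is elided here):

    def on_start(self):
        # Former run(): called automatically by stk.runner once the app is ready.
        self.motion_service.wakeUp()
        self.posture_service.goToPosture("StandInit", 0.5)
        # ... same rotation loop and landmark handling as before ...
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print "Interrupted by user, stopping Activity"
            self.landmark_detection.unsubscribe("Activity")
            # Let on_stop() do the cleanup instead of killing the process with sys.exit(0).
            self.stop()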
See here for some documentation on stk.runner.
Note also that instead of doing
self.motion_service = session.service("ALMotion")
(...)
transform = self.motion_service.getTransform(self.currentCamera, 2, True)
you can directly do
transform = self.s.ALMotion.getTransform(self.currentCamera, 2, True)
... which (in my opinion) makes it easier to see what is being done exactly (and reduces the number of variables).
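For example, in on_landmark_detected the lookup could become (a sketch, assuming the ServiceCache is stored in self.s as in the generated template):

        # self.s resolves ALMotion on first use, so no separate motion_service attribute is needed.
        transform = self.s.ALMotion.getTransform(self.currentCamera, 2, True)
        robotToCamera = almath.Transform(almath.vectorFloat(transform))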