


Running an Appium Unity App Client (OpenCV)

Tongchun 2017. 11. 29. 18:06

This is an automation script for a Unity app driven by Appium.

Because it is a Unity app, the screen is rendered as a single image and UI elements cannot be located the usual way. So I applied OpenCV and used image matching (template matching) instead.

For OpenCV itself, please refer to the URL below.

[http://dejavuqa.tistory.com/category/Python/OpenCV]


To use template matching, I wrote two functions.

One automatically applies Canny edge detection, and the other takes a screenshot of the device, matches the target image against it, and returns the coordinates of the center of the match.


First, auto_canny(). It simplifies an image by keeping only its outlines (edges).

def auto_canny(self, image, sigma=0.33):
    v = np.median(image)  # compute the median of the single channel pixel intensities
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    return edged  # return the edged image
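To get a feel for what the edge images look like, auto_canny() can be run on a template outside the test class. A minimal sketch (the self parameter is dropped here, and the output file name is just an example, not from the original post):

import cv2
import numpy as np

def auto_canny(image, sigma=0.33):
    # same logic as the method above, as a standalone function
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)

# read a template in grayscale and save its edge image for inspection
template = cv2.imread('./templates/and-button-start.png', 0)
cv2.imwrite('./templates/and-button-start-edges.png', auto_canny(template))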

Next, detectImage(). It takes a screenshot of the device, matches the target image against it, and returns the coordinates of the match within the screenshot.

def detectImage(self, templateImagePath):
    # take a screenshot of the device and save it under a timestamped name
    shotName = '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
    directory = '%s/screenshots/' % os.getcwd()
    screenshotImgPath = directory + shotName + '.png'
    self.driver.save_screenshot(screenshotImgPath)
    sleep(2)
    # load the screenshot in grayscale and reduce it to edges
    screenImage = cv2.imread(screenshotImgPath, 0)
    screenCann = self.auto_canny(screenImage)
    # load the template in grayscale and reduce it to edges
    templateImage = cv2.imread(templateImagePath, 0)
    templateCann = self.auto_canny(templateImage)
    w, h = templateCann.shape[::-1]
    # template matching on the edge images
    res = cv2.matchTemplate(screenCann, templateCann, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    # return the center of the best match
    x = top_left[0] + int(w/2)
    y = top_left[1] + int(h/2)
    return x, y
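Note that detectImage() always returns a coordinate, even when the template is not actually on screen. If that matters for your test, the max_val score from cv2.minMaxLoc() can be checked against a threshold first. A rough sketch (the helper name match_center and the 0.5 threshold are illustrative, not part of the original script):

import cv2

def match_center(screen_edges, template_edges, threshold=0.5):
    # return the center of the best match, or None when the match score is too low
    w, h = template_edges.shape[::-1]
    res = cv2.matchTemplate(screen_edges, template_edges, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < threshold:
        return None  # treat as "template not found"
    return max_loc[0] + int(w / 2), max_loc[1] + int(h / 2)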

The script is written using the two functions above.

"""
Android Unity Script
"""
import unittest
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os, sys
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from time import sleep
import datetime
class TableSearchTest(unittest.TestCase):
def setUp(self):
# Set up appium
app = os.path.join(os.path.dirname(__file__), '/Users/tongchunkim/Documents/TestAppium/appfiles/', 'UnityTestAndroid.apk')
app = os.path.abspath(app)
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'app': app,
'platformName': 'Android',
'platformVersion': '6.0',
'deviceName': 'V10',
'automationName': 'Appium',
'appPackage': 'com.kakaogames.sdk.unitysample'
})
def auto_canny(self, image, sigma=0.33):
v = np.median(image) # compute the median of the single channel pixel intensities
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
return edged # return the edged image
def detectImage(self, templateImagePath):
shotName = '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
directory = '%s/screenshots/' % os.getcwd()
screenshotImgPath = directory + shotName + '.png'
self.driver.save_screenshot(screenshotImgPath)
sleep(2)
#
screenImage = cv2.imread(screenshotImgPath, 0)
screenCann = self.auto_canny(screenImage)
#
templateImage = cv2.imread(templateImagePath, 0)
templateCann = self.auto_canny(templateImage)
w, h = templateCann.shape[::-1]
# template
res = cv2.matchTemplate(screenCann, templateCann, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
x = top_left[0] + int(w/2)
y = top_left[1] + int(h/2)
return x, y
def test_search_field(self):
driver = self.driver
sleep(10)
co_x, co_y = self.detectImage('./templates/and-button-start.png')
action = TouchAction(self.driver)
action.tap(None, 720, 2367, 1).perform()
sleep(30)
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TableSearchTest)
unittest.TextTestRunner(verbosity=2).run(suite)
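One practical detail: driver.save_screenshot() does not create the screenshots/ directory, so if the folder is missing the screenshot may not be written and cv2.imread() will return None. Creating the directory up front, for example in setUp(), avoids that. A small sketch (not part of the original script):

import os

# make sure the screenshots/ directory used by detectImage() exists before the test runs
directory = '%s/screenshots/' % os.getcwd()
if not os.path.isdir(directory):
    os.makedirs(directory)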

The iOS script is the same.

"""
iOS Unity Script
"""
import unittest
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os, sys
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from time import sleep
import datetime
class TableSearchTest(unittest.TestCase):
def setUp(self):
# Set up appium
app = os.path.join(os.path.dirname(__file__), '/Users/tongchunkim/Documents/TestAppium/appfiles/', 'com.kakaogames.sdk.unitysample.ipa')
app = os.path.abspath(app)
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'platformName': 'ios',
'platformVersion': '11.0',
'deviceName': 'iPhone 6',
'automationName': 'XCUITest',
'newCommandTimeout': 7200,
'bundleId': 'com.kakaogames.sdk.unitysample',
'udid': '73439839ee3db7b59fcdd8bc3aa8cc4862006b7b',
'xcodeOrgId': 'WVC7779982',
'xcodeSigningId': 'iPhone Developer'
})
def auto_canny(self, image, sigma=0.33):
v = np.median(image) # compute the median of the single channel pixel intensities
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
return edged # return the edged image
def detectImage(self, templateImagePath):
shotName = '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.datetime.now())
directory = '%s/screenshots/' % os.getcwd()
screenshotImgPath = directory + shotName + '.png'
self.driver.save_screenshot(screenshotImgPath)
sleep(2)
#
screenImage = cv2.imread(screenshotImgPath, 0)
screenCann = self.auto_canny(screenImage)
#
templateImage = cv2.imread(templateImagePath, 0)
templateCann = self.auto_canny(templateImage)
w, h = templateCann.shape[::-1]
# template
res = cv2.matchTemplate(screenCann, templateCann, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
x = top_left[0] + int(w/2)
y = top_left[1] + int(h/2)
return x, y
def test_search_field(self):
driver = self.driver
sleep(10)
co_x, co_y = self.detectImage('./templates/ios-button-start.png')
action = TouchAction(self.driver)
action.tap(None, co_x, co_y, 1).perform()
sleep(30)
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TableSearchTest)
unittest.TextTestRunner(verbosity=2).run(suite)
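One caveat on iOS: with XCUITest the screenshot is typically captured at the device's pixel resolution, while TouchAction coordinates are given in points, so the coordinates returned by detectImage() may need to be divided by the screen scale before tapping. A hedged sketch inside test_search_field (the scale value 2 is the Retina factor of the iPhone 6 used here; other devices differ and some setups may not need this at all):

# hypothetical adjustment: convert screenshot pixels to points before tapping
scale = 2  # iPhone 6 Retina factor; Plus/X-class devices use 3
co_x, co_y = self.detectImage('./templates/ios-button-start.png')
action = TouchAction(self.driver)
action.tap(None, int(co_x / scale), int(co_y / scale), 1).perform()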


