diff --git a/share/qtcreator/examples/01-Media/acodec.py b/share/qtcreator/examples/01-Media/acodec.py
old mode 100755
new mode 100644
index 81d60edb02b..ff4906c9e8b
--- a/share/qtcreator/examples/01-Media/acodec.py
+++ b/share/qtcreator/examples/01-Media/acodec.py
@@ -1,22 +1,37 @@
-from media.pyaudio import *
-from media.media import *
-import media.g711 as g711
-from mpp.payload_struct import *
+# g711 encode/decode example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can capture raw audio data and encode it to g711, or decode g711 data back to raw audio output.
+
+
+from media.pyaudio import *         # import the pyaudio module, used to capture and play audio
+from media.media import *           # import the media module, used to initialize the vb buffer
+import media.g711 as g711           # import the g711 module, used for g711 encode/decode
+from mpp.payload_struct import *    # import the payload module, used to get audio/video codec types
+import os
+
+def exit_check():
+    try:
+        os.exitpoint()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+        return True
+    return False

 def encode_audio(filename, duration):
-    CHUNK = int(44100/25)
-    FORMAT = paInt16
-    CHANNELS = 2
-    RATE = 44100
+    CHUNK = int(44100/25)   # set the audio chunk size
+    FORMAT = paInt16        # set the sample width
+    CHANNELS = 2            # set the number of channels
+    RATE = 44100            # set the sample rate

     p = PyAudio()
-    p.initialize(CHUNK)
-    enc = g711.Encoder(K_PT_G711A,CHUNK)
-    ret = media.buffer_init()
-    if ret:
-        print("record_audio, buffer_init failed")
+    p.initialize(CHUNK)                     # initialize the PyAudio object
+    enc = g711.Encoder(K_PT_G711A,CHUNK)    # create the g711 encoder object
+    media.buffer_init()                     # initialize the vb buffer

-    enc.create()
+    enc.create()    # create the encoder

+    # create the audio input stream
     stream = p.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
@@ -24,109 +39,117 @@ def encode_audio(filename, duration):
                     frames_per_buffer=CHUNK)

     frames = []
-
+    # capture audio data, encode it, and append it to the list
     for i in range(0, int(RATE / CHUNK * duration)):
-        frame_data = stream.read()
-        data = enc.encode(frame_data)
-        frames.append(data)
-
-    stream.stop_stream()
-    stream.close()
-    p.terminate()
-    enc.destroy()
-
+        frame_data = stream.read()      # read audio data from the input stream
+        data = enc.encode(frame_data)   # encode the audio data to g711
+        frames.append(data)             # append the g711 data to the list
+        if exit_check():
+            break
+
+    stream.stop_stream()    # stop the audio input stream
+    stream.close()          # close the audio input stream
+    p.terminate()           # release the audio object
+    enc.destroy()           # destroy the g711 encoder
+
+    # save the g711 encoded data to a file
     with open(filename,mode='w') as wf:
         wf.write(b''.join(frames))

-    media.buffer_deinit()
+    media.buffer_deinit()   # release the vb buffer

 def decode_audio(filename):
-    # read the audio file
-    wf = open(filename,mode='rb')
-    FORMAT = paInt16
-    CHANNELS = 2
-    RATE = 44100
-    CHUNK = int(RATE/25)
-
-    # audio playback
+    wf = open(filename,mode='rb')   # open the g711 file
+    FORMAT = paInt16                # set the sample width
+    CHANNELS = 2                    # set the number of channels
+    RATE = 44100                    # set the sample rate
+    CHUNK = int(RATE/25)            # set the audio chunk size
+
     p = PyAudio()
-    p.initialize(CHUNK)
-    dec = g711.Decoder(K_PT_G711A,CHUNK)
-    ret = media.buffer_init()
-    if ret:
-        print("play_audio, buffer_init failed")
+    p.initialize(CHUNK)                     # initialize the PyAudio object
+    dec = g711.Decoder(K_PT_G711A,CHUNK)    # create the g711 decoder object
+    media.buffer_init()                     # initialize the vb buffer

-    dec.create()
+    dec.create()    # create the decoder

+    # create the audio output stream
     stream = p.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
                     output=True,
                     frames_per_buffer=CHUNK)

-    stream_len = CHUNK*CHANNELS*2//2
-    stream_data = wf.read(stream_len)
+    stream_len = CHUNK*CHANNELS*2//2    # set the g711 stream length to read each time
+    stream_data = wf.read(stream_len)   # read data from the g711 file

+    # decode the g711 data and play it
     while stream_data:
-        frame_data = dec.decode(stream_data)
-        stream.write(frame_data)
-        stream_data = wf.read(stream_len)
+        frame_data = dec.decode(stream_data)    # decode the g711 data
+        stream.write(frame_data)                # play the raw data
+        stream_data = wf.read(stream_len)       # read data from the g711 file
+        if exit_check():
+            break

-    stream.stop_stream()
-    stream.close()
-    p.terminate()
-
-    dec.destroy()
-    wf.close()
+    stream.stop_stream()    # stop the audio output stream
+    stream.close()          # close the audio output stream
+    p.terminate()           # release the audio object
+    dec.destroy()           # destroy the decoder
+    wf.close()              # close the g711 file

-    media.buffer_deinit()
+    media.buffer_deinit()   # release the vb buffer

 def loop_codec(duration):
-    CHUNK = int(44100/25)
-    FORMAT = paInt16
-    CHANNELS = 2
-    RATE = 44100
+    CHUNK = int(44100/25)   # set the audio chunk size
+    FORMAT = paInt16        # set the sample width
+    CHANNELS = 2            # set the number of channels
+    RATE = 44100            # set the sample rate

     p = PyAudio()
-    p.initialize(CHUNK)
-    dec = g711.Decoder(K_PT_G711A,CHUNK)
-    enc = g711.Encoder(K_PT_G711A,CHUNK)
-    ret = media.buffer_init()
-    if ret:
-        print("loop_audio, buffer_init failed")
+    p.initialize(CHUNK)                     # initialize the PyAudio object
+    dec = g711.Decoder(K_PT_G711A,CHUNK)    # create the g711 decoder object
+    enc = g711.Encoder(K_PT_G711A,CHUNK)    # create the g711 encoder object
+    media.buffer_init()                     # initialize the vb buffer

-    dec.create()
-    enc.create()
+    dec.create()    # create the g711 decoder
+    enc.create()    # create the g711 encoder

+    # create the audio input stream
     input_stream = p.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
                     input=True,
                     frames_per_buffer=CHUNK)

+    # create the audio output stream
     output_stream = p.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
                     output=True,
                     frames_per_buffer=CHUNK)

+    # read data from the input stream -> encode -> decode -> write to the output stream
     for i in range(0, int(RATE / CHUNK * duration)):
-        frame_data = input_stream.read()
-        stream_data = enc.encode(frame_data)
-        frame_data = dec.decode(stream_data)
-        output_stream.write(frame_data)
-
-
-    input_stream.stop_stream()
-    output_stream.stop_stream()
-    input_stream.close()
-    output_stream.close()
-    p.terminate()
-    dec.destroy()
-    enc.destroy()
-
-    media.buffer_deinit()
-
-#encode_audio('/sdcard/app/test.g711a', 15)
-#decode_audio('/sdcard/app/test.g711a')
-loop_codec(15)
-print("audio codec sample done")
+        frame_data = input_stream.read()        # read raw audio data from the input stream
+        stream_data = enc.encode(frame_data)    # encode the audio data to g711
+        frame_data = dec.decode(stream_data)    # decode the g711 data back to raw data
+        output_stream.write(frame_data)         # play the raw data
+        if exit_check():
+            break
+
+    input_stream.stop_stream()      # stop the audio input stream
+    output_stream.stop_stream()     # stop the audio output stream
+    input_stream.close()            # close the audio input stream
+    output_stream.close()           # close the audio output stream
+    p.terminate()                   # release the audio object
+    dec.destroy()                   # destroy the g711 decoder
+    enc.destroy()                   # destroy the g711 encoder
+
+    media.buffer_deinit()   # release the vb buffer
+
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    print("audio codec sample start")
+    #encode_audio('/sdcard/app/test.g711a', 15)     # capture audio and encode it to a g711 file
+    #decode_audio('/sdcard/app/test.g711a')         # decode a g711 file and play it
+    loop_codec(15)                                  # capture -> g711 encode -> g711 decode -> play
+    print("audio codec sample done")
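For reference, G.711 A-law maps each 16-bit PCM sample to a single byte, which is why `stream_len = CHUNK*CHANNELS*2//2` above is half of a raw frame (2 bytes per sample, compressed 2:1). The following is a minimal pure-Python sketch of the standard A-law compression step (ported from the widely used sun g711.c algorithm) to illustrate what the firmware's native encoder computes per sample; it is for understanding only, not the module's implementation:

```python
SEG_END = (0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF)

def alaw_encode_sample(pcm16):
    val = pcm16 >> 3                        # 16-bit PCM -> 13-bit A-law input domain
    if val >= 0:
        mask = 0xD5                         # even-bit inversion with sign bit set
    else:
        mask = 0x55
        val = -val - 1
    for seg, end in enumerate(SEG_END):     # find the logarithmic segment
        if val <= end:
            break
    else:
        return 0x7F ^ mask                  # clip to the maximum code
    aval = seg << 4                         # 3-bit segment, 4-bit mantissa
    aval |= (val >> 1) & 0xF if seg < 2 else (val >> seg) & 0xF
    return aval ^ mask

print(hex(alaw_encode_sample(1000)))        # one compressed byte per sample
```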
diff --git a/share/qtcreator/examples/01-Media/audio.py b/share/qtcreator/examples/01-Media/audio.py
old mode 100755
new mode 100644
index 8da52baa5a8..015d9975808
--- a/share/qtcreator/examples/01-Media/audio.py
+++ b/share/qtcreator/examples/01-Media/audio.py
@@ -1,19 +1,33 @@
-from media.pyaudio import *
-import media.wave as wave
-from media.media import *
+# audio input and output example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can play wav files, or capture audio and save it as wav.
+
+from media.pyaudio import *     # import the pyaudio module, used to capture and play audio
+import media.wave as wave       # import the wave module, used to save and load wav files
+from media.media import *       # import the media module, used to initialize the vb buffer
+import os
+
+def exit_check():
+    try:
+        os.exitpoint()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+        return True
+    return False

 def record_audio(filename, duration):
-    CHUNK = int(44100/25)
-    FORMAT = paInt16
-    CHANNELS = 2
-    RATE = 44100
+    CHUNK = int(44100/25)   # set the audio chunk size
+    FORMAT = paInt16        # set the sample width
+    CHANNELS = 2            # set the number of channels
+    RATE = 44100            # set the sample rate

     p = PyAudio()
-    p.initialize(CHUNK)
-    ret = media.buffer_init()
-    if ret:
-        print("record_audio, buffer_init failed")
+    p.initialize(CHUNK)     # initialize the PyAudio object
+    media.buffer_init()     # initialize the vb buffer

+    # create the audio input stream
     stream = p.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
@@ -21,90 +35,99 @@ def record_audio(filename, duration):
                     frames_per_buffer=CHUNK)

     frames = []
-
+    # capture audio data and append it to the list
     for i in range(0, int(RATE / CHUNK * duration)):
         data = stream.read()
         frames.append(data)
+        if exit_check():
+            break

-    stream.stop_stream()
-    stream.close()
-    p.terminate()
+    stream.stop_stream()    # stop capturing audio data
+    stream.close()          # close the audio input stream
+    p.terminate()           # release the audio object

-    wf = wave.open(filename, 'wb')
-    wf.set_channels(CHANNELS)
-    wf.set_sampwidth(p.get_sample_size(FORMAT))
-    wf.set_framerate(RATE)
-    wf.write_frames(b''.join(frames))
-    wf.close()
+    # save the buffered data to a wav file
+    wf = wave.open(filename, 'wb')                  # create the wav file
+    wf.set_channels(CHANNELS)                       # set the wav channel count
+    wf.set_sampwidth(p.get_sample_size(FORMAT))     # set the wav sample width
+    wf.set_framerate(RATE)                          # set the wav sample rate
+    wf.write_frames(b''.join(frames))               # write the wav audio data
+    wf.close()                                      # close the wav file

-    media.buffer_deinit()
+    media.buffer_deinit()   # release the vb buffer

 def play_audio(filename):
-    # read the audio file
-    wf = wave.open(filename, 'rb')
-    CHUNK = int(wf.get_framerate()/25)
-    # audio playback
+    wf = wave.open(filename, 'rb')      # open the wav file
+    CHUNK = int(wf.get_framerate()/25)  # set the audio chunk size
+
     p = PyAudio()
-    p.initialize(CHUNK)
-    ret = media.buffer_init()
-    if ret:
-        print("play_audio, buffer_init failed")
+    p.initialize(CHUNK)     # initialize the PyAudio object
+    media.buffer_init()     # initialize the vb buffer

+    # create the audio output stream; all parameters come from the wav file itself
     stream = p.open(format=p.get_format_from_width(wf.get_sampwidth()),
                     channels=wf.get_channels(),
                     rate=wf.get_framerate(),
                     output=True,frames_per_buffer=CHUNK)

-    data = wf.read_frames(CHUNK)
+    data = wf.read_frames(CHUNK)    # read one frame of data from the wav file

     while data:
-        stream.write(data)
-        data = wf.read_frames(CHUNK)
+        stream.write(data)              # write the frame data to the audio output stream
+        data = wf.read_frames(CHUNK)    # read one frame of data from the wav file
+        if exit_check():
+            break

-    stream.stop_stream()
-    stream.close()
-    p.terminate()
-    wf.close()
+    stream.stop_stream()    # stop the audio output stream
+    stream.close()          # close the audio output stream
+    p.terminate()           # release the audio object
+    wf.close()              # close the wav file

-    media.buffer_deinit()
+    media.buffer_deinit()   # release the vb buffer

 def loop_audio(duration):
-    CHUNK = int(44100/25)
-    FORMAT = paInt16
-    CHANNELS = 2
-    RATE = 44100
+    CHUNK = int(44100/25)   # set the audio chunk size
+    FORMAT = paInt16        # set the sample width
+    CHANNELS = 2            # set the number of channels
+    RATE = 44100            # set the sample rate

     p = PyAudio()
-    p.initialize(CHUNK)
-    ret = media.buffer_init()
-    if ret:
-        print("loop_audio, buffer_init failed")
+    p.initialize(CHUNK)     # initialize the PyAudio object
+    media.buffer_init()     # initialize the vb buffer

+    # create the audio input stream
     input_stream = p.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
                     input=True,
                     frames_per_buffer=CHUNK)

+    # create the audio output stream
     output_stream = p.open(format=FORMAT,
                     channels=CHANNELS,
                     rate=RATE,
                     output=True,frames_per_buffer=CHUNK)

+    # read data from the input stream and write it to the output stream
     for i in range(0, int(RATE / CHUNK * duration)):
         output_stream.write(input_stream.read())
-
-
-    input_stream.stop_stream()
-    output_stream.stop_stream()
-    input_stream.close()
-    output_stream.close()
-    p.terminate()
-
-    media.buffer_deinit()
-
-#play_audio('/sdcard/app/input.wav')
-#record_audio('/sdcard/app/output.wav', 15)
-loop_audio(15)
-print("audio sample done")
+        if exit_check():
+            break
+
+    input_stream.stop_stream()      # stop the audio input stream
+    output_stream.stop_stream()     # stop the audio output stream
+    input_stream.close()            # close the audio input stream
+    output_stream.close()           # close the audio output stream
+    p.terminate()                   # release the audio object
+
+    media.buffer_deinit()   # release the vb buffer
+
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    print("audio sample start")
+    #play_audio('/sdcard/app/input.wav')            # play a wav file
+    #record_audio('/sdcard/app/output.wav', 15)     # record a wav file
+    loop_audio(15)                                  # capture audio and play it back
+    print("audio sample done")
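A quick sizing check for the capture path above, using the example's own defaults (change the constants if you change the stream parameters):

```python
# Back-of-the-envelope sizing for a 15 s stereo 16-bit 44.1 kHz recording.
RATE, CHANNELS, SAMPWIDTH, DURATION = 44100, 2, 2, 15
CHUNK = RATE // 25                          # 1764 frames per read, i.e. 40 ms chunks
chunks = int(RATE / CHUNK * DURATION)       # iterations of the capture loop
payload = RATE * CHANNELS * SAMPWIDTH * DURATION
print(chunks, payload)                      # 375 chunks, 2646000 bytes (~2.6 MB)
```

The wav file on the SD card will be the payload plus a small header (the canonical PCM wav header is 44 bytes).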
diff --git a/share/qtcreator/examples/01-Media/camera.py b/share/qtcreator/examples/01-Media/camera.py
index c8d4f370506..d27ef0d0f2a 100755
--- a/share/qtcreator/examples/01-Media/camera.py
+++ b/share/qtcreator/examples/01-Media/camera.py
@@ -1,116 +1,94 @@
+# Camera Example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can start the camera preview and capture yuv images.
+
 from media.camera import *
 from media.display import *
 from media.media import *
-from time import *
-import time
-import image
-
-
-def canmv_camera_test():
-    print("canmv_camera_test")
+import time, os, sys, image

+def camera_test():
+    print("camera_test")
     # use hdmi for display
     display.init(LT9611_1920X1080_30FPS)
-
-    # use EVB LCD for display
-    #display.init(HX8377_1080X1920_30FPS)
-
     camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
-    #camera.sensor_init(CAM_DEV_ID_0, CAM_IMX335_2LANE_1920X1080_30FPS_12BIT_LINEAR)
-
     out_width = 1920
     out_height = 1080
+    # align the camera output width up to 16 bytes
     out_width = ALIGN_UP(out_width, 16)
-
-    # set chn0 output yuv420sp
+    # set chn0 output size
    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height)
+    # set chn0 output format
     camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
-
+    # create media source device
     meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    # create media sink device
     meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    # create media link
     media.create_link(meida_source, meida_sink)
-
+    # set display plane with video channel
     display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
-
     out_width = 640
     out_height = 480
     out_width = ALIGN_UP(out_width, 16)
-
-    # set chn1 output rgb888
+    # set chn1 output size
     camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, out_width, out_height)
+    # set chn1 output format
     camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888)
-
-    # set chn2 output rgb888planar
+    # set chn2 output size
     camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_2, out_width, out_height)
+    # set chn2 output format
     camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR)
-
-    ret = media.buffer_init()
-    if ret:
-        print("canmv_camera_test, buffer init failed")
-        return ret
-
+    # init media buffer
+    media.buffer_init()
+    # start stream for camera device0
     camera.start_stream(CAM_DEV_ID_0)
-    time.sleep(15)
-
-    capture_count = 0
-    while capture_count < 100:
-        time.sleep(1)
-        for dev_num in range(CAM_DEV_ID_MAX):
-            if not camera.cam_dev[dev_num].dev_attr.dev_enable:
-                continue
-
-            for chn_num in range(CAM_CHN_ID_MAX):
-                if not camera.cam_dev[dev_num].chn_attr[chn_num].chn_enable:
+    try:
+        while True:
+            os.exitpoint()
+            time.sleep(5)
+            for dev_num in range(CAM_DEV_ID_MAX):
+                if not camera.cam_dev[dev_num].dev_attr.dev_enable:
                     continue
-                print(f"canmv_camera_test, dev({dev_num}) chn({chn_num}) capture frame.")
-
-                img = camera.capture_image(dev_num, chn_num)
-                if img == -1:
-                    print("camera.capture_image failed")
-                    continue
-
-                if img.format() == image.YUV420:
-                    suffix = "yuv420sp"
-                elif img.format() == image.RGB888:
-                    suffix = "rgb888"
-                elif img.format() == image.RGBP888:
-                    suffix = "rgb888p"
-                else:
-                    suffix = "unkown"
-
-                filename = f"/sdcard/dev_{dev_num:02d}_chn_{chn_num:02d}_{img.width()}x{img.height()}_{capture_count:04d}.{suffix}"
-                print("save capture image to file:", filename)
-
-                with open(filename, "wb") as f:
-                    if f:
-                        img_data = uctypes.bytearray_at(img.virtaddr(), img.size())
-                        #f.write(img_data)
+                for chn_num in range(CAM_CHN_ID_MAX):
+                    if not camera.cam_dev[dev_num].chn_attr[chn_num].chn_enable:
+                        continue
+
+                    print(f"camera_test, dev({dev_num}) chn({chn_num}) capture frame.")
+                    # capture image from dev and chn
+                    img = camera.capture_image(dev_num, chn_num)
+                    if img.format() == image.YUV420:
+                        suffix = "yuv420sp"
+                    elif img.format() == image.RGB888:
+                        suffix = "rgb888"
+                    elif img.format() == image.RGBP888:
+                        suffix = "rgb888p"
                     else:
-                        print(f"capture_image, open dump file failed({filename})")
-
-                time.sleep(1)
-
-                camera.release_image(dev_num, chn_num, img)
-
-        capture_count += 1
-
+                        suffix = "unknown"
+
+                    filename = f"/sdcard/dev_{dev_num:02d}_chn_{chn_num:02d}_{img.width()}x{img.height()}.{suffix}"
+                    print("save capture image to file:", filename)
+                    img.save(filename)
+                    # release image for dev and chn
+                    camera.release_image(dev_num, chn_num, img)
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+    except BaseException as e:
+        sys.print_exception(e)
+    # stop stream for camera device0
     camera.stop_stream(CAM_DEV_ID_0)
-
+    # deinit display
     display.deinit()
-
+    # destroy media link
     media.destroy_link(meida_source, meida_sink)
-
-    time.sleep(1)
-
-    ret = media.buffer_deinit()
-    if ret:
-        print("camera test, media_buffer_deinit failed")
-        return ret
-
-    print("camera test exit")
-    return 0
-
-
-canmv_camera_test()
-
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # deinit media buffer
+    media.buffer_deinit()
+
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_test()
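The example aligns the output width with `ALIGN_UP(out_width, 16)` before configuring each channel. The firmware macro presumably performs the usual power-of-two round-up; a minimal sketch of that arithmetic, with an illustrative name:

```python
def align_up(value, alignment):
    # alignment must be a power of two for the mask trick to hold
    return (value + alignment - 1) & ~(alignment - 1)

assert align_up(1920, 16) == 1920   # already a multiple of 16, unchanged
assert align_up(1000, 16) == 1008   # rounded up to the next 16-byte boundary
```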
diff --git a/share/qtcreator/examples/01-Media/camera_480p.py b/share/qtcreator/examples/01-Media/camera_480p.py
new file mode 100644
index 00000000000..fb767130b1a
--- /dev/null
+++ b/share/qtcreator/examples/01-Media/camera_480p.py
@@ -0,0 +1,69 @@
+from media.camera import *
+from media.display import *
+from media.media import *
+from time import *
+import time
+import image
+from mpp import *
+import gc
+from random import randint
+
+def canmv_camera_test():
+    print("canmv_camera_test")
+
+    # use hdmi for display
+    display.init(LT9611_1920X1080_30FPS)
+
+    # use EVB LCD for display
+    # display.init(HX8377_1080X1920_30FPS)
+
+    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
+
+    out_width = 640
+    out_height = 480
+    out_width = ALIGN_UP(out_width, 16)
+
+    # set chn0 output yuv420sp
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height)
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
+
+    meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    media.create_link(meida_source, meida_sink)
+
+    display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+
+    ret = media.buffer_init()
+    if ret:
+        print("canmv_camera_test, buffer init failed")
+        return ret
+
+    camera.start_stream(CAM_DEV_ID_0)
+    img = None
+    try:
+        capture_count = 0
+        while True:
+            img = camera.capture_image(0, 0)
+            img.compress_for_ide()
+            camera.release_image(0, 0, img)
+            gc.collect()
+            capture_count += 1
+            print(capture_count)
+    except Exception as e:
+        print(f"An error occurred during buffer used: {e}")
+    finally:
+        print('end')
+        if img:
+            camera.release_image(0, 0, img)
+        else:
+            print('img not dumped')
+        camera.stop_stream(CAM_DEV_ID_0)
+
+        display.deinit()
+
+        media.destroy_link(meida_source, meida_sink)
+        time.sleep(1)
+        print("camera test exit")
+        return 0
+
+canmv_camera_test()
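If you want to see how fast the capture loop in camera_480p.py actually runs, a small counter built on MicroPython's standard `time.ticks_ms()`/`time.ticks_diff()` can be dropped into the `while` loop. The class below is a hedged, illustrative sketch only (not part of the example's API):

```python
import time

class FpsMeter:
    def __init__(self):
        self.t0 = time.ticks_ms()
        self.frames = 0

    def tick(self):
        # call once per captured frame
        self.frames += 1
        dt = time.ticks_diff(time.ticks_ms(), self.t0)
        if dt >= 1000:                              # report roughly once per second
            print("fps:", self.frames * 1000 / dt)
            self.t0 = time.ticks_ms()
            self.frames = 0
```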
diff --git a/share/qtcreator/examples/01-Media/display.py b/share/qtcreator/examples/01-Media/display.py
index b24698cf7f6..2e0145e5199 100755
--- a/share/qtcreator/examples/01-Media/display.py
+++ b/share/qtcreator/examples/01-Media/display.py
@@ -1,53 +1,67 @@
 from media.camera import *
 from media.display import *
 from media.media import *
-from time import *
-import time
+import time, os, urandom, sys, image

+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080

-def camera_display_test():
-    CAM_OUTPUT_BUF_NUM = 6
-    CAM_INPUT_BUF_NUM = 4
-
-    out_width = 1080
-    out_height = 720
-    out_width = ALIGN_UP(out_width, 16)
-
+def display_test():
+    print("display test")
+    # use hdmi for display
     display.init(LT9611_1920X1080_30FPS)
-
-    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
-
-    # set chn0
-    camera.set_outbufs(CAM_DEV_ID_0, CAM_CHN_ID_0, CAM_OUTPUT_BUF_NUM)
-    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height)
-    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
-
-    meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
-    meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
-    media.create_link(meida_source, meida_sink)
+    # config vb for the osd layer
+    config = k_vb_config()
+    config.max_pool_cnt = 1
+    config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT
+    config.comm_pool[0].blk_cnt = 1
+    config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE
+    # media buffer config
+    media.buffer_config(config)
+    # media buffer init
+    media.buffer_init()
+    # request media buffer for the osd image
+    globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)

-    display.set_plane(400, 200, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+    # create image for drawing
+    img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)
+    # create image for osd
+    buffer = globals()["buffer"]
+    osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    try:
+        while True:
+            img.clear()
+            for i in range(10):
+                x = (urandom.getrandbits(11) % img.width())
+                y = (urandom.getrandbits(11) % img.height())
+                r = (urandom.getrandbits(8))
+                g = (urandom.getrandbits(8))
+                b = (urandom.getrandbits(8))
+                # If the first argument is a scalar then this method expects
+                # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
+                # Character and string rotation can be done at 0, 90, 180, 270, etc. degrees.
+                img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
+                                char_rotation = 0, char_hmirror = False, char_vflip = False,
+                                string_rotation = 0, string_hmirror = False, string_vflip = False)
+            img.copy_to(osd_img)
+            time.sleep(1)
+            os.exitpoint()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+    except BaseException as e:
+        sys.print_exception(e)

-    ret = media.buffer_init()
-    if ret:
-        print("camera_display_test, buffer init failed")
-        return ret
-
-    camera.start_stream(CAM_DEV_ID_0)
-    count = 0
-    while count < 600:
-        time.sleep(1)
-        count += 1
-
-    camera.stop_stream(CAM_DEV_ID_0)
-    media.destroy_link(meida_source, meida_sink)
-    time.sleep(1)
+    # deinit display
     display.deinit()
-    ret = media.buffer_deinit()
-    if ret:
-        print("camera_display_test, media_buffer_deinit failed")
-        return ret
-
-    print("camera_display_test exit")
-    return 0
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # release media buffer
+    media.release_buffer(globals()["buffer"])
+    # deinit media buffer
+    media.buffer_deinit()

-camera_display_test()
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    display_test()
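The OSD pool in display.py is sized for exactly one ARGB8888 frame: 4 bytes per pixel (alpha, red, green, blue) times width times height. The arithmetic, spelled out:

```python
# Size of one full-screen ARGB8888 overlay frame, matching blk_size above.
DISPLAY_WIDTH, DISPLAY_HEIGHT = 1920, 1080
blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT
print(blk_size)     # 8294400 bytes, about 7.9 MiB per frame
```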
diff --git a/share/qtcreator/examples/01-Media/media.py b/share/qtcreator/examples/01-Media/media.py
index c491468aec1..805e2a06aa4 100755
--- a/share/qtcreator/examples/01-Media/media.py
+++ b/share/qtcreator/examples/01-Media/media.py
@@ -1,28 +1,15 @@
-from media.media import *
+# Media Example
+#
+# Note: You will need an SD card to run this example.
+#
+# This example shows how to use the media API.
+from media.media import *
+import os

 def media_buf_test():
     print("media_buf_test start")
     config = k_vb_config()
-    config.max_pool_cnt = 10
-
-    config.comm_pool[0].blk_size = 1024*1024
-    config.comm_pool[0].blk_cnt = 10
-    config.comm_pool[0].mode = VB_REMAP_MODE_NONE
-
-    config.comm_pool[1].blk_size = 2*1024*1024
-    config.comm_pool[1].blk_cnt = 10
-    config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE
-
-    config.comm_pool[2].blk_size = 3*1024*1024
-    config.comm_pool[2].blk_cnt = 10
-    config.comm_pool[3].mode = VB_REMAP_MODE_CACHED
-
-    print("media_buf_test buffer_config 111")
-    ret = media.buffer_config(config)
-    if ret:
-        print("media_buf_test, buffer_config failed")
-        return ret

     config.max_pool_cnt = 10

@@ -38,11 +25,8 @@ def media_buf_test():
     config.comm_pool[2].blk_cnt = 10
     config.comm_pool[3].mode = VB_REMAP_MODE_CACHED

-    print("media_buf_test buffer_config 222")
-    ret = media.buffer_config(config)
-    if ret:
-        print("media_buf_test, buffer_config failed")
-        return ret
+    # config media buffer
+    media.buffer_config(config)

     config.max_pool_cnt = 20

@@ -58,11 +42,8 @@ def media_buf_test():
     config.comm_pool[2].blk_cnt = 3
     config.comm_pool[3].mode = VB_REMAP_MODE_CACHED

-    print("media_buf_test buffer_config 333")
-    ret = media.buffer_config(config)
-    if ret:
-        print("media_buf_test, buffer_config failed")
-        return ret
+    # config media buffer
+    media.buffer_config(config)

     config.max_pool_cnt = 30

@@ -78,40 +59,30 @@ def media_buf_test():
     config.comm_pool[2].blk_cnt = 5
     config.comm_pool[3].mode = VB_REMAP_MODE_CACHED

-    print("media_buf_test buffer_config 444")
-    ret = media.buffer_config(config)
-    if ret:
-        print("media_buf_test, buffer_config failed")
-        return ret
+    # config media buffer
+    media.buffer_config(config)

     print("media_buf_test buffer_init")
-    ret = media.buffer_init()
-    if ret:
-        print("media_buf_test, buffer_init failed")
-
+    # init media buffer
+    media.buffer_init()

     print("media_buf_test request_buffer")
+    # request media buffer
     buffer = media.request_buffer(4*1024*1024)
-    if buffer == -1:
-        print("media_buf_test, request_buffer failed")
-    else:
-        print(f"buffer handle({buffer.handle})")
-        print(f"buffer pool_id({buffer.pool_id})")
-        print(f"buffer phys_addr({buffer.phys_addr})")
-        print(f"buffer virt_addr({buffer.virt_addr})")
-        print(f"buffer size({buffer.size})")
-        ret = media.release_buffer(buffer)
-        if ret:
-            print("media_buf_test, release_buffer failed")
+    print(f"buffer handle({buffer.handle})")
+    print(f"buffer pool_id({buffer.pool_id})")
+    print(f"buffer phys_addr({buffer.phys_addr})")
+    print(f"buffer virt_addr({buffer.virt_addr})")
+    print(f"buffer size({buffer.size})")
+    # release media buffer
+    media.release_buffer(buffer)

     print("media_buf_test buffer_deinit")
-    ret = media.buffer_deinit()
-    if ret:
-        print("media_buf_test, buffer_deinit failed")
-        return ret
+    # deinit media buffer
+    media.buffer_deinit()

     print("media_buf_test end")
-    return 0
-
-media_buf_test()
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    media_buf_test()
diff --git a/share/qtcreator/examples/01-Media/mp4muxer.py b/share/qtcreator/examples/01-Media/mp4muxer.py
old mode 100755
new mode 100644
index 37fe60580e3..978b65e9077
--- a/share/qtcreator/examples/01-Media/mp4muxer.py
+++ b/share/qtcreator/examples/01-Media/mp4muxer.py
@@ -1,48 +1,45 @@
+# Save MP4 file example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can capture audio and video and save them as an MP4 file. The current version only supports
+# the MP4 container; video supports H.264/H.265 and audio supports g711a/g711u.
+
 from media.mp4format import *
+import os

-def canmv_mp4_muxer_test():
+def mp4_muxer_test():
+    print("mp4_muxer_test start")
     width = 1280
     height = 720
+    # instantiate the mp4 container
     mp4_muxer = Mp4Container()
-
     mp4_cfg = Mp4CfgStr(mp4_muxer.MP4_CONFIG_TYPE_MUXER)
     if mp4_cfg.type == mp4_muxer.MP4_CONFIG_TYPE_MUXER:
         file_name = "/sdcard/app/tests/test.mp4"
         mp4_cfg.SetMuxerCfg(file_name, mp4_muxer.MP4_CODEC_ID_H265, width, height, mp4_muxer.MP4_CODEC_ID_G711U)
-
-    ret = mp4_muxer.Create(mp4_cfg)
-    if ret:
-        print("canmv_mp4_muxer_test, mp4 muxer Create failed.")
-        return -1
-
-    ret = mp4_muxer.Start()
-    if ret:
-        print("canmv_mp4_muxer_test, mp4 muxer Start failed.")
-        return -1
+    # create the mp4 muxer
+    mp4_muxer.Create(mp4_cfg)
+    # start the mp4 muxer
+    mp4_muxer.Start()

     frame_count = 0
-
-    while True:
-        ret = mp4_muxer.Process()
-        if ret:
-            print("canmv_mp4_muxer_test, mp4 muxer Process failed.")
-            return -1
-
-        frame_count += 1
-        print("frame_coutn = ", frame_count)
-        if frame_count >= 100:
-            break
-
-    ret = mp4_muxer.Stop()
-    if ret:
-        print("canmv_mp4_muxer_test, mp4 muxer Stop failed.")
-        return -1
-
-    ret = mp4_muxer.Destroy()
-    if ret:
-        print("canmv_mp4_muxer_test, mp4 muxer Destroy failed.")
-        return -1
-
-    return 0
-
-canmv_mp4_muxer_test()
\ No newline at end of file
+    try:
+        while True:
+            os.exitpoint()
+            # process audio/video data and write it to the file in MP4 format
+            mp4_muxer.Process()
+            frame_count += 1
+            print("frame_count = ", frame_count)
+            if frame_count >= 200:
+                break
+    except BaseException as e:
+        print(e)
+    # stop the mp4 muxer
+    mp4_muxer.Stop()
+    # destroy the mp4 muxer
+    mp4_muxer.Destroy()
+    print("mp4_muxer_test stop")
+
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    mp4_muxer_test()
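The muxer loop above stops after 200 processed frames. Assuming `Process()` advances one video frame per call (which the `frame_count` bookkeeping suggests, though the source does not state it), that is roughly 6.7 s of footage at a 30 fps sensor. A tiny helper for picking `frame_count` for a target duration; the name and the per-call assumption are illustrative:

```python
def frames_for(seconds, fps=30):
    # number of Process() iterations needed for `seconds` of video at `fps`
    return int(seconds * fps)

print(frames_for(6.7))   # ~201 frames
```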
diff --git a/share/qtcreator/examples/01-Media/player.py b/share/qtcreator/examples/01-Media/player.py
old mode 100755
new mode 100644
index 2a477e39c52..c000ee4d699
--- a/share/qtcreator/examples/01-Media/player.py
+++ b/share/qtcreator/examples/01-Media/player.py
@@ -1,43 +1,39 @@
-from media.player import *
+# play mp4 file example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can load local files to play. The current version only supports the MP4 container;
+# video supports H.264/H.265 and audio supports g711a/g711u.

-start_play = False
+from media.player import *  # import the player module, used to play mp4 files
+import os, sys
+
+start_play = False          # playback-finished flag

 def player_event(event,data):
     global start_play
-    if(event == K_PLAYER_EVENT_EOF):
-        start_play = False
+    if(event == K_PLAYER_EVENT_EOF):    # end-of-file event
+        start_play = False              # clear the playing flag

 def play_mp4_test(filename):
     global start_play
-    player=Player()
-    player.load(filename)
-    player.set_event_callback(player_event)
-    player.start()
+    player=Player()                             # create the player object
+    player.load(filename)                       # load the mp4 file
+    player.set_event_callback(player_event)     # set the player event callback
+    player.start()                              # start playback
     start_play = True

-    while(start_play):
-        time.sleep(0.1)
-
-    player.stop()
+    # wait for playback to finish
+    try:
+        while(start_play):
+            time.sleep(0.1)
+            os.exitpoint()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+    except BaseException as e:
+        sys.print_exception(e)
+
+    player.stop()   # stop playback
     print("play over")

-'''
-def test(filename):
-    player=Player()
-    print("....load_mp4")
-    player.load(filename)
-    time.sleep(1)
-    print("....destroy_mp4")
-    player.destroy_mp4();
-
-
-    print("======_init_media_buffer before")
-    time.sleep(1)
-    player._init_media_buffer()
-    print("======_init_media_buffer end")
-    time.sleep(3)
-    print("======_deinit_media_buffer before")
-    player._deinit_media_buffer()
-    print("======_deinit_media_buffer end")
-'''
-
-play_mp4_test("/sdcard/app/tests/test.mp4")
\ No newline at end of file
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    play_mp4_test("/sdcard/app/tests/test.mp4")     # play an mp4 file
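The player example waits on a module-level flag that the EOF callback clears. If you want the same wait with a safety timeout, a generic polling helper works; everything below is an illustrative sketch, not part of the Player API:

```python
import time

def wait_flag(is_set, timeout_s=60.0, poll_s=0.1):
    # poll a predicate until it returns True or the timeout expires
    waited = 0.0
    while not is_set():
        time.sleep(poll_s)
        waited += poll_s
        if waited >= timeout_s:
            return False    # gave up before the condition was met
    return True

done = wait_flag(lambda: True)   # pass e.g. lambda: not start_play in the example
```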
diff --git a/share/qtcreator/examples/01-Media/venc.py b/share/qtcreator/examples/01-Media/venc.py
old mode 100755
new mode 100644
index 861ddf2f8da..1711083a3bd
--- a/share/qtcreator/examples/01-Media/venc.py
+++ b/share/qtcreator/examples/01-Media/venc.py
@@ -1,58 +1,46 @@
+# Video encode example
+#
+# Note: You will need an SD card to run this example.
+#
+# You can capture video and encode it into an H.264/H.265 file.
+
 from media.vencoder import *
 from media.camera import *
 from media.media import *
-from time import *
-import time
+import time, os, sys

-def canmv_venc_test():
+def venc_test():
+    print("venc_test start")
     width = 1280
     height = 720
     venc_chn = VENC_CHN_ID_0
-
     width = ALIGN_UP(width, 16)
+    # initialize the sensor
     camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
-
-    # set vicap chn0
+    # set the camera output buffer count
     camera.set_outbufs(CAM_DEV_ID_0, CAM_CHN_ID_0, 6)
+    # set the camera output size
     camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, width, height)
+    # set the camera output format
     camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
-
+    # instantiate the video encoder
     encoder = Encoder()
-    ret = encoder.SetOutBufs(venc_chn, 15, width, height)
-    if ret:
-        print("canmv_venc_test, encoder SetOutBufs failed.")
-        return -1
-
-    ret = media.buffer_init()
-    if ret:
-        print("canmv_venc_test, buffer_init failed.")
-        return ret
-
+    # set the video encoder output buffers
+    encoder.SetOutBufs(venc_chn, 15, width, height)
+    # initialize the configured media buffers
+    media.buffer_init()
     chnAttr = ChnAttrStr(encoder.PAYLOAD_TYPE_H265, encoder.H265_PROFILE_MAIN, width, height)
     streamData = StreamData()
-
-    ret = encoder.Create(venc_chn, chnAttr)
-    if ret < 0:
-        print("canmv_venc_test, vencoder create filed.")
-        return ret
-
-    # Bind with camera
+    # create the encoder
+    encoder.Create(venc_chn, chnAttr)
+    # bind the camera to the encoder
     media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
     media_sink = media_device(VIDEO_ENCODE_MOD_ID, VENC_DEV_ID, venc_chn)
-    ret = media.create_link(media_source, media_sink)
-    if ret:
-        print("cam_venc_test, create link with camera failed.")
-        return ret
-
-    ret = encoder.Start(venc_chn)
-    if ret:
-        print("cam_venc_test, encoder start failed")
-        return ret
-
-    ret = camera.start_stream(CAM_DEV_ID_0)
-    if ret:
-        print("cam_venc_test, camera start failed")
-        return ret
+    media.create_link(media_source, media_sink)
+    # start encoding
+    encoder.Start(venc_chn)
+    # start the camera
+    camera.start_stream(CAM_DEV_ID_0)

     frame_count = 0
     if chnAttr.payload_type == encoder.PAYLOAD_TYPE_H265:
@@ -62,53 +50,43 @@ def venc_test():
     else:
         suffix = "unkown"
         print("cam_venc_test, venc payload_type unsupport")
-        return -1

     out_file = f"/sdcard/app/tests/venc_chn_{venc_chn:02d}.{suffix}"
     print("save stream to file: ", out_file)

     with open(out_file, "wb") as fo:
-        while True:
-            ret = encoder.GetStream(venc_chn, streamData)
-            if ret < 0:
-                print("cam_venc_test, venc get stream failed")
-                return ret
-
-            for pack_idx in range(0, streamData.pack_cnt):
-                stream_data = uctypes.bytearray_at(streamData.data[pack_idx], streamData.data_size[pack_idx])
-                fo.write(stream_data)
-                print("stream size: ", streamData.data_size[pack_idx], "stream type: ", streamData.stream_type[pack_idx])
-
-            ret = encoder.ReleaseStream(venc_chn, streamData)
-            if ret < 0:
-                print("cam_venc_test, venc release stream failed")
-                return ret
-
-            frame_count += 1
-            if frame_count >= 100:
-                break
-
+        try:
+            while True:
+                os.exitpoint()
+                encoder.GetStream(venc_chn, streamData)     # get one frame of stream data
+
+                for pack_idx in range(0, streamData.pack_cnt):
+                    stream_data = uctypes.bytearray_at(streamData.data[pack_idx], streamData.data_size[pack_idx])
+                    fo.write(stream_data)                   # write the stream data to the file
+                    print("stream size: ", streamData.data_size[pack_idx], "stream type: ", streamData.stream_type[pack_idx])
+
+                encoder.ReleaseStream(venc_chn, streamData) # release the frame of stream data
+
+                frame_count += 1
+                if frame_count >= 100:
+                    break
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+        except BaseException as e:
+            sys.print_exception(e)
+
+    # stop the camera
     camera.stop_stream(CAM_DEV_ID_0)
-
-    ret = media.destroy_link(media_source, media_sink)
-    if ret:
-        print("cam_venc_test, venc destroy link with camera failed.")
-        return ret
-
-    ret = encoder.Stop(venc_chn)
-    if ret < 0:
-        print("cam_venc_test, venc stop failed.")
-        return ret
-
-    ret = encoder.Destroy(venc_chn)
-    if ret < 0:
-        print("cam_venc_test, venc destroy failed.")
-        return ret
-
-    ret = media.buffer_deinit()
-    if ret:
-        print("cam_venc_test, media buffer deinit failed.")
-        return ret
-
-
-canmv_venc_test()
+    # destroy the camera/encoder link
+    media.destroy_link(media_source, media_sink)
+    # stop encoding
+    encoder.Stop(venc_chn)
+    # destroy the encoder
+    encoder.Destroy(venc_chn)
+    # deinit the media buffer
+    media.buffer_deinit()
+    print("venc_test stop")
+
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    venc_test()
diff --git a/share/qtcreator/examples/02-Machine/adc/adc.py b/share/qtcreator/examples/02-Machine/adc/adc.py
old mode 100755
new mode 100644
index 4266a9cb793..fa1e2048460
--- a/share/qtcreator/examples/02-Machine/adc/adc.py
+++ b/share/qtcreator/examples/02-Machine/adc/adc.py
@@ -1,6 +1,10 @@
+# Basic example
+#
+# Welcome to CanMV IDE. Click the green button at the bottom left of the IDE to run the script.
+
 from machine import ADC

-adc = ADC(0,enable=True)
-value = adc.value()
+adc = ADC(0,enable=True)    # construct an ADC object; channel 0 is enabled by default
+value = adc.value()         # read the value of channel 0
 print("value = %d" % value)
-adc.deinit()
+adc.deinit()                # deinit the ADC object
diff --git a/share/qtcreator/examples/02-Machine/fft/fft.py b/share/qtcreator/examples/02-Machine/fft/fft.py
old mode 100755
new mode 100644
index 3a1342c46f4..c4584d6e31e
--- a/share/qtcreator/examples/02-Machine/fft/fft.py
+++ b/share/qtcreator/examples/02-Machine/fft/fft.py
@@ -1,3 +1,7 @@
+# Basic example
+#
+# Welcome to CanMV IDE. Click the green button at the bottom left of the IDE to run the script.
+
 from machine import FFT
 import array
 import math
@@ -13,18 +17,18 @@ def input_data():
         data3 = 0.2 * math.cos(4 * 2 * PI * i / 64)
         data4 = 1000 * math.cos(5 * 2 * PI * i / 64)
         rx.append((int(data0 + data1 + data2 + data3 + data4)))
-input_data()
+input_data()    # initialize the data (a list) to run the FFT on

 print(rx)

-data = np.array(rx,dtype=np.uint16)
+data = np.array(rx,dtype=np.uint16)     # convert the list to an ndarray
 print(data)

-fft1 = FFT(data, 64, 0x555)
-res = fft1.run()
+fft1 = FFT(data, 64, 0x555)     # create an FFT object with 64 points and offset 0x555
+res = fft1.run()                # run the FFT and get the transformed data
 print(res)

-res = fft1.amplitude(res)
+res = fft1.amplitude(res)       # get the amplitude of each frequency point
 print(res)

-res = fft1.freq(64,38400)
+res = fft1.freq(64,38400)       # get the frequency of each frequency point
 print(res)
diff --git a/share/qtcreator/examples/02-Machine/fpioa/fpioa.py b/share/qtcreator/examples/02-Machine/fpioa/fpioa.py
old mode 100755
new mode 100644
index e0e585350f5..6c32c8d7f65
--- a/share/qtcreator/examples/02-Machine/fpioa/fpioa.py
+++ b/share/qtcreator/examples/02-Machine/fpioa/fpioa.py
@@ -15,8 +15,12 @@
 a.set_function(60,set_sl=1,set_ie=0,set_oe=0,set_pd=1,set_pu=0,set_ds=6,set_st=0,set_di=1)
 a.help(60)

+try:
+    print(a.set_function(60,set_msc=0))
+
+except Exception as e:
+    print("set error ", e)

-a.set_function(60,set_msc=0)
 a.help(60)
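The fpioa change above deliberately shows that `set_function()` can raise on an invalid attribute combination. If a pin-setup script should keep going and just log the failure, a small wrapper around the same call works; the wrapper name is illustrative, only `set_function()` itself comes from the example:

```python
def try_set_function(fpioa, pin, **kwargs):
    # returns True on success, False (after logging) if the attribute combo is rejected
    try:
        fpioa.set_function(pin, **kwargs)
        return True
    except Exception as e:
        print("set_function failed for pin", pin, ":", e)
        return False
```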
diff --git a/share/qtcreator/examples/02-Machine/gpio/gpio.py b/share/qtcreator/examples/02-Machine/gpio/gpio.py
old mode 100755
new mode 100644
index 2a9f9616e74..e6b3e865f00
--- a/share/qtcreator/examples/02-Machine/gpio/gpio.py
+++ b/share/qtcreator/examples/02-Machine/gpio/gpio.py
@@ -1,13 +1,12 @@
+# Basic example
+#
+# Welcome to CanMV IDE. Click the green button at the bottom left of the IDE to run the script.
+
 from machine import GPIO

-gpio = GPIO(8, GPIO.OUT, GPIO.PULL_UP, value=0)
-value = gpio.value()
-print("value = %d" % value)
-gpio.value(1)
+gpio = GPIO(8, GPIO.OUT, GPIO.PULL_UP, value=0)     # construct a GPIO object on pin 8: pull-up, output, initial level low
+value = gpio.value()                                # read the gpio value
+print("value = %d" % value)
+gpio.value(1)                                       # set the gpio output high
 value = gpio.value()
 print("value = %d" % value)
-
-def on_timer(arg):
-    print("time up: %d" % arg)
-
-gpio.irq(on_timer,3)
diff --git a/share/qtcreator/examples/02-Machine/i2c/i2c.py b/share/qtcreator/examples/02-Machine/i2c/i2c.py
index bfaaf35246e..ac1e19afda9 100755
--- a/share/qtcreator/examples/02-Machine/i2c/i2c.py
+++ b/share/qtcreator/examples/02-Machine/i2c/i2c.py
@@ -1,32 +1,27 @@
 from machine import I2C

-i2c4=machine.I2C(4)
+i2c4=I2C(4)     # init i2c4

-i2c4.scan()
+a=i2c4.scan()   # scan for i2c slave devices
+print(a)

-i2c4.writeto_mem(0x3b,0xff,bytes([0x80]),mem_size=8)
-#value =0x17
-i2c4.readfrom_mem(0x3b,0x00,1,mem_size=8)
-#value =0x2
-i2c4.readfrom_mem(0x3b,0x01,1,mem_size=8)
+i2c4.writeto_mem(0x3b,0xff,bytes([0x80]),mem_size=8)    # write hdmi page address (0x80)
+i2c4.readfrom_mem(0x3b,0x00,1,mem_size=8)               # read hdmi id0, value = 0x17
+i2c4.readfrom_mem(0x3b,0x01,1,mem_size=8)               # read hdmi id1, value = 0x2

-i2c4.writeto(0x3b,bytes([0xff,0x80]),True)
-i2c4.writeto(0x3b,bytes([0x00]),True)
-#value =0x17
-i2c4.readfrom(0x3b,1)
-i2c4.writeto(0x3b,bytes([0x01]),True)
-#value =0x2
-i2c4.readfrom(0x3b,1)
+i2c4.writeto(0x3b,bytes([0xff,0x80]),True)  # write hdmi page address (0x80)
+i2c4.writeto(0x3b,bytes([0x00]),True)       # send register address 0 to be read
+i2c4.readfrom(0x3b,1)                       # read hdmi id0, value = 0x17
+i2c4.writeto(0x3b,bytes([0x01]),True)       # send register address 1 to be read
+i2c4.readfrom(0x3b,1)                       # read hdmi id1, value = 0x2

-i2c4.writeto_mem(0x3b,0xff,bytes([0x80]),mem_size=8)
+i2c4.writeto_mem(0x3b,0xff,bytes([0x80]),mem_size=8)    # write hdmi page address (0x80)
 a=bytearray(1)
-i2c4.readfrom_mem_into(0x3b,0x0,a,mem_size=8)
-#value =0x17
-print(a)
+i2c4.readfrom_mem_into(0x3b,0x0,a,mem_size=8)           # read hdmi id0 into a, value = 0x17
+print(a)                                                # print a, value = 0x17

-i2c4.writeto(0x3b,bytes([0xff,0x80]),True)
-i2c4.writeto(0x3b,bytes([0x00]),True)
+i2c4.writeto(0x3b,bytes([0xff,0x80]),True)  # write hdmi page address (0x80)
+i2c4.writeto(0x3b,bytes([0x00]),True)       # send register address 0 to be read
 b=bytearray(1)
-i2c4.readfrom_into(0x3b,b)
-#value =0x17
-print(b)
\ No newline at end of file
+i2c4.readfrom_into(0x3b,b)                  # read hdmi id0 into b, value = 0x17
+print(b)                                    # print b, value = 0x17
\ No newline at end of file
diff --git a/share/qtcreator/examples/02-Machine/pwm/pwm.py b/share/qtcreator/examples/02-Machine/pwm/pwm.py
old mode 100755
new mode 100644
index 816b6f4bcc7..97927cb4217
--- a/share/qtcreator/examples/02-Machine/pwm/pwm.py
+++ b/share/qtcreator/examples/02-Machine/pwm/pwm.py
@@ -1,4 +1,11 @@
 from machine import PWM
-
-# channel 0 output freq 1kHz duty 50%, enable
+# instantiate PWM channel 0: frequency 1000 Hz, duty cycle 50%, output enabled by default
 pwm0 = PWM(0, 1000, 50, enable = True)
+# disable channel 0 output
+pwm0.enable(0)
+# change channel 0 frequency to 2000 Hz
+pwm0.freq(2000)
+# change channel 0 duty cycle to 40%
+pwm0.duty(40)
+# enable channel 0 output
+pwm0.enable(1)
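Translating the PWM settings above into timings: at 2000 Hz the period is 0.5 ms, so a 40% duty cycle means the output is high for 0.2 ms of every cycle. The arithmetic, as a standalone check:

```python
# Duty cycle to pulse width for the channel configured above.
freq_hz, duty_pct = 2000, 40
period_us = 1_000_000 / freq_hz
high_us = period_us * duty_pct / 100
print(period_us, high_us)   # 500.0 us period, 200.0 us high time
```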
diff --git a/share/qtcreator/examples/02-Machine/spi/spi.py b/share/qtcreator/examples/02-Machine/spi/spi.py
new file mode 100755
index 00000000000..929056623fd
--- /dev/null
+++ b/share/qtcreator/examples/02-Machine/spi/spi.py
@@ -0,0 +1,35 @@
+from machine import SPI
+from machine import FPIOA
+a = FPIOA()
+
+a.help(14)
+a.set_function(14,a.QSPI0_CS0)
+a.help(14)
+
+a.help(15)
+a.set_function(15,a.QSPI0_CLK)
+a.help(15)
+
+a.help(16)
+a.set_function(16,a.QSPI0_D0)
+a.help(16)
+
+a.help(17)
+a.set_function(17,a.QSPI0_D1)
+a.help(17)
+
+spi=SPI(1,baudrate=5000000, polarity=0, phase=0, bits=8)    # init spi: clock 5 MHz, polarity 0, phase 0, 8-bit data
+
+spi.write(bytes([0x66]))    # enable gd25lq128 reset
+
+spi.write(bytes([0x99]))    # gd25lq128 reset
+
+a=bytes([0x9f])             # send buffer
+b=bytearray(3)              # receive buffer
+spi.write_readinto(a,b)     # read gd25lq128 id
+print(b)                    # bytearray(b'\xc8`\x18')
+
+a=bytes([0x90,0,0,0])       # send buffer
+b=bytearray(2)              # receive buffer
+spi.write_readinto(a,b)     # read gd25lq128 id
+print(b)                    # bytearray(b'\xc8\x17')
diff --git a/share/qtcreator/examples/02-Machine/timer/timer.py b/share/qtcreator/examples/02-Machine/timer/timer.py
old mode 100755
new mode 100644
index a62ea128253..64e9fe74331
--- a/share/qtcreator/examples/02-Machine/timer/timer.py
+++ b/share/qtcreator/examples/02-Machine/timer/timer.py
@@ -1,8 +1,10 @@
-from machine import Timer
-
+# Basic example
+#
+# Welcome to CanMV IDE. Click the green button at the bottom left of the IDE to run the script.

-def on_timer(arg):
+from machine import Timer

+def on_timer(arg):  # define the timer callback
     print("time up: %d" % arg)

-tim = Timer(mode=Timer.MODE_ONE_SHOT,period=3, unit=Timer.UNIT_S, callback=on_timer, arg=1, start=True)
+tim = Timer(mode=Timer.MODE_ONE_SHOT,period=3, unit=Timer.UNIT_S, callback=on_timer, arg=1, start=True)     # construct a one-shot timer with a 3 s period; it starts by default
diff --git a/share/qtcreator/examples/02-Machine/uart/uart.py b/share/qtcreator/examples/02-Machine/uart/uart.py
new file mode 100644
index 00000000000..22f0dd3d1e5
--- /dev/null
+++ b/share/qtcreator/examples/02-Machine/uart/uart.py
@@ -0,0 +1,16 @@
+from machine import UART
+# UART2: baudrate 115200, 8 bits, parity none, one stopbit
+uart = UART(UART.UART2, baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE)
+# UART write
+r = uart.write("UART test")
+print(r)
+# UART read
+r = uart.read()
+print(r)
+# UART readline
+r = uart.readline()
+print(r)
+# UART readinto
+b = bytearray(8)
+r = uart.readinto(b)
+print(r)
diff --git a/share/qtcreator/examples/02-Machine/wdt/wdt.py b/share/qtcreator/examples/02-Machine/wdt/wdt.py
old mode 100755
new mode 100644
index 759dc638bbd..1c6a656a11b
--- a/share/qtcreator/examples/02-Machine/wdt/wdt.py
+++ b/share/qtcreator/examples/02-Machine/wdt/wdt.py
@@ -1,13 +1,17 @@
+# Basic example
+#
+# Welcome to CanMV IDE. Click the green button at the bottom left of the IDE to run the script.
+
 import time
 from machine import WDT

-wdt1 = WDT(1,3)
+wdt1 = WDT(1,3)     # construct a wdt object on /dev/watchdog1 with a 3 s timeout
 print('into', wdt1)
-time.sleep(2)
-print(time.ticks_ms())
+time.sleep(2)       # sleep 2 s
+print(time.ticks_ms())
 ## 1.test wdt feed
-wdt1.feed()
-time.sleep(2)
+wdt1.feed()         # feed the watchdog
+time.sleep(2)       # sleep 2 s
 print(time.ticks_ms())
 ## 2.test wdt stop
-wdt1.stop()
+wdt1.stop()         # stop the watchdog
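A typical watchdog usage pattern, built only from the calls the wdt example already demonstrates (`WDT(1, 3)`, `feed()`, `stop()`): feed inside the main loop more often than the 3 s timeout, so a hung loop lets the timer expire and reset the system.

```python
import time
from machine import WDT

wdt1 = WDT(1, 3)        # /dev/watchdog1, 3 s timeout
for _ in range(5):      # stand-in for real work
    time.sleep(1)       # each iteration finishes well under the 3 s timeout
    wdt1.feed()         # restart the countdown
wdt1.stop()             # stop the watchdog when done, as the example does
```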
diff --git a/share/qtcreator/examples/04-AI-Demo/face_detection.py b/share/qtcreator/examples/04-AI-Demo/face_detection.py
old mode 100755
new mode 100644
index a0a9c3ef109..57b1c801465
--- a/share/qtcreator/examples/04-AI-Demo/face_detection.py
+++ b/share/qtcreator/examples/04-AI-Demo/face_detection.py
@@ -7,6 +7,7 @@
 import image        # image module, for reading images and drawing elements (boxes, points, etc.)
 import time         # timing statistics
 import gc           # garbage collection module
+import os, sys      # OS interface modules

 #********************for config.py********************
 # display resolution
@@ -278,7 +279,7 @@ def media_init():
     config.comm_pool[0].blk_cnt = 1
     config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE

-    ret = media.buffer_config(config)
+    media.buffer_config(config)

     global media_source, media_sink
     media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
@@ -286,26 +287,24 @@ def media_init():
     media.create_link(media_source, media_sink)

     # init the media buffer
-    ret = media.buffer_init()
-    if ret:
-        return ret
+    media.buffer_init()

     global buffer, draw_img, osd_img
     buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
     # used for drawing boxes
-    draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC)
+    draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)
     # used to copy the drawing result, preventing buffer moves while drawing
     osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr)
-    return ret

 def media_deinit():
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
     # release media resources
     global buffer,media_source, media_sink
     media.release_buffer(buffer)
     media.destroy_link(media_source, media_sink)
-    ret = media.buffer_deinit()
-    return ret
+    media.buffer_deinit()

 #********************for face_detect.py********************
@@ -318,28 +317,18 @@ def face_detect_inference():
     # display init
     display_init()

-    rgb888p_img = None
     # Note: be sure to wrap the whole flow below in try/except so resources are fully released
     # when the program stops, ensuring the next run still works
     try:
         # Note: media init must happen before camera_start, so the media buffers are fully configured
-        ret = media_init()
-        if ret:
-            print("face_detect_test, buffer init failed")
-            return ret
+        media_init()
         # start the camera
        camera_start(CAM_DEV_ID_0)
-        time.sleep(5)
         while True:
+            os.exitpoint()
             with ScopedTiming("total",1):
                 # (1) read one frame
                 rgb888p_img = camera_read(CAM_DEV_ID_0)
-                # (2) if the read failed, release the frame
-                if rgb888p_img == -1:
-                    print("face_detect_test, capture_image failed")
-                    camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                    rgb888p_img = None
-                    continue
-
+                # (2)
                 # (3) if the read succeeded, run inference on the frame
                 if rgb888p_img.format() == image.RGBP888:
                     # (3.1) run inference on the image and get detection results
@@ -349,17 +338,14 @@ def face_detect_inference():

                 # (4) release the frame
                 camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                rgb888p_img = None
-
-    except Exception as e:
+                gc.collect()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+    except BaseException as e:
+        sys.print_exception(e)
         # catch runtime exceptions and print the error
         print(f"An error occurred during buffer used: {e}")
     finally:
-        # release the current frame
-        if rgb888p_img is not None:
-            # release the allocated memory before stopping
-            camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-
         # stop the camera
         camera_stop(CAM_DEV_ID_0)
         # release display resources
@@ -368,15 +354,11 @@ def face_detect_inference():
         kpu_deinit(kpu_face_detect)
         # garbage collect
         gc.collect()
-        time.sleep(1)
         # release media resources
-        ret = media_deinit()
-        if ret:
-            print("face_detect_test, buffer_deinit failed")
-            return ret
+        media_deinit()

     print("face_detect_test end")
-    return 0

 if __name__ == '__main__':
+    os.exitpoint(os.EXITPOINT_ENABLE)
     face_detect_inference()
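Detector post-processing like the face demo's typically deduplicates overlapping candidate boxes with IoU-based non-maximum suppression. The helper below is a standalone illustration of the IoU arithmetic for boxes given as `(x1, y1, x2, y2)`; it is not taken from the demo's KPU code:

```python
def iou(a, b):
    # intersection rectangle
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    # union = sum of areas minus intersection
    union = (a[2]-a[0]) * (a[3]-a[1]) + (b[2]-b[0]) * (b[3]-b[1]) - inter
    return inter / union if union else 0.0

print(iou((0, 0, 100, 100), (50, 50, 150, 150)))    # ~0.1429
```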
diff --git a/share/qtcreator/examples/04-AI-Demo/face_landmark.py b/share/qtcreator/examples/04-AI-Demo/face_landmark.py
old mode 100755
new mode 100644
index 92b929b4380..08023310f8f
--- a/share/qtcreator/examples/04-AI-Demo/face_landmark.py
+++ b/share/qtcreator/examples/04-AI-Demo/face_landmark.py
@@ -7,7 +7,7 @@
 import image        # image module, for reading images and drawing elements (boxes, points, etc.)
 import time         # timing statistics
 import gc           # garbage collection module
-import os           # OS interface module
+import os, sys      # OS interface modules
 import math         # math module

@@ -502,7 +502,7 @@ def media_init():
     config.comm_pool[0].blk_cnt = 1
     config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE

-    ret = media.buffer_config(config)
+    media.buffer_config(config)

     global media_source, media_sink
     media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
@@ -510,9 +510,7 @@ def media_init():
     media.create_link(media_source, media_sink)

     # init the multimedia buffer
-    ret = media.buffer_init()
-    if ret:
-        return ret
+    media.buffer_init()

     global buffer, draw_img_ulab,draw_img, osd_img
     buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
     # used for drawing boxes
@@ -521,16 +519,16 @@ def media_init():
     # used to copy the drawing result, preventing buffer moves while drawing
     osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr)
-    return ret

 def media_deinit():
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
     # release media resources
     global buffer,media_source, media_sink
     media.release_buffer(buffer)
     media.destroy_link(media_source, media_sink)
-    ret = media.buffer_deinit()
-    return ret
+    media.buffer_deinit()

 #********************for face_detect.py********************
 def face_landmark_inference():
@@ -544,28 +542,19 @@ def face_landmark_inference():
     # display init
     display_init()

-    rgb888p_img = None
     # Note: be sure to wrap the whole flow below in try/except so resources are fully released
     # when the program stops, ensuring the next run still works
     try:
         # Note: media init must happen before camera_start, so the media buffers are fully configured
-        ret = media_init()
-        if ret:
-            print("face_detect_test, buffer init failed")
-            return ret
+        media_init()
         # start the camera
         camera_start(CAM_DEV_ID_0)
-        time.sleep(5)
         while True:
+            os.exitpoint()
             with ScopedTiming("total",1):
                 # (1) read one frame
                 rgb888p_img = camera_read(CAM_DEV_ID_0)
-                # (2) if the read failed, release the frame
-                if rgb888p_img == -1:
-                    print("face_detect_test, capture_image failed")
-                    camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                    rgb888p_img = None
-                    continue
+                # (2)
                 # (3) if the read succeeded, run inference on the frame
                 if rgb888p_img.format() == image.RGBP888:
                     # (3.1) run inference and get the face detection results
@@ -580,18 +569,14 @@ def face_landmark_inference():

                 # (4) release the frame
                 camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                rgb888p_img = None
-        #with ScopedTiming("gc collect", debug_mode > 0):
-            #gc.collect()
-    except Exception as e:
+                gc.collect()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+    except BaseException as e:
+        sys.print_exception(e)
         # catch runtime exceptions and print the error
         print(f"An error occurred during buffer used: {e}")
     finally:
-        # release the current frame
-        if rgb888p_img is not None:
-            # release the allocated memory before stopping
-            camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-
         # stop the camera
         camera_stop(CAM_DEV_ID_0)
         # release display resources
@@ -601,15 +586,11 @@ def face_landmark_inference():
         fld_kpu_deinit(kpu_face_landmark)
         # garbage collect
         gc.collect()
-        time.sleep(1)
         # release media resources
-        ret = media_deinit()
-        if ret:
-            print("face_landmark_test, buffer_deinit failed")
-            return ret
+        media_deinit()

     print("face_landmark_test end")
-    return 0

 if __name__ == '__main__':
+    os.exitpoint(os.EXITPOINT_ENABLE)
     face_landmark_inference()
diff --git a/share/qtcreator/examples/04-AI-Demo/face_mesh.py b/share/qtcreator/examples/04-AI-Demo/face_mesh.py
old mode 100755
new mode 100644
index 64546e4a0ea..54e11454f0d
--- a/share/qtcreator/examples/04-AI-Demo/face_mesh.py
+++ b/share/qtcreator/examples/04-AI-Demo/face_mesh.py
@@ -7,7 +7,7 @@
 import image        # image module, for reading images and drawing elements (boxes, points, etc.)
 import time         # timing statistics
 import gc           # garbage collection module
-import os           # OS interface module
+import os, sys      # OS interface modules
 import math         # math module

@@ -504,7 +504,7 @@ def media_init():
     config.comm_pool[0].blk_cnt = 1
     config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE

-    ret = media.buffer_config(config)
+    media.buffer_config(config)

     global media_source, media_sink
     media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
@@ -512,9 +512,7 @@ def media_init():
     media.create_link(media_source, media_sink)

     # init the multimedia buffer
-    ret = media.buffer_init()
-    if ret:
-        return ret
+    media.buffer_init()

     global buffer, draw_img_ulab,draw_img, osd_img
     buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
     # used for drawing boxes
@@ -523,16 +521,16 @@ def media_init():
     # used to copy the drawing result, preventing buffer moves while drawing
     osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr)
-    return ret

 def media_deinit():
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
     # release media resources
     global buffer,media_source, media_sink
     media.release_buffer(buffer)
     media.destroy_link(media_source, media_sink)
-    ret = media.buffer_deinit()
-    return ret
+    media.buffer_deinit()

 #********************for face_detect.py********************
 def face_mesh_inference():
@@ -547,29 +545,19 @@ def face_mesh_inference():
     # display init
     display_init()

-    rgb888p_img = None
     # Note: be sure to wrap the whole flow below in try/except so resources are fully released
     # when the program stops, ensuring the next run still works
     try:
         # Note: media init must happen before camera_start, so the media buffers are fully configured
-        ret = media_init()
-        if ret:
-            print("face_detect_test, buffer init failed")
-            return ret
+        media_init()
         # start the camera
         camera_start(CAM_DEV_ID_0)
-        time.sleep(5)
         while True:
+            os.exitpoint()
             with ScopedTiming("total",1):
                 # (1) read one frame
                 rgb888p_img = camera_read(CAM_DEV_ID_0)
-                # (2) if the read failed, release the frame
-                if rgb888p_img == -1:
-                    print("face_detect_test, capture_image failed")
-                    camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                    rgb888p_img = None
-                    continue
-                # (3) if the read succeeded, run inference on the frame
+                # (2) / (3) if the read succeeded, run inference on the frame
                 if rgb888p_img.format() == image.RGBP888:
                     # (3.1) run inference and get the face detection results
                     dets = fd_kpu_run(kpu_face_detect,rgb888p_img)
@@ -585,18 +573,14 @@ def face_mesh_inference():

                 # (4) release the frame
                 camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                rgb888p_img = None
-        #with ScopedTiming("gc collect", debug_mode > 0):
-            #gc.collect()
-    except Exception as e:
+                gc.collect()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+    except BaseException as e:
+        sys.print_exception(e)
         # catch runtime exceptions and print the error
         print(f"An error occurred during buffer used: {e}")
     finally:
-        # release the current frame
-        if rgb888p_img is not None:
-            # release the allocated memory before stopping
-            camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-
         # stop the camera
         camera_stop(CAM_DEV_ID_0)
         # release display resources
@@ -607,15 +591,11 @@ def face_mesh_inference():
         fmpost_kpu_deinit(kpu_face_mesh_post)
         # garbage collect
         gc.collect()
-        time.sleep(1)
         # release media resources
-        ret = media_deinit()
-        if ret:
-            print("face_mesh_test, buffer_deinit failed")
-            return ret
+        media_deinit()

-    #print("face_mesh_test end")
-    return 0
+    print("face_mesh_test end")

 if __name__ == '__main__':
+    os.exitpoint(os.EXITPOINT_ENABLE)
     face_mesh_inference()
diff --git a/share/qtcreator/examples/04-AI-Demo/face_parse.py b/share/qtcreator/examples/04-AI-Demo/face_parse.py
old mode 100755
new mode 100644
index b3b982b33b2..10d3ecc618c
--- a/share/qtcreator/examples/04-AI-Demo/face_parse.py
+++ b/share/qtcreator/examples/04-AI-Demo/face_parse.py
@@ -7,7 +7,7 @@
 import image        # image module, for reading images and drawing elements (boxes, points, etc.)
 import time         # timing statistics
 import gc           # garbage collection module
-import os           # OS interface module
+import os, sys      # OS interface modules
 import math         # math module

 #********************for config.py********************
@@ -415,7 +415,7 @@ def media_init():
     config.comm_pool[0].blk_cnt = 1
     config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE

-    ret = media.buffer_config(config)
+    media.buffer_config(config)

     global media_source, media_sink
     media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
@@ -423,9 +423,7 @@ def media_init():
     media.create_link(media_source, media_sink)

     # init the multimedia buffer
-    ret = media.buffer_init()
-    if ret:
-        return ret
+    media.buffer_init()

     global buffer, draw_img_ulab,draw_img, osd_img
     buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
@@ -435,16 +433,16 @@ def media_init():
     # used to copy the drawing result, preventing buffer moves while drawing
     osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr)
-    return ret

 def media_deinit():
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
     # release media resources
     global buffer,media_source, media_sink
     media.release_buffer(buffer)
     media.destroy_link(media_source, media_sink)
-    ret = media.buffer_deinit()
-    return ret
+    media.buffer_deinit()

 #********************for face_detect.py********************
 def face_parse_inference():
@@ -458,31 +456,20 @@ def face_parse_inference():
     # display init
     display_init()

-    rgb888p_img = None
     # Note: be sure to wrap the whole flow below in try/except so resources are fully released
     # when the program stops, ensuring the next run still works
     try:
         # Note: media init must happen before camera_start, so the media buffers are fully configured
-        ret = media_init()
-        if ret:
-            print("face_detect_test, buffer init failed")
-            return ret
+        media_init()
         # start the camera
         camera_start(CAM_DEV_ID_0)
-        time.sleep(5)
-        gc_count = 0
         while True:
+            os.exitpoint()
             with ScopedTiming("total",1):
                 # (1) read one frame
                 rgb888p_img = camera_read(CAM_DEV_ID_0)
-                # (2) if the read failed, release the frame
-                if rgb888p_img == -1:
-                    print("face_detect_test, capture_image failed")
-                    camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                    rgb888p_img = None
-                    continue
-
+                # (2)
                 # (3) if the read succeeded, run inference on the frame
                 if rgb888p_img.format() == image.RGBP888:
                     # (3.1) run inference and get the face detection results
@@ -497,16 +484,14 @@ def face_parse_inference():

                 # (4) release the frame
                 camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-                rgb888p_img = None
-    except Exception as e:
+                gc.collect()
+    except KeyboardInterrupt as e:
+        print("user stop: ", e)
+    except BaseException as e:
+        sys.print_exception(e)
         # catch runtime exceptions and print the error
         print(f"An error occurred during buffer used: {e}")
     finally:
-        # release the current frame
-        if rgb888p_img is not None:
-            # release the allocated memory before stopping
-            camera_release_image(CAM_DEV_ID_0,rgb888p_img)
-
         # stop the camera
         camera_stop(CAM_DEV_ID_0)
         # release display resources
@@ -516,15 +501,11 @@ def face_parse_inference():
         fp_kpu_deinit(kpu_face_parse)
         # garbage collect
         gc.collect()
-        time.sleep(1)
         # release media resources
-        ret = media_deinit()
-        if ret:
-            print("face_parse_test, buffer_deinit failed")
-            return ret
+        media_deinit()

     print("face_parse_test end")
-    return 0

 if __name__ == '__main__':
+    os.exitpoint(os.EXITPOINT_ENABLE)
     face_parse_inference()
face_detect.py******************** def face_pose_inference(): @@ -542,31 +540,20 @@ def face_pose_inference(): # 显示初始化 display_init() - rgb888p_img = None # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 try: # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - ret = media_init() - if ret: - print("face_detect_test, buffer init failed") - return ret + media_init() # 启动camera camera_start(CAM_DEV_ID_0) - time.sleep(5) - gc_count = 0 while True: + os.exitpoint() with ScopedTiming("total",1): # (1)读取一帧图像 rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取失败,释放当前帧 - if rgb888p_img == -1: - print("face_detect_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - + # (2) # (3)若读取成功,推理当前帧 if rgb888p_img.format() == image.RGBP888: # (3.1)推理当前图像,并获取人脸检测结果 @@ -581,16 +568,14 @@ def face_pose_inference(): # (4)释放当前帧 camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) # 捕捉运行运行中异常,并打印错误 print(f"An error occurred during buffer used: {e}") finally: - # 释放当前帧 - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # 停止camera camera_stop(CAM_DEV_ID_0) # 释放显示资源 @@ -600,15 +585,11 @@ def face_pose_inference(): fp_kpu_deinit(kpu_face_pose) # 垃圾回收 gc.collect() - time.sleep(1) # 释放媒体资源 - ret = media_deinit() - if ret: - print("face_pose_test, buffer_deinit failed") - return ret + media_deinit() print("face_pose_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) face_pose_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_recognition.py b/share/qtcreator/examples/04-AI-Demo/face_recognition.py old mode 100755 new mode 100644 index 0afd17f92ac..3f00be7f76e --- a/share/qtcreator/examples/04-AI-Demo/face_recognition.py +++ b/share/qtcreator/examples/04-AI-Demo/face_recognition.py @@ -7,7 +7,7 @@ import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 import time # 时间统计 import gc # 垃圾回收模块 -import os # 操作系统接口模块 +import os, sys # 操作系统接口模块 import math # 数学模块 #********************for config.py******************** @@ -585,7 +585,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -593,26 +593,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) # meida资源释放 global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #********************for face_detect.py******************** def face_recognition_inference(): @@ -626,29 
+624,19 @@ def face_recognition_inference(): # 显示初始化 display_init() - rgb888p_img = None # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 try: # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - ret = media_init() - if ret: - print("face_detect_test, buffer init failed") - return ret + media_init() # 启动camera camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): # (1)读取一帧图像 rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取失败,释放当前帧 - if rgb888p_img == -1: - print("face_detect_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - + # (2) # (3)若读取成功,推理当前帧 if rgb888p_img.format() == image.RGBP888: # (3.1)推理当前图像,并获取人脸检测结果 @@ -663,17 +651,14 @@ def face_recognition_inference(): # (4)释放当前帧 camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None gc.collect() - except Exception as e: + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) # 捕捉运行运行中异常,并打印错误 print(f"An error occurred during buffer used: {e}") finally: - # 释放当前帧 - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - # 停止camera camera_stop(CAM_DEV_ID_0) # 释放显示资源 @@ -683,15 +668,11 @@ def face_recognition_inference(): fr_kpu_deinit(kpu_face_recg) # 垃圾回收 gc.collect() - time.sleep(1) # 释放媒体资源 - ret = media_deinit() - if ret: - print("face_recognition_test, buffer_deinit failed") - return ret + media_deinit() print("face_recognition_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) face_recognition_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_registration.py b/share/qtcreator/examples/04-AI-Demo/face_registration.py old mode 100755 new mode 100644 index 301aba5fe43..55366a6e416 --- a/share/qtcreator/examples/04-AI-Demo/face_registration.py +++ b/share/qtcreator/examples/04-AI-Demo/face_registration.py @@ -4,7 +4,7 @@ import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 import time #时间统计 import gc #垃圾回收模块 -import os #操作系统接口模块 +import os, sys #操作系统接口模块 import math #数学模块 #********************for config.py******************** @@ -462,7 +462,10 @@ def face_registration_inference(): print('No person detected') gc.collect() - except Exception as e: + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: # 释放kpu资源 @@ -470,10 +473,9 @@ def face_registration_inference(): fr_kpu_deinit(kpu_face_reg) # 垃圾回收 gc.collect() - time.sleep(1) print("face_registration_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) face_registration_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/finger_guessing.py b/share/qtcreator/examples/04-AI-Demo/finger_guessing.py old mode 100755 new mode 100644 index e0723c6cd20..b2e41c386f3 --- a/share/qtcreator/examples/04-AI-Demo/finger_guessing.py +++ b/share/qtcreator/examples/04-AI-Demo/finger_guessing.py @@ -8,6 +8,7 @@ import time #时间统计 import gc #垃圾回收模块 import aicube #aicube模块,封装ai cube 相关后处理 +import os, sys #操作系统接口模块 ##config.py #display分辨率 @@ -420,7 +421,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -428,9 +429,7 @@ def media_init(): 
media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img, masks buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 @@ -439,16 +438,16 @@ def media_init(): # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for finger_guessing.py********** @@ -459,13 +458,8 @@ def finger_guessing_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("finger_guessing, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) # 开启 camera counts_guess = -1 # 猜拳次数 计数 player_win = 0 # 玩家 赢次计数 @@ -475,14 +469,9 @@ def finger_guessing_inference(): LIBRARY = ["fist","yeah","five"] # 猜拳 石头剪刀布 三种方案的dict while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - if rgb888p_img == -1: - print("finger_guessing, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: with ScopedTiming("trigger time", debug_mode > 0): @@ -491,6 +480,7 @@ def finger_guessing_inference(): draw_img.clear() for det_box in dets: gesture = hk_gesture(kpu_hand_keypoint_detect,rgb888p_img,det_box) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 得到手势类型 + camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图形 if (len(dets) >= 2): draw_img.draw_string( 300 , 500, "Must have one hand !", color=(255,255,0,0), scale=7) draw_img.copy_to(osd_img) @@ -572,31 +562,25 @@ def finger_guessing_inference(): else: draw_img.copy_to(osd_img) display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) # 将得到的图像 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图形 - rgb888p_img = None + else: + camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图形 gc.collect() - except Exception as e: + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 停止 display hd_kpu_deinit(kpu_hand_detect) # 释放手掌检测 kpu hk_kpu_deinit(kpu_hand_keypoint_detect) # 释放手掌关键点检测 kpu gc.collect() - ret = media_deinit() # 释放 整个 media - if ret: - print("finger_guessing, buffer_deinit failed") - return ret + media_deinit() # 释放 整个 media print("finger_guessing_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) finger_guessing_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/hand_detection.py b/share/qtcreator/examples/04-AI-Demo/hand_detection.py old mode 100755 new mode 100644 index 9ca1d45da2c..7fb6b1fd448 --- a/share/qtcreator/examples/04-AI-Demo/hand_detection.py +++ b/share/qtcreator/examples/04-AI-Demo/hand_detection.py @@ -8,7 +8,7 @@ import time #时间统计 import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - +import os, sys #操作系统接口模块 import 
gc #垃圾回收模块 ##config.py @@ -254,7 +254,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -262,26 +262,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for hand_detect.py********** def hand_detect_inference(): @@ -290,22 +288,14 @@ def hand_detect_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("hand_detect_test, buffer init failed") - return ret + media_init() camera_start(CAM_DEV_ID_0) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("hand_detect_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue # for rgb888planar if rgb888p_img.format() == image.RGBP888: @@ -313,27 +303,21 @@ def hand_detect_inference(): display_draw(dets) # 将得到的检测结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - #gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display kpu_deinit(kpu_hand_detect) # 释放 kpu gc.collect() - ret = media_deinit() # 释放 整个media - if ret: - print("hand_detect_test, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("hand_detect_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) hand_detect_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/hand_keypoint_detection.py b/share/qtcreator/examples/04-AI-Demo/hand_keypoint_detection.py old mode 100755 new mode 100644 index c01e33028e9..5b9490c5a1a --- a/share/qtcreator/examples/04-AI-Demo/hand_keypoint_detection.py +++ b/share/qtcreator/examples/04-AI-Demo/hand_keypoint_detection.py @@ -8,7 +8,7 @@ import time #时间统计 import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - +import os, sys #操作系统接口模块 import gc #垃圾回收模块 ##config.py @@ -349,7 +349,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, 
CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -357,26 +357,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for hand_keypoint_detect.py********** def hand_keypoint_detect_inference(): @@ -386,23 +384,13 @@ def hand_keypoint_detect_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("hand_detect_test, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("hand_detect_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 @@ -443,29 +431,24 @@ def hand_keypoint_detect_inference(): display_draw(hk_results[0], x1_kp, y1_kp, w_kp, h_kp) # 将得到的手掌关键点检测结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - #gc.collect() + gc.collect() draw_img.copy_to(osd_img) display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except Exception as e: + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display hd_kpu_deinit(kpu_hand_detect) # 释放手掌检测 kpu hk_kpu_deinit(kpu_hand_keypoint_detect) # 释放手掌关键点检测 kpu gc.collect() - ret = media_deinit() # 释放 整个media - if ret: - print("hand_detect_test, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("hand_detect_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) hand_keypoint_detect_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/hand_recognition.py b/share/qtcreator/examples/04-AI-Demo/hand_recognition.py old mode 100755 new mode 100644 index 6fb4c7371e5..4441e4beed5 --- a/share/qtcreator/examples/04-AI-Demo/hand_recognition.py +++ b/share/qtcreator/examples/04-AI-Demo/hand_recognition.py @@ -8,7 +8,7 @@ import time #时间统计 import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - +import os, sys #操作系统接口模块 import gc #垃圾回收模块 ##config.py @@ -338,7 +338,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, 
CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -346,26 +346,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for hand_recognition.py********** def hand_recognition_inference(): @@ -375,23 +373,13 @@ def hand_recognition_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("hand_recognition_test, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("hand_recognition_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 @@ -432,29 +420,24 @@ def hand_recognition_inference(): draw_img.draw_string( x1 , y1-50, hr_results, color=(255,0, 255, 0), scale=4) # 将得到的识别结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - #gc.collect() + gc.collect() draw_img.copy_to(osd_img) display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except Exception as e: + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display hd_kpu_deinit(kpu_hand_detect) # 释放手掌检测 kpu hr_kpu_deinit(kpu_hand_recognition) # 释放手势识别 kpu gc.collect() - ret = media_deinit() # 释放 整个media - if ret: - print("hand_recognition_test, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("hand_recognition_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) hand_recognition_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/keyword_spotting.py b/share/qtcreator/examples/04-AI-Demo/keyword_spotting.py old mode 100755 new mode 100644 index 416cac81ee3..c8d6293a2b9 --- a/share/qtcreator/examples/04-AI-Demo/keyword_spotting.py +++ b/share/qtcreator/examples/04-AI-Demo/keyword_spotting.py @@ -7,7 +7,7 @@ import time # 时间统计 import struct # 字节字符转换模块 import gc # 垃圾回收模块 -import os # 操作系统接口模块 +import os, sys # 操作系统接口模块 # key word spotting任务 # 检测阈值 @@ -55,9 +55,7 @@ def init_kws(): # 初始化音频流 p = PyAudio() p.initialize(CHUNK) - ret = media.buffer_init() - if ret: - print("record_audio, buffer_init failed") + media.buffer_init() # 用于采集实时音频数据 input_stream = p.open( format=FORMAT, @@ 
-142,6 +140,7 @@ def kws_inference(): pcm_data_list = [] try: while True: + os.exitpoint() with ScopedTiming("total", 1): pcm_data_list.clear() # 对实时音频流进行推理 @@ -154,6 +153,12 @@ def kws_inference(): pcm_data_list.append(float_pcm_data) # kpu运行和后处理 kpu_run_kws(kpu_kws,pcm_data_list) + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + print(f"An error occurred during buffer used: {e}") finally: input_stream.stop_stream() output_stream.stop_stream() @@ -162,7 +167,7 @@ def kws_inference(): p.terminate() media.buffer_deinit() aidemo.kws_fp_destroy(fp) - #gc.collect() if __name__=="__main__": + os.exitpoint(os.EXITPOINT_ENABLE) kws_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/licence_det.py b/share/qtcreator/examples/04-AI-Demo/licence_det.py old mode 100755 new mode 100644 index 82bc51ef74d..16c7fa67428 --- a/share/qtcreator/examples/04-AI-Demo/licence_det.py +++ b/share/qtcreator/examples/04-AI-Demo/licence_det.py @@ -7,6 +7,7 @@ import time #时间统计 import gc #垃圾回收模块 import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 +import os, sys #操作系统接口模块 ##config.py #display分辨率 @@ -220,7 +221,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -228,26 +229,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for licence_det.py********** @@ -257,53 +256,34 @@ def licence_det_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("licence_det, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("licence_det, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: dets = kpu_run(kpu_licence_det,rgb888p_img) # 执行车牌检测 kpu 运行 以及后处理过程 display_draw(dets) # 将得到的 检测结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - # gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - 
camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display kpu_deinit(kpu_licence_det) # 释放 kpu gc.collect() - time.sleep(1) - ret = media_deinit() # 释放整个media - if ret: - print("licence_det, buffer_deinit failed") - return ret + media_deinit() # 释放整个media print("licence_det end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) licence_det_inference() - - diff --git a/share/qtcreator/examples/04-AI-Demo/licence_det_rec.py b/share/qtcreator/examples/04-AI-Demo/licence_det_rec.py old mode 100755 new mode 100644 index 16540844cdc..ff6d08c6a21 --- a/share/qtcreator/examples/04-AI-Demo/licence_det_rec.py +++ b/share/qtcreator/examples/04-AI-Demo/licence_det_rec.py @@ -7,6 +7,7 @@ import time #时间统计 import gc #垃圾回收模块 import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 +import os, sys #操作系统接口模块 ##config.py #display分辨率 @@ -353,7 +354,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -361,26 +362,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for licence_det_rec.py********** @@ -391,24 +390,13 @@ def licence_det_rec_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("licence_det_rec, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("licence_det_rec, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: dets = det_kpu_run(kpu_licence_det,rgb888p_img) # 执行车牌检测 kpu 运行 以及 后处理过程 @@ -416,30 +404,22 @@ def licence_det_rec_inference(): display_draw(dets_recs) # 将得到的检测结果和识别结果 绘制到display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - # gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display det_kpu_deinit(kpu_licence_det) # 释放 车牌检测 kpu rec_kpu_deinit(kpu_licence_rec) # 释放 车牌识别 kpu 
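Every media_deinit() in this patch follows the same drain-then-free order; a sketch of the shared shape (the 100 ms pause is simply what these examples use before releasing buffers, presumably to let in-flight frames drain — the exact requirement is not stated here):

import os, time

def media_deinit():
    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)       # keep teardown interruptible while sleeping (assumed semantics)
    time.sleep_ms(100)                            # let pending camera/display transfers finish
    global buffer, media_source, media_sink
    media.release_buffer(buffer)                  # free the OSD buffer first
    media.destroy_link(media_source, media_sink)  # then break the camera->display link
    media.buffer_deinit()                         # finally tear down the whole VB pool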
gc.collect() - time.sleep(1) - ret = media_deinit() # 释放 整个media - if ret: - print("licence_det_rec, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("licence_det_rec end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) licence_det_rec_inference() - - diff --git a/share/qtcreator/examples/04-AI-Demo/object_detect_yolov8n.py b/share/qtcreator/examples/04-AI-Demo/object_detect_yolov8n.py old mode 100755 new mode 100644 index 5ceb0a93bb3..d1f9428d355 --- a/share/qtcreator/examples/04-AI-Demo/object_detect_yolov8n.py +++ b/share/qtcreator/examples/04-AI-Demo/object_detect_yolov8n.py @@ -6,6 +6,7 @@ import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 import time #时间统计 import gc #垃圾回收模块 +import os, sys #操作系统接口模块 ##config.py #display分辨率 @@ -338,7 +339,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -346,26 +347,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for ob_detect.py********** @@ -375,51 +374,34 @@ def ob_detect_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("ob_detect, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("ob_detect, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: dets = kpu_run(kpu_ob_detect,rgb888p_img) # 执行多目标检测 kpu运行 以及 后处理过程 display_draw(dets) # 将得到的检测结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - # gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止camera display_deinit() # 释放 display kpu_deinit(kpu_ob_detect) # 释放 kpu gc.collect() - time.sleep(1) - ret = media_deinit() # 释放 整个media - if ret: - print("ob_detect, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("ob_detect_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) 
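The os.exitpoint calls added throughout follow one lifecycle; the sketch below reflects the semantics implied by the flag names (an assumption — only the call sites, not the flag meanings, appear in these examples): enable at startup so stop requests raise KeyboardInterrupt, poll once per loop iteration, and switch to the sleep-tolerant mode just before teardown sleeps.

import os, time

os.exitpoint(os.EXITPOINT_ENABLE)            # startup: arm stop-request delivery
try:
    while True:
        os.exitpoint()                       # per iteration: raise KeyboardInterrupt if a stop is pending
        pass                                 # capture / inference work goes here
except KeyboardInterrupt as e:
    print("user stop: ", e)
finally:
    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)  # teardown: assumed to deliver stops during sleeps
    time.sleep_ms(100)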
ob_detect_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/ocr_det.py b/share/qtcreator/examples/04-AI-Demo/ocr_det.py old mode 100755 new mode 100644 index e1e0fd3a630..a7f18e96c76 --- a/share/qtcreator/examples/04-AI-Demo/ocr_det.py +++ b/share/qtcreator/examples/04-AI-Demo/ocr_det.py @@ -6,7 +6,7 @@ import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 import time #时间统计 import gc #垃圾回收模块 -import os #操作系统接口模块 +import os, sys #操作系统接口模块 import aicube #aicube模块,封装检测分割等任务相关后处理 # display分辨率 @@ -267,7 +267,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -275,26 +275,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放buffer,销毁link def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() # def ocr_det_inference(): @@ -302,50 +300,33 @@ def ocr_det_inference(): kpu_ocr_det = kpu_init_det(kmodel_file_det) # 创建ocr检测任务的kpu对象 camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("ocr_det_test, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - if rgb888p_img == -1: - print("ocr_det_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: det_results = kpu_run_det(kpu_ocr_det,rgb888p_img) # kpu运行获取kmodel的推理输出 display_draw(det_results) # 绘制检测结果,并显示 camera_release_image(CAM_DEV_ID_0,rgb888p_img) # 释放内存 - rgb888p_img = None - # gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止camera display_deinit() # 释放display kpu_deinit_det(kpu_ocr_det) # 释放kpu gc.collect() - time.sleep(1) - ret = media_deinit() # 释放整个media - if ret: - print("ocr_det_test, buffer_deinit failed") - return ret + media_deinit() # 释放整个media print("ocr_det_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) ocr_det_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/ocr_rec.py b/share/qtcreator/examples/04-AI-Demo/ocr_rec.py old mode 100755 new mode 100644 index a5d742a0e42..71805adb24c --- 
a/share/qtcreator/examples/04-AI-Demo/ocr_rec.py +++ b/share/qtcreator/examples/04-AI-Demo/ocr_rec.py @@ -7,6 +7,7 @@ import time #时间统计 import gc #垃圾回收模块 import aicube #aicube模块,封装检测分割等任务相关后处理 +import os, sys #操作系统接口模块 # display分辨率 DISPLAY_WIDTH = ALIGN_UP(1920, 16) @@ -351,7 +352,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -359,26 +360,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放buffer,销毁link def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() def ocr_rec_inference(): print("ocr_rec_test start") @@ -386,23 +385,13 @@ def ocr_rec_inference(): kpu_ocr_rec = kpu_init_rec(kmodel_file_rec) # 创建OCR识别kpu对象 camera_init(CAM_DEV_ID_0) # camera初始化 display_init() # display初始化 - rgb888p_img = None try: - ret = media_init() - if ret: - print("ocr_rec_test, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - if rgb888p_img == -1: - print("ocr_rec_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue # for rgb888planar if rgb888p_img.format() == image.RGBP888: det_results = kpu_run_det(kpu_ocr_det,rgb888p_img) # kpu运行获取OCR检测kmodel的推理输出 @@ -414,28 +403,22 @@ def ocr_rec_inference(): print("\n"+ocr_results) display_draw(det_results) camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - # gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止camera display_deinit() # 释放display kpu_deinit_det(kpu_ocr_det) # 释放OCR检测步骤kpu kpu_deinit_rec(kpu_ocr_rec) # 释放OCR识别步骤kpu gc.collect() - time.sleep(1) - ret = media_deinit() # 释放整个media - if ret: - print("ocr_rec_test, buffer_deinit failed") - return ret + media_deinit() # 释放整个media print("ocr_rec_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) ocr_rec_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/person_detection.py b/share/qtcreator/examples/04-AI-Demo/person_detection.py old mode 100755 new mode 100644 index a99f643a8f6..58f47c65770 --- a/share/qtcreator/examples/04-AI-Demo/person_detection.py +++ b/share/qtcreator/examples/04-AI-Demo/person_detection.py @@ 
-8,7 +8,7 @@ import time #时间统计 import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - +import os, sys #操作系统接口模块 import gc #垃圾回收模块 ##config.py @@ -256,7 +256,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -264,26 +264,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for person_detect.py********** def person_detect_inference(): @@ -292,50 +290,34 @@ def person_detect_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("person_detect_test, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) while True: + os.exitpoint() with ScopedTiming("total",total_debug_mode): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("person_detect_test, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: dets = kpu_run(kpu_person_detect,rgb888p_img) # 执行行人检测 kpu 运行 以及 后处理过程 display_draw(dets) # 将得到的检测结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - # gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display kpu_deinit(kpu_person_detect) # 释放 kpu gc.collect() - ret = media_deinit() # 释放 整个media - if ret: - print("person_detect_test, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("person_detect_test end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) person_detect_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/person_kp_detect.py b/share/qtcreator/examples/04-AI-Demo/person_kp_detect.py old mode 100755 new mode 100644 index f6d549d3d61..ec47c46cc75 --- a/share/qtcreator/examples/04-AI-Demo/person_kp_detect.py +++ b/share/qtcreator/examples/04-AI-Demo/person_kp_detect.py @@ -7,6 +7,7 @@ import time #时间统计 import gc #垃圾回收模块 import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 +import os, sys #操作系统接口模块 ##config.py #display分辨率 @@ -271,7 +272,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = 
VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -279,26 +280,24 @@ def media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for person_kp_detect.py********** @@ -308,51 +307,34 @@ def person_kp_detect_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("person_kp_detect, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("person_kp_detect, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: person_kp_detect_res = kpu_run(kpu_person_kp_detect,rgb888p_img) # 执行人体关键点检测 kpu 运行 以及 后处理过程 display_draw(person_kp_detect_res) # 将得到的人体关键点结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - #gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display kpu_deinit(kpu_person_kp_detect) # 释放 kpu gc.collect() - time.sleep(1) - ret = media_deinit() # 释放 整个media - if ret: - print("person_kp_detect, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("person_kp_detect end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) person_kp_detect_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/segment_yolov8n.py b/share/qtcreator/examples/04-AI-Demo/segment_yolov8n.py old mode 100755 new mode 100644 index 6a6b9df5d54..d35f82f0391 --- a/share/qtcreator/examples/04-AI-Demo/segment_yolov8n.py +++ b/share/qtcreator/examples/04-AI-Demo/segment_yolov8n.py @@ -7,6 +7,7 @@ import time #时间统计 import gc #垃圾回收模块 import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 +import os, sys #操作系统接口模块 ##config.py #display分辨率 @@ -252,7 +253,7 @@ def media_init(): config.comm_pool[0].blk_cnt = 1 config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) + media.buffer_config(config) global media_source, media_sink media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) @@ -260,9 +261,7 @@ def 
media_init(): media.create_link(media_source, media_sink) # 初始化多媒体buffer - ret = media.buffer_init() - if ret: - return ret + media.buffer_init() global buffer, draw_img, osd_img, masks buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) # 图层1,用于画框 @@ -271,16 +270,16 @@ def media_init(): # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret # media 释放内存 def media_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) global buffer,media_source, media_sink media.release_buffer(buffer) media.destroy_link(media_source, media_sink) - ret = media.buffer_deinit() - return ret + media.buffer_deinit() #**********for seg.py********** @@ -290,51 +289,34 @@ def seg_inference(): camera_init(CAM_DEV_ID_0) # 初始化 camera display_init() # 初始化 display - rgb888p_img = None try: - ret = media_init() - if ret: - print("seg, buffer init failed") - return ret - + media_init() camera_start(CAM_DEV_ID_0) - time.sleep(5) while True: + os.exitpoint() with ScopedTiming("total",1): rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - if rgb888p_img == -1: - print("seg, capture_image failed") - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - rgb888p_img = None - continue - # for rgb888planar if rgb888p_img.format() == image.RGBP888: seg_res = kpu_run(kpu_seg,rgb888p_img) # 执行多目标分割 kpu 运行 以及 后处理过程 display_draw(seg_res) # 将得到的分割结果 绘制到 display camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - rgb888p_img = None - #gc.collect() - except Exception as e: + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) print(f"An error occurred during buffer used: {e}") finally: - if rgb888p_img is not None: - #先release掉申请的内存再stop - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - camera_stop(CAM_DEV_ID_0) # 停止 camera display_deinit() # 释放 display kpu_deinit(kpu_seg) # 释放 kpu gc.collect() - time.sleep(1) - ret = media_deinit() # 释放 整个media - if ret: - print("seg, buffer_deinit failed") - return ret + media_deinit() # 释放 整个media print("seg end") - return 0 if __name__ == '__main__': + os.exitpoint(os.EXITPOINT_ENABLE) seg_inference() diff --git a/share/qtcreator/examples/05-nncase-Runtime/ai2d+kpu.py b/share/qtcreator/examples/05-nncase-Runtime/ai2d+kpu.py old mode 100755 new mode 100644 index 1e1c92a4a7d..daa80299df4 --- a/share/qtcreator/examples/05-nncase-Runtime/ai2d+kpu.py +++ b/share/qtcreator/examples/05-nncase-Runtime/ai2d+kpu.py @@ -1,11 +1,16 @@ -import nncase_runtime as nn -import ulab.numpy as np -import utime -import time -# init kpu and load kmodel +import nncase_runtime as nn # 导入nncase包 +import ulab.numpy as np # 导入numpy +import gc + +# We will explain how to use nncase_runtime in this test script for `KPU` and `AI2D`, +# including model reading, printing input and output information of the model, +# configuring `AI2D`, linking `AI2D` and `KPU`, setting input data, and how to obtain output. 
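Before the full AI2D walkthrough, a condensed sketch of the KPU half of that flow — load, feed, run, read, free — using only the calls these two nncase scripts demonstrate. The explicit del plus gc.collect() at the end matters because the tensors appear to wrap native buffers that MicroPython's collector would otherwise reclaim late (an inference from the cleanup code in these scripts, not a documented guarantee):

import nncase_runtime as nn
import ulab.numpy as np
import gc

kpu = nn.kpu()
kpu.load_kmodel("/sdcard/app/tests/nncase_runtime/face_detection/face_detection_320.kmodel")

data = np.zeros((1, 3, 320, 320), dtype=np.uint8)    # dummy frame matching the model input shape
kpu.set_input_tensor(0, nn.from_numpy(data))
kpu.run()

for i in range(kpu.outputs_size()):
    out = kpu.get_output_tensor(i)
    result = out.to_numpy()
    print("result:", i, result.shape, result.dtype)
    del out                                          # drop the tensor reference promptly

del kpu
gc.collect()                                         # return the native buffers to the system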
+ + +# init kpu、ai2d and load kmodel kpu = nn.kpu() ai2d = nn.ai2d() -kpu.load_kmodel("/sdcard/app/tests/nncase_runtime/face_detection/face_detection.kmodel") +kpu.load_kmodel("/sdcard/app/tests/nncase_runtime/face_detection/face_detection_320.kmodel") # init kpu input data = np.zeros((1,3,320,320),dtype=np.uint8) @@ -26,7 +31,7 @@ ai2d_input = ai2d_input.reshape((1, 3, 624, 1024)) ai2d_input_tensor = nn.from_numpy(ai2d_input) - +# config ai2d ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) @@ -43,11 +48,16 @@ # get output for i in range(kpu.outputs_size()): - result = kpu.get_output_tensor(i) - result = result.to_numpy() - utime.sleep(1) - #file_ = "/sdcard/app/output_{}.bin".format(i) - #np.save(file_, result) + data = kpu.get_output_tensor(i) + result = data.to_numpy() print("result: ", i, result.flatten()[-5:]) print(result.shape,result.dtype) - + del data + +del kpu_input +del ai2d_input_tensor +del ai2d_builder +del ai2d_out +del ai2d +del kpu +gc.collect() diff --git a/share/qtcreator/examples/05-nncase-Runtime/kpu.py b/share/qtcreator/examples/05-nncase-Runtime/kpu.py old mode 100755 new mode 100644 index 399b20c737d..2b01dfe7655 --- a/share/qtcreator/examples/05-nncase-Runtime/kpu.py +++ b/share/qtcreator/examples/05-nncase-Runtime/kpu.py @@ -1,9 +1,14 @@ import nncase_runtime as nn import ulab.numpy as np +import gc + +# We will explain how to use nncase_runtime in this test script for `KPU`, +# including model reading, printing input and output information of the model, +# configuring input data, and how to obtain output. # init kpu and load kmodel kpu = nn.kpu() -kpu.load_kmodel("/sdcard/app/tests/mbv2/test.kmodel") +kpu.load_kmodel("/sdcard/app/tests/nncase_runtime/face_detection/face_detection_320.kmodel") # dump model input and output info print("inputs info:") @@ -15,19 +20,23 @@ print(kpu.outputs_desc(i)) # set input tensor -with open('/sdcard/app/tests/mbv2/input_0_0.bin', 'rb') as f: +with open('/sdcard/app/tests/nncase_runtime/face_detection/face_detection_ai2d_output.bin', 'rb') as f: data = f.read() -input_data = np.frombuffer(data, dtype=np.float) -input_data = input_data.reshape((1,3,224,224)) - +input_data = np.frombuffer(data, dtype=np.uint8) +input_data = input_data.reshape((1,3,320,320)) kpu.set_input_tensor(0, nn.from_numpy(input_data)) # run kmodel kpu.run() # get output -result = kpu.get_output_tensor(0) -result = result.to_numpy() -print(result.shape,result.dtype) - +for i in range(kpu.outputs_size()): + result = kpu.get_output_tensor(i) + data = result.to_numpy() + print("result: ", i, data.flatten()[-5:]) + print(data.shape, data.dtype) + del result + +del kpu +gc.collect() diff --git a/share/qtcreator/examples/07-April-Tags/find_apriltags.py b/share/qtcreator/examples/07-April-Tags/find_apriltags.py new file mode 100644 index 00000000000..633b73b2198 --- /dev/null +++ b/share/qtcreator/examples/07-April-Tags/find_apriltags.py @@ -0,0 +1,148 @@ +# AprilTags Example +# +# This example shows the power of the CanMV Cam to detect April Tags. + +from media.camera import * +from media.display import * +from media.media import * +import time, math, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. + +# The apriltag code supports up to 6 tag families which can be processed at the same time. 
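Scanning for every family costs detection time and raises the false-positive risk of the small 4x4 families, so most applications enable just one. A short variant using the tag_families bitmask (img is the grayscale frame and family_name the helper, both defined later in this example):

tag_families = image.TAG36H11                 # 6x6 family: shorter range, far fewer false positives
for tag in img.find_apriltags(families=tag_families):
    print(family_name(tag), tag.id(), tag.rect())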
+# Returned tag objects will have their tag family and id within the tag family. + +tag_families = 0 +tag_families |= image.TAG16H5 # comment out to disable this family +tag_families |= image.TAG25H7 # comment out to disable this family +tag_families |= image.TAG25H9 # comment out to disable this family +tag_families |= image.TAG36H10 # comment out to disable this family +tag_families |= image.TAG36H11 # comment out to disable this family (default family) +tag_families |= image.ARTOOLKIT # comment out to disable this family + +# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively +# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which +# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve +# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a +# reason to use the other tags families just use TAG36H11 which is the default family. + +def family_name(tag): + if(tag.family() == image.TAG16H5): + return "TAG16H5" + if(tag.family() == image.TAG25H7): + return "TAG25H7" + if(tag.family() == image.TAG25H9): + return "TAG25H9" + if(tag.family() == image.TAG36H10): + return "TAG36H10" + if(tag.family() == image.TAG36H11): + return "TAG36H11" + if(tag.family() == image.ARTOOLKIT): + return "ARTOOLKIT" + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = 
image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + draw_img.clear() + for tag in img.find_apriltags(families=tag_families): + draw_img.draw_rectangle([v*SCALE for v in tag.rect()], color=(255, 0, 0)) + draw_img.draw_cross(tag.cx()*SCALE, tag.cy()*SCALE, color=(0, 255, 0)) + print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) + print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) + draw_img.copy_to(osd_img) + print(fps.fps()) + del img + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/07-April-Tags/find_apriltags_3d_pose.py b/share/qtcreator/examples/07-April-Tags/find_apriltags_3d_pose.py new file mode 100644 index 00000000000..c614fd9216d --- /dev/null +++ b/share/qtcreator/examples/07-April-Tags/find_apriltags_3d_pose.py @@ -0,0 +1,148 @@ +# AprilTags Example +# +# This example shows the power of the CanMV Cam to detect April Tags. + +from media.camera import * +from media.display import * +from media.media import * +import time, math, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. + +# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively +# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which +# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve +# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a +# reason to use the other tags families just use TAG36H11 which is the default family. + +# The AprilTags library outputs the pose information for tags. This is the x/y/z translation and +# x/y/z rotation. The x/y/z rotation is in radians and can be converted to degrees. As for +# translation the units are dimensionless and you must apply a conversion function. + +# f_x is the x focal length of the camera. It should be equal to the lens focal length in mm +# divided by the x sensor size in mm times the number of pixels in the image. +# The below values are for the OV7725 camera with a 2.8 mm lens. + +# f_y is the y focal length of the camera. It should be equal to the lens focal length in mm +# divided by the y sensor size in mm times the number of pixels in the image. +# The below values are for the OV7725 camera with a 2.8 mm lens. + +# c_x is the image x center position in pixels. 
+# c_y is the image y center position in pixels. + +f_x = (2.8 / 3.984) * DETECT_WIDTH # find_apriltags defaults to this if not set +f_y = (2.8 / 2.952) * DETECT_HEIGHT # find_apriltags defaults to this if not set +c_x = DETECT_WIDTH * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5) +c_y = DETECT_HEIGHT * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5) + +def degrees(radians): + return (180 * radians) / math.pi + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + draw_img.clear() + for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11 + draw_img.draw_rectangle([v*SCALE for v in tag.rect()], color=(255, 0, 0)) + draw_img.draw_cross(tag.cx()*SCALE, tag.cy()*SCALE, color=(0, 255, 0)) + print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), + degrees(tag.x_rotation()), degrees(tag.y_rotation()), 
degrees(tag.z_rotation())) + # Translation units are unknown. Rotation units are in degrees. + print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args) + draw_img.copy_to(osd_img) + print(fps.fps()) + del img + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/08-Codes/find_barcodes.py b/share/qtcreator/examples/08-Codes/find_barcodes.py new file mode 100644 index 00000000000..68f70ecda14 --- /dev/null +++ b/share/qtcreator/examples/08-Codes/find_barcodes.py @@ -0,0 +1,150 @@ +# Barcode Example +# +# This example shows off how easy it is to detect bar codes. + +from media.camera import * +from media.display import * +from media.media import * +import time, math, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def barcode_name(code): + if(code.type() == image.EAN2): + return "EAN2" + if(code.type() == image.EAN5): + return "EAN5" + if(code.type() == image.EAN8): + return "EAN8" + if(code.type() == image.UPCE): + return "UPCE" + if(code.type() == image.ISBN10): + 
return "ISBN10"
+    if(code.type() == image.UPCA):
+        return "UPCA"
+    if(code.type() == image.EAN13):
+        return "EAN13"
+    if(code.type() == image.ISBN13):
+        return "ISBN13"
+    if(code.type() == image.I25):
+        return "I25"
+    if(code.type() == image.DATABAR):
+        return "DATABAR"
+    if(code.type() == image.DATABAR_EXP):
+        return "DATABAR_EXP"
+    if(code.type() == image.CODABAR):
+        return "CODABAR"
+    if(code.type() == image.CODE39):
+        return "CODE39"
+    if(code.type() == image.PDF417):
+        return "PDF417"
+    if(code.type() == image.CODE93):
+        return "CODE93"
+    if(code.type() == image.CODE128):
+        return "CODE128"
+
+def capture_picture():
+    # create image for drawing
+    draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)
+    # create image for osd
+    buffer = globals()["buffer"]
+    osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    fps = time.clock()
+    while True:
+        fps.tick()
+        try:
+            os.exitpoint()
+            yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img)
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img)
+            codes = img.find_barcodes() # was find_qrcodes(); this example detects barcodes
+            draw_img.clear()
+            for code in codes:
+                draw_img.draw_rectangle([v*SCALE for v in code.rect()], color=(255, 0, 0))
+                print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), fps.fps())
+                print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
+            draw_img.copy_to(osd_img)
+            if not codes:
+                print("FPS %f" % fps.fps())
+            del img
+            gc.collect()
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/08-Codes/find_datamatrices.py b/share/qtcreator/examples/08-Codes/find_datamatrices.py
new file mode 100644
index 00000000000..271130574cf
--- /dev/null
+++ b/share/qtcreator/examples/08-Codes/find_datamatrices.py
@@ -0,0 +1,116 @@
+# Data Matrices Example
+#
+# This example shows off how easy it is to detect data matrices.
+ +from media.camera import * +from media.display import * +from media.media import * +import time, math, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + matrices = img.find_datamatrices() + draw_img.clear() + for matrix in matrices: + draw_img.draw_rectangle([v*SCALE for v in matrix.rect()], color=(255, 0, 0)) + print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, fps.fps()) + print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args) + draw_img.copy_to(osd_img) + if not matrices: + print("FPS %f" % fps.fps()) + del img + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + 
sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/08-Codes/find_qrcodes.py b/share/qtcreator/examples/08-Codes/find_qrcodes.py new file mode 100644 index 00000000000..ec0bd57593e --- /dev/null +++ b/share/qtcreator/examples/08-Codes/find_qrcodes.py @@ -0,0 +1,115 @@ +# QRCode Example +# +# This example shows the power of the CanMV Cam to detect QR Codes. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = 
image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + codes = img.find_qrcodes() + draw_img.clear() + for code in codes: + draw_img.draw_rectangle([v*SCALE for v in code.rect()], color=(255, 0, 0)) + print(code) + draw_img.copy_to(osd_img) + if not codes: + print("FPS %f" % fps.fps()) + del img + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/09-Color-Tracking/automatic_grayscale_color_tracking.py b/share/qtcreator/examples/09-Color-Tracking/automatic_grayscale_color_tracking.py new file mode 100644 index 00000000000..c97e1286ecf --- /dev/null +++ b/share/qtcreator/examples/09-Color-Tracking/automatic_grayscale_color_tracking.py @@ -0,0 +1,145 @@ +# Automatic Grayscale Color Tracking Example +# +# This example shows off single color automatic grayscale color tracking using the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, math + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + 
media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + # Capture the color thresholds for whatever was in the center of the image. + r = [(DETECT_WIDTH//2)-(50//2), (DETECT_HEIGHT//2)-(50//2), 50, 50] # 50x50 center of QVGA. + threshold = [128, 128] # Middle grayscale values. + frame_count = 0 + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_grayscale() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + if frame_count < 60: + if frame_count == 0: + print("Letting auto algorithms run. Don't put anything in front of the camera!") + print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") + print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") + draw_img.draw_rectangle([v*SCALE for v in r]) + frame_count = frame_count + 1 + elif frame_count < 120: + if frame_count == 60: + print("Learning thresholds...") + elif frame_count == 119: + print("Thresholds learned...") + print("Tracking colors...") + hist = img.get_histogram(roi=r) + lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! + hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + # Average in percentile values. 
+ threshold[0] = (threshold[0] + lo.value()) // 2 + threshold[1] = (threshold[1] + hi.value()) // 2 + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) + draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) + draw_img.draw_rectangle([v*SCALE for v in r]) + frame_count = frame_count + 1 + del hist + else: + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) + draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) + + draw_img.copy_to(osd_img) + del img + gc.collect() + if frame_count >= 120: + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/09-Color-Tracking/automatic_rgb565_color_tracking.py b/share/qtcreator/examples/09-Color-Tracking/automatic_rgb565_color_tracking.py new file mode 100644 index 00000000000..85efd03081e --- /dev/null +++ b/share/qtcreator/examples/09-Color-Tracking/automatic_rgb565_color_tracking.py @@ -0,0 +1,149 @@ +# Automatic RGB565 Color Tracking Example +# +# This example shows off single color automatic RGB565 color tracking using the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, math + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop 
stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + # Capture the color thresholds for whatever was in the center of the image. + r = [(DETECT_WIDTH//2)-(50//2), (DETECT_HEIGHT//2)-(50//2), 50, 50] # 50x50 center of QVGA. + threshold = [50, 50, 0, 0, 0, 0] # Middle L, A, B values. + frame_count = 0 + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + if frame_count < 60: + if frame_count == 0: + print("Letting auto algorithms run. Don't put anything in front of the camera!") + print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") + print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") + draw_img.draw_rectangle([v*SCALE for v in r]) + frame_count = frame_count + 1 + elif frame_count < 120: + if frame_count == 60: + print("Learning thresholds...") + elif frame_count == 119: + print("Thresholds learned...") + print("Tracking colors...") + hist = img.get_histogram(roi=r) + lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! + hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + # Average in percentile values. 
+                threshold[0] = (threshold[0] + lo.l_value()) // 2
+                threshold[1] = (threshold[1] + hi.l_value()) // 2
+                threshold[2] = (threshold[2] + lo.a_value()) // 2
+                threshold[3] = (threshold[3] + hi.a_value()) // 2
+                threshold[4] = (threshold[4] + lo.b_value()) // 2
+                threshold[5] = (threshold[5] + hi.b_value()) // 2
+                for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
+                    draw_img.draw_rectangle([v*SCALE for v in blob.rect()])
+                    draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE)
+                draw_img.draw_rectangle([v*SCALE for v in r])
+                frame_count = frame_count + 1
+                del hist
+            else:
+                for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
+                    draw_img.draw_rectangle([v*SCALE for v in blob.rect()])
+                    draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE)
+
+            draw_img.copy_to(osd_img)
+            del img
+            gc.collect()
+            if frame_count >= 120:
+                print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/09-Color-Tracking/black_grayscale_line_following.py b/share/qtcreator/examples/09-Color-Tracking/black_grayscale_line_following.py
new file mode 100644
index 00000000000..beded0344e9
--- /dev/null
+++ b/share/qtcreator/examples/09-Color-Tracking/black_grayscale_line_following.py
@@ -0,0 +1,171 @@
+# Black Grayscale Line Following Example
+#
+# Making a line following robot requires a lot of effort. This example script
+# shows how to do the machine vision part of the line following robot. You
+# can use the output from this script to drive a differential drive robot to
+# follow a line. This script just generates a single turn value that tells
+# your robot to go left or right.
+#
+# For this script to work properly you should point the camera at a line at a
+# 45 or so degree angle. Please make sure that only the line is within the
+# camera's field of view.
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os, gc, sys, math
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+SCALE = 4
+DETECT_WIDTH = DISPLAY_WIDTH // SCALE
+DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE
+
+# Tracks a black line. Use [(128, 255)] for tracking a white line.
+GRAYSCALE_THRESHOLD = [(0, 64)]
+
+# Each roi is (x, y, w, h). The line detection algorithm will try to find the
+# centroid of the largest blob in each roi. The x position of the centroids
+# will then be averaged with different weights, where the most weight is assigned
+# to the roi near the bottom of the image and less to the next roi and so on;
+# see the sketch of the arithmetic after the weight divisor below.
+ROIS = [ # [ROI, weight]
+        (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app
+        (0, 50, 160, 20, 0.3),  # depending on how your robot is set up.
+        (0, 0, 160, 20, 0.1)
+       ]
+
+# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
+weight_sum = 0
+for r in ROIS: weight_sum += r[4] # r[4] is the roi weight.
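+
+# The weighted average is the core of the follower: each roi contributes the x
+# centroid of its largest blob, scaled by that roi's weight, and the sum is
+# divided by weight_sum (see capture_picture() below). A minimal sketch of just
+# that arithmetic, with made-up centroids standing in for real blob.cx() values:
+#
+#   centroids = [90, 100, 110]  # hypothetical largest-blob centroids, bottom roi first
+#   center_pos = sum(cx * r[4] for cx, r in zip(centroids, ROIS)) / weight_sum
+#   # -> (90*0.7 + 100*0.3 + 110*0.1) / 1.1 = 94.5..., i.e. the line sits right of center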
+ +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_grayscale() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + centroid_sum = 0 + for r in ROIS: + blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple. + + if blobs: + # Find the blob with the most pixels. + largest_blob = max(blobs, key=lambda b: b.pixels()) + + # Draw a rect around the blob. + draw_img.draw_rectangle([v*SCALE for v in largest_blob.rect()]) + draw_img.draw_cross(largest_blob.cx()*SCALE, largest_blob.cy()*SCALE) + + centroid_sum += largest_blob.cx() * r[4] # r[4] is the roi weight. + + center_pos = (centroid_sum / weight_sum) # Determine center of line. + + # Convert the center_pos to a deflection angle. We're using a non-linear + # operation so that the response gets stronger the farther off the line we + # are. Non-linear operations are good to use on the output of algorithms + # like this to cause a response "trigger". + deflection_angle = 0 + + # The 80 is from half the X res, the 60 is from half the Y res. 
The
+            # equation below is just computing the angle of a triangle where the
+            # opposite side of the triangle is the deviation of the center position
+            # from the center and the adjacent side is half the Y res. This limits
+            # the angle output to around -45 to 45. (It's not quite -45 and 45.)
+            deflection_angle = -math.atan((center_pos-80)/60)
+
+            # Convert angle in radians to degrees.
+            deflection_angle = math.degrees(deflection_angle)
+
+            # Now you have an angle telling you how much to turn the robot by, which
+            # incorporates the part of the line nearest to the robot and parts of
+            # the line farther away from the robot for a better prediction.
+            print("Turn Angle: %f" % deflection_angle)
+            draw_img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/09-Color-Tracking/image_histogram_info.py b/share/qtcreator/examples/09-Color-Tracking/image_histogram_info.py
new file mode 100644
index 00000000000..cea35e2e46f
--- /dev/null
+++ b/share/qtcreator/examples/09-Color-Tracking/image_histogram_info.py
@@ -0,0 +1,106 @@
+# Image Histogram Info Example
+#
+# This script computes the histogram of the image and prints it out.
+
+# You can also pass get_histogram() an "roi=" to get just the histogram of that area.
+# get_histogram() allows you to quickly determine the color channel information of
+# any area in the image.
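+
+# For example (an illustrative sketch, mirroring how the automatic color tracking
+# examples in this directory use the histogram), the histogram's CDF can be
+# reduced to robust low/high grayscale bounds; the roi below is made up:
+#
+#   hist = img.get_histogram(roi=(0, 0, 50, 50))  # histogram of a 50x50 patch
+#   lo = hist.get_percentile(0.01).value()        # 1% of the CDF -> dark bound
+#   hi = hist.get_percentile(0.99).value()        # 99% of the CDF -> bright bound
+#   # (lo, hi) can then serve as a grayscale threshold for img.find_blobs()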
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_grayscale() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + print(img.get_histogram(bins=8)) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/09-Color-Tracking/image_statistics_info.py b/share/qtcreator/examples/09-Color-Tracking/image_statistics_info.py new file mode 100644 index 00000000000..ba5ab393cd1 --- /dev/null +++ b/share/qtcreator/examples/09-Color-Tracking/image_statistics_info.py @@ -0,0 +1,106 @@ +# Image Statistics Info Example +# +# This script computes the statistics of the image and prints it out. + +# You can also pass get_statistics() an "roi=" to get just the statistics of that area. 
+# get_statistics() allows you to quickly determine the color channel information of
+# any area in the image.
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os, gc, sys
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+SCALE = 4
+DETECT_WIDTH = DISPLAY_WIDTH // SCALE
+DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE
+
+def camera_init():
+    # use hdmi for display
+    display.init(LT9611_1920X1080_30FPS)
+    # config vb for osd layer
+    config = k_vb_config()
+    config.max_pool_cnt = 1
+    config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT
+    config.comm_pool[0].blk_cnt = 1
+    config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE
+    # media buffer config
+    media.buffer_config(config)
+    # init default sensor
+    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
+    # set chn0 output size
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
+    # set chn0 output format
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
+    # create media source device
+    globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    # create media sink device
+    globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    # create media link
+    media.create_link(meida_source, meida_sink)
+    # set display plane with video channel
+    display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+    # set chn1 output rgb888
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT)
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888)
+    # media buffer init
+    media.buffer_init()
+    # request media buffer for osd image
+    globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
+    # start stream for camera device0
+    camera.start_stream(CAM_DEV_ID_0)
+
+def camera_deinit():
+    # stop stream for camera device0
+    camera.stop_stream(CAM_DEV_ID_0)
+    # deinit display
+    display.deinit()
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # release media buffer
+    media.release_buffer(globals()["buffer"])
+    # destroy media link
+    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
+    # deinit media buffer
+    media.buffer_deinit()
+
+def capture_picture():
+    fps = time.clock()
+    while True:
+        fps.tick()
+        try:
+            os.exitpoint()
+            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = rgb888_img.to_grayscale()
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            print(img.get_statistics())
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/09-Color-Tracking/multi_color_code_tracking.py b/share/qtcreator/examples/09-Color-Tracking/multi_color_code_tracking.py
new file mode 100644
index 00000000000..954970307b8
--- /dev/null
+++ b/share/qtcreator/examples/09-Color-Tracking/multi_color_code_tracking.py
@@ -0,0 +1,141 @@
+# Multi Color Code Tracking Example
+#
+# This example shows off multi color code 
tracking using the CanMV Cam.
+#
+# A color code is a blob composed of two or more colors. The example below will
+# only track colored objects which have two or more of the colors below in them.
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os, gc, sys, math
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+SCALE = 4
+DETECT_WIDTH = DISPLAY_WIDTH // SCALE
+DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE
+
+# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
+# The below thresholds track in general red/green things. You may wish to tune them...
+thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0)
+              (30, 100, -64, -8, -32, 32), # generic_green_thresholds -> index is 1 so code == (1 << 1)
+              (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds -> index is 2 so code == (1 << 2)
+# Codes are or'ed together when "merge=True" for "find_blobs".
+
+# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
+# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
+# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.
+
+def camera_init():
+    # use hdmi for display
+    display.init(LT9611_1920X1080_30FPS)
+    # config vb for osd layer
+    config = k_vb_config()
+    config.max_pool_cnt = 1
+    config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT
+    config.comm_pool[0].blk_cnt = 1
+    config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE
+    # media buffer config
+    media.buffer_config(config)
+    # init default sensor
+    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
+    # set chn0 output size
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
+    # set chn0 output format
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
+    # create media source device
+    globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    # create media sink device
+    globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    # create media link
+    media.create_link(meida_source, meida_sink)
+    # set display plane with video channel
+    display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+    # set chn1 output rgb888
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT)
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888)
+    # media buffer init
+    media.buffer_init()
+    # request media buffer for osd image
+    globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
+    # start stream for camera device0
+    camera.start_stream(CAM_DEV_ID_0)
+
+def camera_deinit():
+    # stop stream for camera device0
+    camera.stop_stream(CAM_DEV_ID_0)
+    # deinit display
+    display.deinit()
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # release media buffer
+    media.release_buffer(globals()["buffer"])
+    # destroy media link
+    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
+    # deinit media buffer
+    media.buffer_deinit()
+
+def capture_picture():
+    # create image for drawing
+    draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)
+    # create image for osd
+    buffer = globals()["buffer"]
+    osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, 
poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    fps = time.clock()
+    while True:
+        fps.tick()
+        try:
+            os.exitpoint()
+            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = rgb888_img.to_rgb565()
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            draw_img.clear()
+            for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
+                if blob.code() == 3: # r/g code
+                    draw_img.draw_rectangle([v*SCALE for v in blob.rect()])
+                    draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE)
+                    draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "r/g")
+                if blob.code() == 5: # r/b code
+                    draw_img.draw_rectangle([v*SCALE for v in blob.rect()])
+                    draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE)
+                    draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "r/b")
+                if blob.code() == 6: # g/b code
+                    draw_img.draw_rectangle([v*SCALE for v in blob.rect()])
+                    draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE)
+                    draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "g/b")
+                if blob.code() == 7: # r/g/b code
+                    draw_img.draw_rectangle([v*SCALE for v in blob.rect()])
+                    draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE)
+                    draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "r/g/b")
+            draw_img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/09-Color-Tracking/single_color_code_tracking.py b/share/qtcreator/examples/09-Color-Tracking/single_color_code_tracking.py
new file mode 100644
index 00000000000..10838ee7b7e
--- /dev/null
+++ b/share/qtcreator/examples/09-Color-Tracking/single_color_code_tracking.py
@@ -0,0 +1,135 @@
+# Single Color Code Tracking Example
+#
+# This example shows off single color code tracking using the CanMV Cam.
+#
+# A color code is a blob composed of two or more colors. The example below will
+# only track colored objects which have both of the colors below in them.
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os, gc, sys, math
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+SCALE = 4
+DETECT_WIDTH = DISPLAY_WIDTH // SCALE
+DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE
+
+# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
+# The below thresholds track in general red/green things. You may wish to tune them...
+thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0)
+              (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1)
+# Codes are or'ed together when "merge=True" for "find_blobs".
+
+# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
+# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
+# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.
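+
+# Because the two thresholds above sit at index 0 and index 1, a merged blob that
+# contains both colors reports blob.code() == (1 << 0) | (1 << 1) == 3, which is
+# what the loop below tests for. A minimal sketch of decoding that bitmask (the
+# names are illustrative only):
+#
+#   def code_names(code):
+#       names = ["red", "green"]  # one name per threshold index
+#       return [n for i, n in enumerate(names) if code & (1 << i)]
+#
+#   code_names(3)  # -> ["red", "green"], i.e. a red/green color code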
+ +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): + if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0) + # These values depend on the blob not being circular - otherwise they will be shaky. + # if blob.elongation() > 0.5: + # img.draw_edges(blob.min_corners(), color=(255,0,0)) + # img.draw_line(blob.major_axis_line(), color=(0,255,0)) + # img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + # These values are stable all the time. + draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) + draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) + # Note - the blob rotation is unique to 0-180 only. 
+ draw_img.draw_keypoints([(blob.cx()*SCALE, blob.cy()*SCALE, int(math.degrees(blob.rotation())))], size=20) + draw_img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/arrow_drawing.py b/share/qtcreator/examples/10-Drawing/arrow_drawing.py new file mode 100644 index 00000000000..bb63d445a52 --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/arrow_drawing.py @@ -0,0 +1,105 @@ +# Arrow Drawing +# +# This example shows off drawing arrows on the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x0 = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y0 = 
(urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + x1 = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y1 = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # If the first argument is a scaler then this method expects + # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple. + img.draw_arrow(x0, y0, x1, y1, color = (r, g, b), size = 30, thickness = 2) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/circle_drawing.py b/share/qtcreator/examples/10-Drawing/circle_drawing.py new file mode 100644 index 00000000000..b495f786316 --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/circle_drawing.py @@ -0,0 +1,104 @@ +# Circle Drawing +# +# This example shows off drawing circles on the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, 
image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + radius = urandom.getrandbits(30) % (max(img.height(), img.width())//2) + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # If the first argument is a scaler then this method expects + # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple. + img.draw_circle(x, y, radius, color = (r, g, b), thickness = 2, fill = False) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/cross_drawing.py b/share/qtcreator/examples/10-Drawing/cross_drawing.py new file mode 100644 index 00000000000..7ed92f877cb --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/cross_drawing.py @@ -0,0 +1,103 @@ +# Cross Drawing +# +# This example shows off drawing crosses on the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit 
media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # If the first argument is a scaler then this method expects + # to see x and y. Otherwise, it expects a (x,y) tuple. + img.draw_cross(x, y, color = (r, g, b), size = 10, thickness = 2) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/ellipse_drawing.py b/share/qtcreator/examples/10-Drawing/ellipse_drawing.py new file mode 100644 index 00000000000..767e3a7b7bf --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/ellipse_drawing.py @@ -0,0 +1,107 @@ +# Ellipse Drawing +# +# This example shows off drawing ellipses on the CanMV Cam. 
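+#
+# A minimal sketch of the draw call exercised below, with assumed example
+# values (center x/y, the two radii, then a rotation):
+#
+#   img.draw_ellipse(960, 540, 300, 150, 45, color=(255, 0, 0), thickness=2, fill=False)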
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + radius_x = urandom.getrandbits(30) % (max(img.height(), img.width())//2) + radius_y = urandom.getrandbits(30) % (max(img.height(), img.width())//2) + rot = urandom.getrandbits(30) + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # If the first argument is a scaler then this method expects + # to see x, y, radius x, and radius y. + # Otherwise, it expects a (x,y,radius_x,radius_y) tuple. 
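+            # The modulo above caps both radii at half of the larger image
+            # dimension; centers can still fall outside the image, in which
+            # case only the visible part of the ellipse is drawn.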
+ img.draw_ellipse(x, y, radius_x, radius_y, rot, color = (r, g, b), thickness = 2, fill = False) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/flood_fill.py b/share/qtcreator/examples/10-Drawing/flood_fill.py new file mode 100644 index 00000000000..f452341ebbd --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/flood_fill.py @@ -0,0 +1,103 @@ +# Flood Fill +# +# This example shows off flood filling areas in the image. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # # set chn0 output size + # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # # set chn0 output format + # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # # create meida source device + # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # # create meida sink device + # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # # create meida link + # media.create_link(meida_source, meida_sink) + # # set display plane with video channel + # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + # media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, 
CAM_CHN_ID_1, rgb888_img) + x = img.width() // 2 + y = img.height() // 2 + img.flood_fill(x, y, seed_threshold=0.05, floating_thresholds=0.05, + color=(255, 0, 0), invert=False, clear_background=False) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/image_drawing.py b/share/qtcreator/examples/10-Drawing/image_drawing.py new file mode 100644 index 00000000000..bd114b08be3 --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/image_drawing.py @@ -0,0 +1,110 @@ +# Draw Image Example +# +# This example shows off how to draw images in the frame buffer. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # # set chn0 output size + # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # # set chn0 output format + # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # # create meida source device + # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # # create meida sink device + # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # # create meida link + # media.create_link(meida_source, meida_sink) + # # set display plane with video channel + # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + # media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + 
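+            # capture an rgb888 frame from chn1, convert it to rgb565, then
+            # release the original promptly so its buffer returns to the vb pool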
os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + small_img = img.mean_pooled(4, 4) # Makes a copy. + x = (img.width()//2)-(small_img.width()//2) + y = (img.height()//2)-(small_img.height()//2) + # Draws an image in the frame buffer.Pass an optional + # mask image to control what pixels are drawn. + img.draw_image(small_img, x, y, x_scale=1, y_scale=1) + img.copy_to(osd_img) + del img, small_img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/image_drawing_advanced.py b/share/qtcreator/examples/10-Drawing/image_drawing_advanced.py new file mode 100644 index 00000000000..775f9464042 --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/image_drawing_advanced.py @@ -0,0 +1,183 @@ +# Draw Image Testing script with bounce +# +# Exercise draw image with many different values for testing + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +BOUNCE = True +RESCALE = True + +SMALL_IMAGE_SCALE = 3 + +CYCLE_FORMATS = True +CYCLE_MASK = True + +# Used when CYCLE_FORMATS or CYCLE_MASK is true +value_mixer_init = 0 + +# Location of small image +x_init = 100 +y_init = 50 + +# Bounce direction +xd_init = 1 +yd_init = 1 + +# Small image scaling +rescale_init = 1.0 +rd_init = 0.1 +max_rescale = 5 +min_rescale = 0.2 + +# Boundary to bounce within +xmin = -DISPLAY_WIDTH / SMALL_IMAGE_SCALE - 8 +ymin = -DISPLAY_HEIGHT / SMALL_IMAGE_SCALE - 8 +xmax = DISPLAY_WIDTH + 8 +ymax = DISPLAY_HEIGHT + 8 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # # set chn0 output size + # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # # set chn0 output format + # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # # create meida source device + # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # # create meida sink device + # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # # create meida link + # media.create_link(meida_source, meida_sink) + # # set display plane with video channel + # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for 
osd image
+    globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
+    # start stream for camera device0
+    camera.start_stream(CAM_DEV_ID_0)
+
+def camera_deinit():
+    # stop stream for camera device0
+    camera.stop_stream(CAM_DEV_ID_0)
+    # deinit display
+    display.deinit()
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # release media buffer
+    media.release_buffer(globals()["buffer"])
+    # destroy media link
+    # media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
+    # deinit media buffer
+    media.buffer_deinit()
+
+def draw():
+    # create image for osd
+    buffer = globals()["buffer"]
+    osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    value_mixer = value_mixer_init
+    x = x_init
+    y = y_init
+    xd = xd_init
+    yd = yd_init
+    rescale = rescale_init
+    rd = rd_init
+    fps = time.clock()
+    while True:
+        fps.tick()
+        status = ""
+        value_mixer = value_mixer + 1
+        try:
+            os.exitpoint()
+            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = rgb888_img.to_rgb565()
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            small_img = img.mean_pooled(SMALL_IMAGE_SCALE, SMALL_IMAGE_SCALE)
+            status = 'rgb565 '
+            if CYCLE_FORMATS:
+                image_format = (value_mixer >> 8) & 3
+                # To test combining different formats
+                if (image_format==1): small_img = small_img.to_bitmap(); status = 'bitmap '
+                if (image_format==2): small_img = small_img.to_grayscale(); status = 'grayscale '
+                if (image_format==3): small_img = small_img.to_rgb565(); status = 'rgb565 '
+
+            # update small image location, bouncing off the boundary box
+            if BOUNCE:
+                x = x + xd
+                if (x < xmin or x > xmax):
+                    xd = -xd
+
+                y = y + yd
+                if (y < ymin or y > ymax):
+                    yd = -yd
+
+            # update small image scale, bouncing between the rescale limits
+            if RESCALE:
+                rescale = rescale + rd
+                if (rescale < min_rescale or rescale > max_rescale):
+                    rd = -rd
+
+            # compute the scaled size of the small image
+            scaled_width = int(small_img.width() * abs(rescale))
+            scaled_height = int(small_img.height() * abs(rescale))
+
+            apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1)
+            if apply_mask:
+                img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(), x_scale=rescale, y_scale=rescale, alpha=240)
+                status += 'alpha:240 '
+                status += '+mask '
+            else:
+                img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128)
+                status += 'alpha:128 '
+
+            img.draw_string(8, 0, status, mono_space = False)
+            img.copy_to(osd_img)
+            del img, small_img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("draw")
+        draw()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/10-Drawing/image_drawing_alpha_blending_test.py b/share/qtcreator/examples/10-Drawing/image_drawing_alpha_blending_test.py
new file mode 100644
index 00000000000..c301d85b74c
--- /dev/null
+++ b/share/qtcreator/examples/10-Drawing/image_drawing_alpha_blending_test.py
@@ -0,0 +1,155 @@
+# Image Drawing Alpha Blending Test
+#
+# This script tests the performance and quality of the draw_image()
+# method which can perform nearest neighbor, bilinear, bicubic, and
+# area scaling along with color channel extraction, alpha blending, +# color palette application, and alpha palette application. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +small_img = image.Image(4,4,image.RGB565) +small_img.set_pixel(0, 0, (0, 0, 127)) +small_img.set_pixel(1, 0, (47, 255, 199)) +small_img.set_pixel(2, 0, (0, 188, 255)) +small_img.set_pixel(3, 0, (0, 0, 127)) +small_img.set_pixel(0, 1, (0, 176, 255)) +small_img.set_pixel(1, 1, (222, 0, 0 )) +small_img.set_pixel(2, 1, (50, 255, 195)) +small_img.set_pixel(3, 1, (86, 255, 160)) +small_img.set_pixel(0, 2, (255, 211, 0 )) +small_img.set_pixel(1, 2, (83, 255, 163)) +small_img.set_pixel(2, 2, (255, 211, 0)) +small_img.set_pixel(3, 2, (0, 80, 255)) +small_img.set_pixel(0, 3, (255, 118, 0 )) +small_img.set_pixel(1, 3, (127, 0, 0 )) +small_img.set_pixel(2, 3, (0, 144, 255)) +small_img.set_pixel(3, 3, (50, 255, 195)) + +big_img = image.Image(128,128,image.RGB565) +big_img.draw_image(small_img, 0, 0, x_scale=32, y_scale=32) + +alpha_div = 1 +alpha_value_init = 0 +alpha_step_init = 2 + +x_bounce_init = DISPLAY_WIDTH//2 +x_bounce_toggle_init = 1 + +y_bounce_init = DISPLAY_HEIGHT//2 +y_bounce_toggle_init = 1 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # # set chn0 output size + # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # # set chn0 output format + # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # # create meida source device + # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # # create meida sink device + # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # # create meida link + # media.create_link(meida_source, meida_sink) + # # set display plane with video channel + # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + # media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + 
display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + alpha_value = alpha_value_init + alpha_step = alpha_step_init + x_bounce = x_bounce_init + x_bounce_toggle = x_bounce_toggle_init + y_bounce = y_bounce_init + y_bounce_toggle = y_bounce_toggle_init + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.draw_image(big_img, x_bounce, y_bounce, rgb_channel=-1, alpha=alpha_value//alpha_div) + + x_bounce += x_bounce_toggle + if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + + alpha_value += alpha_step + if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/keypoints_drawing.py b/share/qtcreator/examples/10-Drawing/keypoints_drawing.py new file mode 100644 index 00000000000..09baf624629 --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/keypoints_drawing.py @@ -0,0 +1,104 @@ +# Keypoints Drawing +# +# This example shows off drawing keypoints on the CanMV Cam. Usually you call draw_keypoints() +# on a keypoints object but you can also call it on a list of 3-value tuples... 
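+#
+# A minimal sketch with assumed example values, using the (x, y,
+# rotation-in-degrees) tuple layout from draw() below:
+#
+#   img.draw_keypoints([(100, 100, 0), (200, 200, 90)], color=(255, 0, 0), size=20, thickness=2)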
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + rot = urandom.getrandbits(30) % 360 + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # This method draws a keypoints object or a list of (x, y, rot) tuples... 
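+            # rot above is already reduced modulo 360; the third tuple element
+            # is interpreted as an angle in degrees.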
+ img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/line_drawing.py b/share/qtcreator/examples/10-Drawing/line_drawing.py new file mode 100644 index 00000000000..0612dcf227a --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/line_drawing.py @@ -0,0 +1,105 @@ +# Line Drawing +# +# This example shows off drawing lines on the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x0 = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y0 = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + x1 = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + 
y1 = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # If the first argument is a scaler then this method expects + # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple. + img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/rectangle_drawing.py b/share/qtcreator/examples/10-Drawing/rectangle_drawing.py new file mode 100644 index 00000000000..e9685114d2a --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/rectangle_drawing.py @@ -0,0 +1,105 @@ +# Rectangle Drawing +# +# This example shows off drawing rectangles on the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + 
display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + w = (urandom.getrandbits(30) % (img.width()//2)) + h = (urandom.getrandbits(30) % (img.height()//2)) + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # If the first argument is a scaler then this method expects + # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple. + img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/10-Drawing/text_drawing.py b/share/qtcreator/examples/10-Drawing/text_drawing.py new file mode 100644 index 00000000000..d09d37bf13b --- /dev/null +++ b/share/qtcreator/examples/10-Drawing/text_drawing.py @@ -0,0 +1,106 @@ +# Text Drawing +# +# This example shows off drawing text on the CanMV Cam. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys, urandom + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def draw(): + # create image for drawing + img = image.Image(DISPLAY_WIDTH, 
DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + img.clear() + for i in range(10): + x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) + y = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) + r = (urandom.getrandbits(30) % 127) + 128 + g = (urandom.getrandbits(30) % 127) + 128 + b = (urandom.getrandbits(30) % 127) + 128 + # If the first argument is a scaler then this method expects + # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple. + # Character and string rotation can be done at 0, 90, 180, 270, and etc. degrees. + img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False, + char_rotation = 0, char_hmirror = False, char_vflip = False, + string_rotation = 0, string_hmirror = False, string_vflip = False) + img.copy_to(osd_img) + time.sleep(1) + os.exitpoint() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("draw") + draw() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/edges.py b/share/qtcreator/examples/11-Feature-Detection/edges.py new file mode 100644 index 00000000000..817ec3b22f5 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/edges.py @@ -0,0 +1,111 @@ +# Edge detection with Canny: +# +# This example demonstrates the Canny edge detector. 
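+#
+# threshold=(50, 80) below is the Canny hysteresis pair (read here as the low
+# and high thresholds, by the usual convention): gradients above the high value
+# seed edges, and connected pixels above the low value extend them. Raising the
+# pair prunes weak, noisy edges; lowering it keeps more of them.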
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + # Use Canny edge detector + img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) + # Faster simpler edge detection + #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255)) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == 
"__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/find_circles.py b/share/qtcreator/examples/11-Feature-Detection/find_circles.py new file mode 100644 index 00000000000..fd4f06c8239 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/find_circles.py @@ -0,0 +1,131 @@ +# Find Circles Example +# +# This example shows off how to find circles in the image using the Hough +# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform +# +# Note that the find_circles() method will only find circles which are completely +# inside of the image. Circles which go outside of the image/roi are ignored... + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + # Circle 
objects have four values: x, y, r (radius), and magnitude. The + # magnitude is the strength of the detection of the circle. Higher is + # better... + + # `threshold` controls how many circles are found. Increase its value + # to decrease the number of circles detected... + + # `x_margin`, `y_margin`, and `r_margin` control the merging of similar + # circles in the x, y, and r (radius) directions. + + # r_min, r_max, and r_step control what radiuses of circles are tested. + # Shrinking the number of tested circle radiuses yields a big performance boost. + + for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10, + r_min = 2, r_max = 100, r_step = 2): + draw_img.draw_circle(c.x()*SCALE, c.y()*SCALE, c.r()*SCALE, color = (255, 0, 0)) + print(c) + draw_img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/find_lines.py b/share/qtcreator/examples/11-Feature-Detection/find_lines.py new file mode 100644 index 00000000000..c02d401eb53 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/find_lines.py @@ -0,0 +1,146 @@ +# Find Lines Example +# +# This example shows off how to find lines in the image. For each line object +# found in the image a line object is returned which includes the line's rotation. + +# Note: Line detection is done by using the Hough Transform: +# http://en.wikipedia.org/wiki/Hough_transform +# Please read about it above for more information on what `theta` and `rho` are. + +# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +# All line objects have a `theta()` method to get their rotation angle in degrees. +# You can filter lines based on their rotation angle. + +min_degree = 0 +max_degree = 179 + +# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points +# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`. + +# About negative rho values: +# +# A [theta+0:-rho] tuple is the same as [theta+180:+rho]. 
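+#
+# In this parameterization a line is the set of points satisfying
+# rho = x*cos(theta) + y*sin(theta): theta is the angle of the line's normal
+# and rho its signed distance from the origin, which is why flipping the sign
+# of rho is equivalent to rotating theta by 180 degrees.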
+ +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # media buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create media source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create media sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create media link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output rgb888 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + # `threshold` controls how many lines in the image are found. Only lines with + # edge difference magnitude sums greater than `threshold` are detected... + + # More about `threshold` - each pixel in the image contributes a magnitude value + # to a line. The sum of all contributions is the magnitude for that line. Then + # when lines are merged their magnitudes are added together. Note that `threshold` + # filters out lines with low magnitudes before merging. To see the magnitude of + # un-merged lines set `theta_margin` and `rho_margin` to 0... + + # `theta_margin` and `rho_margin` control merging similar lines. If two lines' + # theta and rho value differences are less than the margins then they are merged.
+ + for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25): + if (min_degree <= l.theta()) and (l.theta() <= max_degree): + draw_img.draw_line([v*SCALE for v in l.line()], color = (255, 0, 0)) + print(l) + draw_img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/find_rects.py b/share/qtcreator/examples/11-Feature-Detection/find_rects.py new file mode 100644 index 00000000000..889f1af7c98 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/find_rects.py @@ -0,0 +1,123 @@ +# Find Rects Example +# +# This example shows off how to find rectangles in the image using the quad threshold +# detection code from our April Tags code. The quad threshold detection algorithm +# detects rectangles in an extremely robust way and is much better than Hough +# Transform based methods. For example, it can still detect rectangles even when lens +# distortion causes those rectangles to look bent. Rounded rectangles are no problem! +# (But, given this the code will also detect small radius circles too)... + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + 
os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + # `threshold` below should be set to a high enough value to filter out noise + # rectangles detected in the image which have low edge magnitudes. Rectangles + # have larger edge magnitudes the larger and more contrasty they are... + + for r in img.find_rects(threshold = 10000): + draw_img.draw_rectangle([v*SCALE for v in r.rect()], color = (255, 0, 0)) + for p in r.corners(): draw_img.draw_circle(p[0]*SCALE, p[1]*SCALE, 5*SCALE, color = (0, 255, 0)) + print(r) + draw_img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/hog.py b/share/qtcreator/examples/11-Feature-Detection/hog.py new file mode 100644 index 00000000000..f32c070ede3 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/hog.py @@ -0,0 +1,111 @@ +# Histogram of Oriented Gradients (HoG) Example +# +# This example demonstrates HoG visualization. +# +# Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the +# image without JPEG artifacts, uncomment the lines that save the image to uSD. 
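+# The note above mentions saving the image to uSD, but this port does not
+# include those lines. A hedged sketch of what they might look like inside the
+# capture loop below (the path and `frame_count` counter are assumptions):
+#
+#   img.find_hog()
+#   img.save("/sdcard/hog.%04d.bmp" % frame_count)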
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + img.find_hog() + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/keypoints.py b/share/qtcreator/examples/11-Feature-Detection/keypoints.py new file 
mode 100644 index 00000000000..64c013ffb2f --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/keypoints.py @@ -0,0 +1,145 @@ +# Object tracking with keypoints example. +# Show the camera an object and then run the script. A set of keypoints will be extracted +# once and then tracked in the following frames. If you want a new set of keypoints re-run +# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def camera_drop(frame): + for i in range(frame): + os.exitpoint() + img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, img) + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + kpts1 = None + # NOTE: uncomment to load a keypoints descriptor from file + #kpts1 = image.load_descriptor("/desc.orb") + camera_drop(60) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, 
CAM_CHN_ID_1) + img = rgb888_img.to_grayscale() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + draw_img.clear() + if kpts1 == None: + # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid. + kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2) + if kpts1: + if SCALE == 1: + draw_img.draw_keypoints(kpts1) + draw_img.copy_to(osd_img) + time.sleep(2) + fps.reset() + else: + # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract + # keypoints from the first scale only, which will match one of the scales in the first descriptor. + kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True) + if kpts2: + match = image.match_descriptor(kpts1, kpts2, threshold=85) + if (match.count()>10): + # If we have at least n "good matches" + # Draw bounding rectangle and cross. + draw_img.draw_rectangle([v*SCALE for v in match.rect()]) + draw_img.draw_cross(match.cx()*SCALE, match.cy()*SCALE, size=10) + + print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta())) + # NOTE: uncomment if you want to draw the keypoints + #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True) + draw_img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/lbp.py b/share/qtcreator/examples/11-Feature-Detection/lbp.py new file mode 100644 index 00000000000..6d7e95c26ca --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/lbp.py @@ -0,0 +1,144 @@ +# Local Binary Patterns (LBP) Example +# +# This example shows off how to use the local binary pattern feature descriptor +# on your CanMV Cam. LBP descriptors work like Freak feature descriptors. +# +# WARNING: LBP support needs to be reworked! As of right now this feature needs +# a lot of work to be made into something useful. This script will remain to show +# that the functionality exists, but, in its current state, is inadequate.
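+# A hedged sketch of persisting the enrolled descriptor across runs, matching
+# the commented-out load_descriptor() call further down (the SD card path is
+# an assumption):
+#
+#   d0 = img.find_lbp(face)                        # descriptor of the enrolled face
+#   image.save_descriptor("/sdcard/desc.lbp", d0)  # write it out once
+#   d0 = image.load_descriptor("/sdcard/desc.lbp") # reload it on later runs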
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # media buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create media source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create media sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create media link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def camera_drop(frame): + for i in range(frame): + os.exitpoint() + img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, img) + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + osd_img.draw_string(0, 0, "Please wait...") + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + # Load Haar Cascade + # By default this will use all stages; fewer stages is faster but less accurate. + face_cascade = image.HaarCascade("frontalface", stages=25) + print(face_cascade) + d0 = None + #d0 = image.load_descriptor("/desc.lbp") + # Skip a few frames to allow the sensor to settle down + # Note: This takes more time when executing from the IDE.
+ camera_drop(90) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + draw_img.clear() + objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25) + if objects: + face = objects[0] + d1 = img.find_lbp(face) + if (d0 == None): + d0 = d1 + else: + dist = image.match_descriptor(d0, d1) + draw_img.draw_string(0, 10, "Match %d%%"%(dist)) + print("Match %d%%"%(dist)) + + draw_img.draw_rectangle([v*SCALE for v in face]) + # Draw FPS + draw_img.draw_string(0, 0, "FPS:%.2f"%(fps.fps())) + draw_img.copy_to(osd_img) + del img + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/linear_regression_fast.py b/share/qtcreator/examples/11-Feature-Detection/linear_regression_fast.py new file mode 100644 index 00000000000..8f8856a01d2 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/linear_regression_fast.py @@ -0,0 +1,130 @@ +# Fast Linear Regression Example +# +# This example shows off how to use the get_regression() method on your CanMV Cam +# to get the linear regression of a ROI. Using this method you can easily build +# a robot which can track lines which all point in the same general direction +# but are not actually connected. Use find_blobs() on lines that are nicely +# connected for better filtering options and control. +# +# This is called the fast linear regression because we use the least-squares +# method to fit the line. However, this method is NOT GOOD FOR ANY images that +# have a lot (or really any) outlier points which corrupt the line fit... + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +THRESHOLD = (0, 100) # Grayscale threshold for dark things... +BINARY_VISIBLE = True # Does binary first so you can see what the linear regression + # is being run on... might lower FPS though. 
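+# A small pure-Python illustration (not used by the example below) of why the
+# least-squares fit is fragile: a single outlier drags the slope of an
+# otherwise perfect y = x line.
+def lsq_slope(pts):
+    # ordinary least-squares slope through a list of (x, y) points
+    n = len(pts)
+    mx = sum(x for x, _ in pts) / n
+    my = sum(y for _, y in pts) / n
+    return sum((x - mx) * (y - my) for x, y in pts) / sum((x - mx) ** 2 for x, _ in pts)
+
+print(lsq_slope([(x, x) for x in range(10)]))              # 1.0
+print(lsq_slope([(x, x) for x in range(10)] + [(9, 100)])) # pulled far above 1.0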
+ +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + osd_img.draw_string(0, 0, "Please wait...") + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + img = img.binary([THRESHOLD]) if BINARY_VISIBLE else img + # Returns a line object similar to line objects returned by find_lines() and + # find_line_segments(). You have x1(), y1(), x2(), y2(), length(), + # theta() (rotation in degrees), rho(), and magnitude(). + # + # magnitude() represents how well the linear regression worked. It goes from + # (0, INF] where 0 is returned for a circle. The more linear the + # scene is the higher the magnitude. 
+ line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD]) + if (line): img.draw_line(line.line(), color = 127) + print("FPS %f, mag = %s" % (fps.fps(), str(line.magnitude()) if (line) else "N/A")) + img.copy_to(osd_img) + del img + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/linear_regression_robust.py b/share/qtcreator/examples/11-Feature-Detection/linear_regression_robust.py new file mode 100644 index 00000000000..7b97425edcc --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/linear_regression_robust.py @@ -0,0 +1,132 @@ +# Robust Linear Regression Example +# +# This example shows off how to use the get_regression() method on your CanMV Cam +# to get the linear regression of a ROI. Using this method you can easily build +# a robot which can track lines which all point in the same general direction +# but are not actually connected. Use find_blobs() on lines that are nicely +# connected for better filtering options and control. +# +# We're using the robust=True argument for get_regression() in this script which +# computes the linear regression using a much more robust algorithm... but potentially +# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED +# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually +# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY! + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +THRESHOLD = (0, 100) # Grayscale threshold for dark things... +BINARY_VISIBLE = True # Does binary first so you can see what the linear regression + # is being run on... might lower FPS though. 
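+# A hedged pure-Python sketch of a Theil-Sen style median-of-slopes fit, the
+# kind of O(N^2) estimator the robust mode pays for (illustration only; the
+# firmware's exact algorithm may differ):
+def theil_sen_slope(pts):
+    # the median pairwise slope shrugs off a minority of outliers
+    slopes = sorted((y2 - y1) / (x2 - x1)
+                    for i, (x1, y1) in enumerate(pts)
+                    for (x2, y2) in pts[i + 1:] if x2 != x1)
+    return slopes[len(slopes) // 2]
+
+print(theil_sen_slope([(x, x) for x in range(10)] + [(9, 100)]))  # still 1.0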
+ +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + osd_img.draw_string(0, 0, "Please wait...") + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + img = img.binary([THRESHOLD]) if BINARY_VISIBLE else img + # Returns a line object similar to line objects returned by find_lines() and + # find_line_segments(). You have x1(), y1(), x2(), y2(), length(), + # theta() (rotation in degrees), rho(), and magnitude(). + # + # magnitude() represents how well the linear regression worked. It goes from + # (0, INF] where 0 is returned for a circle. The more linear the + # scene is the higher the magnitude. 
+ line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True) + if (line): img.draw_line(line.line(), color = 127) + print("FPS %f, mag = %s" % (fps.fps(), str(line.magnitude()) if (line) else "N/A")) + img.copy_to(osd_img) + del img + gc.collect() + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/template_matching.py b/share/qtcreator/examples/11-Feature-Detection/template_matching.py new file mode 100644 index 00000000000..23e09713410 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/template_matching.py @@ -0,0 +1,134 @@ +# Template Matching Example - Normalized Cross Correlation (NCC) +# +# This example shows off how to use the NCC feature of your CanMV Cam to match +# image patches to parts of an image... except in extremely controlled environments +# NCC is not all that useful. +# +# WARNING: NCC support needs to be reworked! As of right now this feature needs +# a lot of work to be made into something useful. This script will remain to show +# that the functionality exists, but, in its current state, is inadequate. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys +from image import SEARCH_EX, SEARCH_DS + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # media buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create media source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create media sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create media link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + 
display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for drawing + draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + osd_img.draw_string(0, 0, "Please wait...") + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + # Load template. + # Template should be a small (eg. 32x32 pixels) grayscale image. + # template = image.Image("/sd/template.bmp") + # template.to_grayscale() + template = image.Image(32, 32, image.GRAYSCALE) + template.draw_circle(16,16,16,color=(255,255,255),fill=True) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + draw_img.clear() + # find_template(template, threshold, [roi, step, search]) + # ROI: The region of interest tuple (x, y, w, h). + # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster. + # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search + # + # Note1: ROI has to be smaller than the image and bigger than the template. + # Note2: In diamond search, step and ROI are both ignored. + r = img.find_template(template, 0.50, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60)) + if r: + draw_img.draw_rectangle([v*SCALE for v in r],color=(255,0,0)) + draw_img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/adaptive_histogram_equalization.py b/share/qtcreator/examples/12-Image-Filters/adaptive_histogram_equalization.py new file mode 100644 index 00000000000..0444835aa7d --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/adaptive_histogram_equalization.py @@ -0,0 +1,118 @@ +# Adaptive Histogram Equalization +# +# This example shows off how to use adaptive histogram equalization to improve +# the contrast in the image. Adaptive histogram equalization splits the image +# into regions and then equalizes the histogram in those regions to improve +# the image contrast versus a global histogram equalization. Additionally, +# you may specify a clip limit to prevent the contrast from going wild. 
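+# A hedged usage sketch of the knobs described above (values are illustrative,
+# not tuned; the example below uses only the last form):
+#
+#   img.histeq()                             # global equalization
+#   img.histeq(adaptive=True)                # per-region, strongest local contrast
+#   img.histeq(adaptive=True, clip_limit=3)  # adaptive, with the contrast capped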
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # A clip_limit of < 0 gives you normal adaptive histogram equalization + # which may result in huge amounts of contrast noise... + + # A clip_limit of 1 does nothing. For best results go slightly higher + # than 1 like below. The higher you go the closer you get back to + # standard adaptive histogram equalization with huge contrast swings. 
+ img.histeq(adaptive=True, clip_limit=3) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/blur_filter.py b/share/qtcreator/examples/12-Image-Filters/blur_filter.py new file mode 100644 index 00000000000..1dadebc45a4 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/blur_filter.py @@ -0,0 +1,109 @@ +# Blur Filter Example +# +# This example shows off using the Gaussian filter to blur images. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # media buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create media source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create media sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create media link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output rgb888 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try:
+ os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Run the kernel on every pixel of the image. + img.gaussian(1) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/cartoon_filter.py b/share/qtcreator/examples/12-Image-Filters/cartoon_filter.py new file mode 100644 index 00000000000..e1c44c69d46 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/cartoon_filter.py @@ -0,0 +1,119 @@ +# Cartoon Filter +# +# This example shows off a simple cartoon filter on images. The cartoon +# filter works by joining similar pixel areas of an image and replacing +# the pixels in those areas with the area mean. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image 
for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # seed_threshold controls the maximum area growth of a colored + # region. Making this larger will merge more pixels. + + # floating_thresholds controls the maximum pixel-to-pixel difference + # when growing a region. Setting this very high will quickly combine + # all pixels in the image. You should keep this small. + + # cartoon() will grow regions while both thresholds are satisfied... + + img.cartoon(seed_threshold=0.05, floating_thresholds=0.05) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/color_bilateral_filter.py b/share/qtcreator/examples/12-Image-Filters/color_bilateral_filter.py new file mode 100644 index 00000000000..62feb0048a4 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/color_bilateral_filter.py @@ -0,0 +1,121 @@ +# Color Bilateral Filter Example +# +# This example shows off using the bilateral filter on color images.
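+# Conceptually (a hedged sketch, not the firmware implementation), each
+# neighbor pixel's weight combines closeness in space and closeness in color:
+#
+#   w = exp(-(dx*dx + dy*dy) / (2 * space_sigma**2)) \
+#     * exp(-(dc*dc) / (2 * color_sigma**2))
+#
+# so strong edges (large color difference dc) survive while flat regions blur.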
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # media buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create media source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create media sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create media link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output rgb888 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # color_sigma controls how close color-wise pixels have to be to each other to be + # blurred together. A smaller value means they have to be closer. + # A larger value is less strict. + + # space_sigma controls how close space-wise pixels have to be to each other to be + # blurred together. A smaller value means they have to be closer. + # A larger value is less strict. + + # Run the kernel on every pixel of the image. + img.bilateral(3, color_sigma=0.1, space_sigma=1) + + # Note that the bilateral filter can introduce image defects if you set + # color_sigma/space_sigma too aggressively. Increase the sigma values until + # the defects go away if you see them.
+ img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/color_binary_filter.py b/share/qtcreator/examples/12-Image-Filters/color_binary_filter.py new file mode 100644 index 00000000000..16f25b29452 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/color_binary_filter.py @@ -0,0 +1,135 @@ +# Color Binary Filter Example +# +# This script shows off the binary image filter. You may pass binary any +# number of thresholds to segment the image by. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +# Use the Tools -> Machine Vision -> Threshold Editor to pick better thresholds. +red_threshold = (0,100, 0,127, 0,127) # L A B +green_threshold = (0,100, -128,0, 0,127) # L A B +blue_threshold = (0,100, -128,127, -128,0) # L A B + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # media buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create media source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create media sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create media link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output rgb888 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img =
image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + frame_count = 0 + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Test red threshold + if frame_count < 100: + img.binary([red_threshold]) + # Test green threshold + elif frame_count < 200: + img.binary([green_threshold]) + # Test blue threshold + elif frame_count < 300: + img.binary([blue_threshold]) + # Test not red threshold + elif frame_count < 400: + img.binary([red_threshold], invert = 1) + # Test not green threshold + elif frame_count < 500: + img.binary([green_threshold], invert = 1) + # Test not blue threshold + elif frame_count < 600: + img.binary([blue_threshold], invert = 1) + else: + frame_count = 0 + frame_count = frame_count + 1 + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/color_light_removal.py b/share/qtcreator/examples/12-Image-Filters/color_light_removal.py new file mode 100644 index 00000000000..c6319e79438 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/color_light_removal.py @@ -0,0 +1,115 @@ +# Color Light Removal +# +# This example shows off how to remove bright lights from the image. +# You can do this using the binary() method with the "zero=" argument. +# +# Removing bright lights from the image allows you to now use +# histeq() on the image without outliers from oversaturated +# parts of the image breaking the algorithm... 
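+#
+# For instance (an added illustrative sketch using the threshold defined
+# below, not part of the original demo loop), zero= keeps the image intact
+# instead of binarizing it:
+#
+#   img.binary([thresholds], zero=True)  # black out only the matching bright pixels
+#
+# so the oversaturated highlights are removed while everything else is left
+# untouched for a later histeq() pass.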
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +thresholds = (90, 100, -128, 127, -128, 127) + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.binary([thresholds], invert=False, zero=True) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/edge_filter.py b/share/qtcreator/examples/12-Image-Filters/edge_filter.py new file 
mode 100644 index 00000000000..84355a86d60 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/edge_filter.py @@ -0,0 +1,109 @@ +# Edge Filter Example +# +# This example shows off using the laplacian filter to detect edges. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Run the kernel on every pixel of the image. 
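+            # Added note (illustrative; argument semantics assumed from the
+            # OpenMV-style image API): a kernel-size argument of N selects a
+            # ((N*2)+1)x((N*2)+1) window, so laplacian(1, ...) below runs a 3x3
+            # edge kernel, and sharpen=True adds the edge response back onto the
+            # source pixels to sharpen the image rather than output a raw edge map.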
+            img.laplacian(1, sharpen=True)
+            img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/12-Image-Filters/erode_and_dilate.py b/share/qtcreator/examples/12-Image-Filters/erode_and_dilate.py
new file mode 100644
index 00000000000..b38e868ca37
--- /dev/null
+++ b/share/qtcreator/examples/12-Image-Filters/erode_and_dilate.py
@@ -0,0 +1,136 @@
+# Erode and Dilate Example
+#
+# This example shows off the erode and dilate functions which you can run on
+# a binary image to remove noise. This example was originally a test but it's
+# useful for showing off how these functions work.
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os, gc, sys
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+SCALE = 4
+DETECT_WIDTH = DISPLAY_WIDTH // SCALE
+DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE
+
+grayscale_thres = (170, 255)
+rgb565_thres = (70, 100, -128, 127, -128, 127)
+
+def camera_init():
+    # use hdmi for display
+    display.init(LT9611_1920X1080_30FPS)
+    # config vb for osd layer
+    config = k_vb_config()
+    config.max_pool_cnt = 1
+    config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT
+    config.comm_pool[0].blk_cnt = 1
+    config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE
+    # media buffer config
+    media.buffer_config(config)
+    # init default sensor
+    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
+    # set chn0 output size
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
+    # set chn0 output format
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
+    # create media source device
+    globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    # create media sink device
+    globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    # create media link
+    media.create_link(meida_source, meida_sink)
+    # set display plane with video channel
+    display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+    # set chn1 output rgb888
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT)
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888)
+    # media buffer init
+    media.buffer_init()
+    # request media buffer for osd image
+    globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
+    # start stream for camera device0
+    camera.start_stream(CAM_DEV_ID_0)
+
+def camera_deinit():
+    # stop stream for camera device0
+    camera.stop_stream(CAM_DEV_ID_0)
+    # deinit display
+    display.deinit()
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # release media buffer
+    media.release_buffer(globals()["buffer"])
+    # destroy media link
+    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
+    # deinit media buffer
+    media.buffer_deinit()
+
+def capture_picture():
+    # create image for osd
+    buffer = globals()["buffer"]
+    osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    frame_count = 0
+    fps = time.clock()
+    while True:
+        fps.tick()
+        try:
+            os.exitpoint()
+            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = rgb888_img.to_rgb565()
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            # Test erode on an rgb565 binary image
+            if frame_count < 100:
+                img.binary([rgb565_thres])
+                img.erode(2)
+            # Test dilate on an rgb565 binary image
+            elif frame_count < 200:
+                img.binary([rgb565_thres])
+                img.dilate(2)
+            # Test erode on a grayscale binary image
+            elif frame_count < 300:
+                img = img.to_grayscale()
+                img.binary([grayscale_thres])
+                img.erode(2)
+                img = img.to_rgb565()
+            # Test dilate on a grayscale binary image
+            elif frame_count < 400:
+                img = img.to_grayscale()
+                img.binary([grayscale_thres])
+                img.dilate(2)
+                img = img.to_rgb565()
+            else:
+                frame_count = 0
+            frame_count = frame_count + 1
+            img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/12-Image-Filters/gamma_correction.py b/share/qtcreator/examples/12-Image-Filters/gamma_correction.py
new file mode 100644
index 00000000000..646abd687b3
--- /dev/null
+++ b/share/qtcreator/examples/12-Image-Filters/gamma_correction.py
@@ -0,0 +1,111 @@
+# Gamma Correction
+#
+# This example shows off gamma correction to make the image brighter. The gamma
+# correction method can also fix contrast and brightness.
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Gamma, contrast, and brightness correction are applied to each color channel. The + # values are scaled to the range per color channel per image type... 
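+            # Added worked example (illustrative; assumes the common
+            # out = in**gamma mapping on normalized pixels, which matches this
+            # example brightening with gamma < 1): a mid-dark pixel at
+            # 64/255 ~= 0.25 maps to 0.25**0.5 = 0.5, i.e. roughly 128/255.
+            # contrast is treated here as a multiplier and brightness as an
+            # additive offset applied on top of that.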
+            img.gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0)
+            img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/12-Image-Filters/grayscale_bilateral_filter.py b/share/qtcreator/examples/12-Image-Filters/grayscale_bilateral_filter.py
new file mode 100644
index 00000000000..57a82ed8c66
--- /dev/null
+++ b/share/qtcreator/examples/12-Image-Filters/grayscale_bilateral_filter.py
@@ -0,0 +1,122 @@
+# Grayscale Bilateral Filter Example
+#
+# This example shows off using the bilateral filter on grayscale images.
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os, gc, sys
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+SCALE = 4
+DETECT_WIDTH = DISPLAY_WIDTH // SCALE
+DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE
+
+def camera_init():
+    # use hdmi for display
+    display.init(LT9611_1920X1080_30FPS)
+    # config vb for osd layer
+    config = k_vb_config()
+    config.max_pool_cnt = 1
+    config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT
+    config.comm_pool[0].blk_cnt = 1
+    config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE
+    # media buffer config
+    media.buffer_config(config)
+    # init default sensor
+    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
+    # set chn0 output size
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
+    # set chn0 output format
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
+    # create media source device
+    globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    # create media sink device
+    globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    # create media link
+    media.create_link(meida_source, meida_sink)
+    # set display plane with video channel
+    display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+    # set chn1 output rgb888
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT)
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888)
+    # media buffer init
+    media.buffer_init()
+    # request media buffer for osd image
+    globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
+    # start stream for camera device0
+    camera.start_stream(CAM_DEV_ID_0)
+
+def camera_deinit():
+    # stop stream for camera device0
+    camera.stop_stream(CAM_DEV_ID_0)
+    # deinit display
+    display.deinit()
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # release media buffer
+    media.release_buffer(globals()["buffer"])
+    # destroy media link
+    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
+    # deinit media buffer
+    media.buffer_deinit()
+
+def capture_picture():
+    # create image for osd
+    buffer = globals()["buffer"]
+    osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    fps = time.clock()
+    while True:
+        fps.tick()
+        try:
+            os.exitpoint()
+            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = rgb888_img.to_grayscale()
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            # color_sigma controls how close color-wise pixels have to be to each other to be
+            # blurred together. A smaller value means they have to be closer.
+            # A larger value is less strict.
+
+            # space_sigma controls how close space-wise pixels have to be to each other to be
+            # blurred together. A smaller value means they have to be closer.
+            # A larger value is less strict.
+
+            # Run the kernel on every pixel of the image.
+            img.bilateral(3, color_sigma=0.1, space_sigma=1)
+
+            # Note that the bilateral filter can introduce image defects if you set
+            # color_sigma/space_sigma too aggressively. Increase the sigma values until
+            # the defects go away if you see them.
+
+            img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/12-Image-Filters/grayscale_binary_filter.py b/share/qtcreator/examples/12-Image-Filters/grayscale_binary_filter.py
new file mode 100644
index 00000000000..08d8e02211e
--- /dev/null
+++ b/share/qtcreator/examples/12-Image-Filters/grayscale_binary_filter.py
@@ -0,0 +1,127 @@
+# Grayscale Binary Filter Example
+#
+# This script shows off the binary image filter. You may pass binary any
+# number of thresholds to segment the image by.
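+#
+# For example (an added illustrative sketch using the thresholds defined
+# below, not part of the original demo loop), several ranges can be passed
+# in one call to keep both very dark and very bright pixels in one pass:
+#
+#   img.binary([low_threshold, high_threshold])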
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +low_threshold = (0, 50) +high_threshold = (205, 255) + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + frame_count = 0 + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_grayscale() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Test low threshold + if frame_count < 100: + img.binary([low_threshold]) + # Test high threshold + elif frame_count < 200: + img.binary([high_threshold]) + # Test not low threshold + elif frame_count < 300: + img.binary([low_threshold], invert = 1) + # Test not high threshold + elif frame_count < 400: + img.binary([high_threshold], invert = 1) + else: + frame_count = 0 + frame_count = frame_count + 1 + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") 
+ camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/grayscale_light_removal.py b/share/qtcreator/examples/12-Image-Filters/grayscale_light_removal.py new file mode 100644 index 00000000000..b03181aed39 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/grayscale_light_removal.py @@ -0,0 +1,115 @@ +# Grayscale Light Removal +# +# This example shows off how to remove bright lights from the image. +# You can do this using the binary() method with the "zero=" argument. +# +# Removing bright lights from the image allows you to now use +# histeq() on the image without outliers from oversaturated +# parts of the image breaking the algorithm... + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +thresholds = (220, 255) + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + 
rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_grayscale() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.binary([thresholds], invert=False, zero=True) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/histogram_equalization.py b/share/qtcreator/examples/12-Image-Filters/histogram_equalization.py new file mode 100644 index 00000000000..1165539c759 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/histogram_equalization.py @@ -0,0 +1,109 @@ +# Histogram Equalization +# +# This example shows off how to use histogram equalization to improve +# the contrast in the image. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, 
image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.histeq() + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/kernel_filters.py b/share/qtcreator/examples/12-Image-Filters/kernel_filters.py new file mode 100644 index 00000000000..3c8294fd008 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/kernel_filters.py @@ -0,0 +1,115 @@ +# Kernel Filtering Example +# +# This example shows off how to use a generic kernel filter. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc. + +kernel = [-2, -1, 0, + -1, 1, 1, + 0, 1, 2] + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + 
media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_grayscale() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Run the kernel on every pixel of the image. + img.morph(kernel_size, kernel) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/lens_correction.py b/share/qtcreator/examples/12-Image-Filters/lens_correction.py new file mode 100644 index 00000000000..7fc05609477 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/lens_correction.py @@ -0,0 +1,111 @@ +# Lens Correction +# +# This example shows off how to use the lens correction method to fix lens +# distortion in an image. You need to do this for qrcode / barcode / data matrix +# detection. Increase the strength below until lines are straight in the view. +# Zoom in (higher) or out (lower) until you see enough of the image. 
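+#
+# For example (illustrative values, not from the original example): a strong
+# fisheye lens might need something like
+#
+#   img.lens_corr(strength=2.5, zoom=1.0)
+#
+# while a mildly distorted wide-angle lens may straighten out with a strength
+# closer to 1.1.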
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.lens_corr(strength = 1.8, zoom = 1.0) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/linear_polar.py b/share/qtcreator/examples/12-Image-Filters/linear_polar.py new file mode 100644 index 00000000000..a8de929ebb5 --- 
/dev/null +++ b/share/qtcreator/examples/12-Image-Filters/linear_polar.py @@ -0,0 +1,111 @@ +# Linear Polar Mapping Example +# +# This example shows off re-projecting the image using a linear polar +# transformation. Linear polar images are useful in that rotations +# become translations in the X direction and linear changes +# in scale become linear translations in the Y direction. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.linpolar(reverse=False) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + 
capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/log_polar.py b/share/qtcreator/examples/12-Image-Filters/log_polar.py new file mode 100644 index 00000000000..52a185837da --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/log_polar.py @@ -0,0 +1,111 @@ +# Log Polar Mapping Example +# +# This example shows off re-projecting the image using a log polar +# transformation. Log polar images are useful in that rotations +# become translations in the X direction and exponential changes +# in scale (x2, x4, etc.) become linear translations in the Y direction. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.logpolar(reverse=False) + 
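+            # Added note (illustrative; parameter meaning assumed from the
+            # OpenMV-style API): reverse=False projects the image into
+            # log-polar space, while reverse=True would re-project a log-polar
+            # image back into cartesian space.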
img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/mean_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/mean_adaptive_threshold_filter.py new file mode 100644 index 00000000000..c5375cdd591 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/mean_adaptive_threshold_filter.py @@ -0,0 +1,113 @@ +# Mean Adaptive Threshold Filter Example +# +# This example shows off mean filtering with adaptive thresholding. +# When mean(threshold=True) the mean() method adaptive thresholds the image +# by comparing the mean of the pixels around a pixel, minus an offset, with that pixel. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, 
phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    fps = time.clock()
+    while True:
+        fps.tick()
+        try:
+            os.exitpoint()
+            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = rgb888_img.to_rgb565()
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
+            # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
+            # shouldn't ever need to use a value bigger than 2.
+            img.mean(1, threshold=True, offset=5, invert=True)
+            img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/12-Image-Filters/mean_filter.py b/share/qtcreator/examples/12-Image-Filters/mean_filter.py
new file mode 100644
index 00000000000..fe320130e75
--- /dev/null
+++ b/share/qtcreator/examples/12-Image-Filters/mean_filter.py
@@ -0,0 +1,113 @@
+# Mean Filter Example
+#
+# This example shows off mean filtering. Mean filtering is your standard average
+# filter in an NxN neighborhood. Mean filtering removes noise in the image by
+# blurring everything. But it's the fastest kernel filter operation.
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os, gc, sys
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+SCALE = 4
+DETECT_WIDTH = DISPLAY_WIDTH // SCALE
+DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE
+
+def camera_init():
+    # use hdmi for display
+    display.init(LT9611_1920X1080_30FPS)
+    # config vb for osd layer
+    config = k_vb_config()
+    config.max_pool_cnt = 1
+    config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT
+    config.comm_pool[0].blk_cnt = 1
+    config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE
+    # media buffer config
+    media.buffer_config(config)
+    # init default sensor
+    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
+    # set chn0 output size
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
+    # set chn0 output format
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
+    # create media source device
+    globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    # create media sink device
+    globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    # create media link
+    media.create_link(meida_source, meida_sink)
+    # set display plane with video channel
+    display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+    # set chn1 output rgb888
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT)
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888)
+    # media buffer init
+    media.buffer_init()
+    # request media buffer for osd image
+    globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT)
+    # start stream for camera device0
+    camera.start_stream(CAM_DEV_ID_0)
+
+def camera_deinit():
+    # stop stream for camera device0
+    camera.stop_stream(CAM_DEV_ID_0)
+    # deinit display
+    display.deinit()
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # release media buffer
+    media.release_buffer(globals()["buffer"])
+    # destroy media link
+    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
+    # deinit media buffer
+    media.buffer_deinit()
+
+def capture_picture():
+    # create image for osd
+    buffer = globals()["buffer"]
+    osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
+    osd_img.clear()
+    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
+    fps = time.clock()
+    while True:
+        fps.tick()
+        try:
+            os.exitpoint()
+            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+            img = rgb888_img.to_rgb565()
+            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
+            # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
+            # shouldn't ever need to use a value bigger than 2.
+            img.mean(1)
+            img.copy_to(osd_img)
+            del img
+            gc.collect()
+            print(fps.fps())
+        except KeyboardInterrupt as e:
+            print("user stop: ", e)
+            break
+        except BaseException as e:
+            sys.print_exception(e)
+            break
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    camera_is_init = False
+    try:
+        print("camera init")
+        camera_init()
+        camera_is_init = True
+        print("camera capture")
+        capture_picture()
+    except Exception as e:
+        sys.print_exception(e)
+    finally:
+        if camera_is_init:
+            print("camera deinit")
+            camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/12-Image-Filters/median_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/median_adaptive_threshold_filter.py
new file mode 100644
index 00000000000..a5860f22a73
--- /dev/null
+++ b/share/qtcreator/examples/12-Image-Filters/median_adaptive_threshold_filter.py
@@ -0,0 +1,115 @@
+# Median Adaptive Threshold Filter Example
+#
+# This example shows off median filtering with adaptive thresholding.
+# With median(threshold=True), the median() method adaptively thresholds the
+# image by comparing the median of the pixels around a pixel, minus an offset,
+# with that pixel.
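+#
+# In other words (an added illustrative restatement), each output pixel is
+# decided by the test
+#
+#   pixel > median(NxN neighborhood) - offset
+#
+# and invert=True flips which side of that test turns white.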
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # The first argument to the median filter is the kernel size, it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second + # argument "percentile" is the percentile number to choose from the NxN + # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75 + # would be the upper quartile. 
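+            # For example, assuming percentile indexes into the sorted 3x3
+            # neighborhood [12, 15, 18, 20, 22, 25, 30, 40, 200]:
+            #   percentile=0.5  -> 22 (the median; robust to the 200 outlier)
+            #   percentile=0.25 -> 18 (lower quartile)
+            #   percentile=0.75 -> 30 (upper quartile)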
+ img.median(1, percentile=0.5, threshold=True, offset=5, invert=True) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/median_filter.py b/share/qtcreator/examples/12-Image-Filters/median_filter.py new file mode 100644 index 00000000000..6fa5ff9a84e --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/median_filter.py @@ -0,0 +1,115 @@ +# Median Filter Example +# +# This example shows off median filtering. Median filtering replaces every pixel +# with the median value of it's NxN neighborhood. Median filtering is good for +# removing noise in the image while preserving edges. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, 
phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # The first argument to the median filter is the kernel size, it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second + # argument "percentile" is the percentile number to choose from the NxN + # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75 + # would be the upper quartile. + img.median(1, percentile=0.5) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/midpoint_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/midpoint_adaptive_threshold_filter.py new file mode 100644 index 00000000000..a4a4dbde461 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/midpoint_adaptive_threshold_filter.py @@ -0,0 +1,116 @@ +# Midpoint Adaptive Threshold Filter Example +# +# This example shows off midpoint filtering with adaptive thresholding. +# When midpoint(threshold=True) the midpoint() method adaptive thresholds the image +# by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel. 
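+
+# A minimal sketch of the midpoint statistic itself (hypothetical helper,
+# for illustration only -- the actual filter is implemented natively):
+def midpoint_demo(neighborhood, bias=0.5):
+    lo, hi = min(neighborhood), max(neighborhood)
+    # bias blends between the min (0.0) and max (1.0); 0.5 is the midpoint
+    return int(lo * (1 - bias) + hi * bias)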
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # The first argument is the kernel size. N coresponds to a ((N*2)+1)^2 + # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You + # shouldn't ever need to use a value bigger than 2. The "bias" argument + # lets you select between min and max blending. 0.5 == midpoint filter, + # 0.0 == min filter, and 1.0 == max filter. Note that the min filter + # makes images darker while the max filter makes images lighter. 
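+            # For example, a neighborhood with min=10 and max=200 yields:
+            #   bias=0.0 -> 10 (min filter), bias=0.5 -> 105 (midpoint),
+            #   bias=1.0 -> 200 (max filter); that result, minus `offset`,
+            #   is then compared against the center pixel.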
+ img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/midpoint_filter.py b/share/qtcreator/examples/12-Image-Filters/midpoint_filter.py new file mode 100644 index 00000000000..cc7c426c8b7 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/midpoint_filter.py @@ -0,0 +1,115 @@ +# Midpoint Filter Example +# +# This example shows off midpoint filtering. Midpoint filtering replaces each +# pixel by the average of the min and max pixel values for a NxN neighborhood. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, 
poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # The first argument is the kernel size. N coresponds to a ((N*2)+1)^2 + # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You + # shouldn't ever need to use a value bigger than 2. The "bias" argument + # lets you select between min and max blending. 0.5 == midpoint filter, + # 0.0 == min filter, and 1.0 == max filter. Note that the min filter + # makes images darker while the max filter makes images lighter. + img.midpoint(1, bias=0.5) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/mode_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/mode_adaptive_threshold_filter.py new file mode 100644 index 00000000000..8b17cae93e8 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/mode_adaptive_threshold_filter.py @@ -0,0 +1,113 @@ +# Mode Adaptive Threshold Filter Example +# +# This example shows off mode filtering with adaptive thresholding. +# When mode(threshold=True) the mode() method adaptive thresholds the image +# by comparing the mode of the pixels around a pixel, minus an offset, with that pixel. +# Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges... 
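+
+# A minimal sketch of the mode statistic (hypothetical helper, for
+# illustration only):
+def mode_demo(neighborhood):
+    counts = {}
+    for v in neighborhood:
+        counts[v] = counts.get(v, 0) + 1
+    # the most frequently occurring value in the window wins
+    return max(counts, key=counts.get)
+
+# On grayscale this is well behaved; on RGB565 the winning values of the
+# color channels at an edge can combine into a color present in neither
+# region, which is likely why the header warns about edge artifacts.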
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # The only argument to the median filter is the kernel size, it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. 
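+            # (Note: the wording above is carried over from the median
+            # examples; this file actually calls mode(), which replaces each
+            # pixel with the most common value in its kernel neighborhood and
+            # takes no "percentile" argument.)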
+ img.mode(1, threshold=True, offset=5, invert=True) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/mode_filter.py b/share/qtcreator/examples/12-Image-Filters/mode_filter.py new file mode 100644 index 00000000000..fa63b142a74 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/mode_filter.py @@ -0,0 +1,113 @@ +# Mode Filter Example +# +# This example shows off mode filtering. Mode filtering is a highly non-linear +# operation which replaces each pixel with the mode of the NxN neighborhood +# of pixels around it. Avoid using the mode filter on RGB565 images. It will +# cause artifacts on image edges... + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, 
alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # The only argument to the median filter is the kernel size, it can be + # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. + img.mode(1) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/negative.py b/share/qtcreator/examples/12-Image-Filters/negative.py new file mode 100644 index 00000000000..dde77acc7d6 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/negative.py @@ -0,0 +1,109 @@ +# Negative Example +# +# This example shows off negating the image. This is not a particularly +# useful method but it can come in handy once in a while. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer 
+ media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.negate() + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/perspective_and_rotation_correction.py b/share/qtcreator/examples/12-Image-Filters/perspective_and_rotation_correction.py new file mode 100644 index 00000000000..574b5a7fc48 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/perspective_and_rotation_correction.py @@ -0,0 +1,159 @@ +# Rotation Correction +# +# This example shows off how to use the rotation_corr() to both correct for +# perspective distortion and then to rotate the new corrected image in 3D +# space aftwards to handle movement. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +# The image will be warped such that the following points become the new: +# +# (0, 0) +# (w-1, 0) +# (w-1, h-1) +# (0, h-1) +# +# Try setting the points below to the corners of a quadrilateral +# (in clock-wise order) in the field-of-view. You can get points +# on the image by clicking and dragging on the frame buffer and +# recording the values shown in the histogram widget. + +w = DETECT_WIDTH +h = DETECT_HEIGHT + +TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! + (w-1, 0), # (x, y) CHANGE ME! + (w-1, h-1), # (x, y) CHANGE ME! + (0, h-1)] # (x, y) CHANGE ME! + +# Degrees per frame to rotation by... +X_ROTATION_DEGREE_RATE = 5 +Y_ROTATION_DEGREE_RATE = 0.5 +Z_ROTATION_DEGREE_RATE = 0 +X_OFFSET = 0 +Y_OFFSET = 0 + +ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. +FOV_WINDOW = 25 # Between 0 and 180. Represents the field-of-view of the scene + # window when rotating the image in 3D space. When closer to + # zero results in lines becoming straighter as the window + # moves away from the image being rotated in 3D space. A large + # value moves the window closer to the image in 3D space which + # results in the more perspective distortion and sometimes + # the image in 3D intersecting the scene window. 
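+
+# Conceptually, rotation_corr() builds a 3x3 perspective (homography) matrix
+# from the corners and remaps every pixel through it. A sketch of how a
+# single point maps, given some precomputed matrix m (hypothetical helper,
+# for illustration only):
+def warp_point(m, x, y):
+    # multiply in homogeneous coordinates, then divide by w to project back
+    xw = m[0][0] * x + m[0][1] * y + m[0][2]
+    yw = m[1][0] * x + m[1][1] * y + m[1][2]
+    w = m[2][0] * x + m[2][1] * y + m[2][2]
+    return (xw / w, yw / w)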
+ +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + x_rotation_counter = 0 + y_rotation_counter = 0 + z_rotation_counter = 0 + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.rotation_corr(x_rotation = x_rotation_counter, + y_rotation = y_rotation_counter, + z_rotation = z_rotation_counter, + x_translation = X_OFFSET, + y_translation = Y_OFFSET, + zoom = ZOOM_AMOUNT, + fov = FOV_WINDOW, + corners = TARGET_POINTS) + x_rotation_counter += X_ROTATION_DEGREE_RATE + y_rotation_counter += Y_ROTATION_DEGREE_RATE + z_rotation_counter += Z_ROTATION_DEGREE_RATE + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git 
a/share/qtcreator/examples/12-Image-Filters/perspective_correction.py b/share/qtcreator/examples/12-Image-Filters/perspective_correction.py new file mode 100644 index 00000000000..da36f3821bc --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/perspective_correction.py @@ -0,0 +1,129 @@ +# Perspective Correction +# +# This example shows off how to use the rotation_corr() to fix perspective +# issues related to how your CanMV Cam is mounted. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +# The image will be warped such that the following points become the new: +# +# (0, 0) +# (w-1, 0) +# (w-1, h-1) +# (0, h-1) +# +# Try setting the points below to the corners of a quadrilateral +# (in clock-wise order) in the field-of-view. You can get points +# on the image by clicking and dragging on the frame buffer and +# recording the values shown in the histogram widget. + +w = DETECT_WIDTH +h = DETECT_HEIGHT + +TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! + (w-1, 0), # (x, y) CHANGE ME! + (w-1, h-1), # (x, y) CHANGE ME! + (0, h-1)] # (x, y) CHANGE ME! + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, 
DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.rotation_corr(corners = TARGET_POINTS) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/rotation_correction.py b/share/qtcreator/examples/12-Image-Filters/rotation_correction.py new file mode 100644 index 00000000000..8789df09176 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/rotation_correction.py @@ -0,0 +1,138 @@ +# Rotation Correction +# +# This example shows off how to use the rotation_corr() to play with the scene +# window your CanMV Cam sees. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +# Degrees per frame to rotation by... +X_ROTATION_DEGREE_RATE = 5 +Y_ROTATION_DEGREE_RATE = 0.5 +Z_ROTATION_DEGREE_RATE = 0 +X_OFFSET = 0 +Y_OFFSET = 0 + +ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in. +FOV_WINDOW = 60 # Between 0 and 180. Represents the field-of-view of the scene + # window when rotating the image in 3D space. When closer to + # zero results in lines becoming straighter as the window + # moves away from the image being rotated in 3D space. A large + # value moves the window closer to the image in 3D space which + # results in the more perspective distortion and sometimes + # the image in 3D intersecting the scene window. 
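+
+# If the loop kept pace with the 30 FPS display, the rates above would be
+# about 150 deg/s around X and 15 deg/s around Y. A small sketch for keeping
+# the accumulating counters bounded on long runs (optional; hypothetical
+# helper, for illustration only):
+def advance_angle(angle, rate_per_frame):
+    # angles are periodic, so wrap to [0, 360) to avoid unbounded floats
+    return (angle + rate_per_frame) % 360.0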
+ +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + x_rotation_counter = 0 + y_rotation_counter = 0 + z_rotation_counter = 0 + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.rotation_corr(x_rotation = x_rotation_counter, + y_rotation = y_rotation_counter, + z_rotation = z_rotation_counter, + x_translation = X_OFFSET, + y_translation = Y_OFFSET, + zoom = ZOOM_AMOUNT, + fov = FOV_WINDOW) + + x_rotation_counter += X_ROTATION_DEGREE_RATE + y_rotation_counter += Y_ROTATION_DEGREE_RATE + z_rotation_counter += Z_ROTATION_DEGREE_RATE + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git 
a/share/qtcreator/examples/12-Image-Filters/sharpen_filter.py b/share/qtcreator/examples/12-Image-Filters/sharpen_filter.py new file mode 100644 index 00000000000..7fca5b46075 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/sharpen_filter.py @@ -0,0 +1,109 @@ +# Sharpen Filter Example +# +# This example shows off using the laplacian filter to sharpen images. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Run the kernel on every pixel of the image. 
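+            # laplacian(1, sharpen=True) adds the 3x3 Laplacian edge response
+            # back onto the source pixels -- conceptually similar to
+            # convolving with the classic sharpen kernel:
+            #    0 -1  0
+            #   -1  5 -1
+            #    0 -1  0
+            # (an identity kernel combined with a Laplacian), which boosts
+            # local contrast around edges.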
+ img.laplacian(1,sharpen=True) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/unsharp_filter.py b/share/qtcreator/examples/12-Image-Filters/unsharp_filter.py new file mode 100644 index 00000000000..e43e89e3e68 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/unsharp_filter.py @@ -0,0 +1,109 @@ +# Unsharp Filter Example +# +# This example shows off using the guassian filter to unsharp mask filter images. + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() + while True: + 
fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + # Run the kernel on every pixel of the image. + img.gaussian(1,unsharp=True) + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/12-Image-Filters/vflip_hmirror_transpose.py b/share/qtcreator/examples/12-Image-Filters/vflip_hmirror_transpose.py new file mode 100644 index 00000000000..1d4371906b4 --- /dev/null +++ b/share/qtcreator/examples/12-Image-Filters/vflip_hmirror_transpose.py @@ -0,0 +1,122 @@ +# Vertical Flip - Horizontal Mirror - Transpose +# +# This example shows off how to vertically flip, horizontally mirror, or +# transpose an image. Note that: +# +# vflip=False, hmirror=False, transpose=False -> 0 degree rotation +# vflip=True, hmirror=False, transpose=True -> 90 degree rotation +# vflip=True, hmirror=True, transpose=False -> 180 degree rotation +# vflip=False, hmirror=True, transpose=True -> 270 degree rotation + +from media.camera import * +from media.display import * +from media.media import * +import time, os, gc, sys + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 +SCALE = 4 +DETECT_WIDTH = DISPLAY_WIDTH // SCALE +DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # config vb for osd layer + config = k_vb_config() + config.max_pool_cnt = 1 + config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT + config.comm_pool[0].blk_cnt = 1 + config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE + # meida buffer config + media.buffer_config(config) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output nv12 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # request media buffer for osd image + globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + 
time.sleep_ms(100) + # release media buffer + media.release_buffer(globals()["buffer"]) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + # create image for osd + buffer = globals()["buffer"] + osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) + osd_img.clear() + display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + mills = time.ticks_ms() + counter = 0 + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + img = rgb888_img.to_rgb565() + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + img.replace(vflip=(counter//2)%2, + hmirror=(counter//4)%2, + transpose=False) + + if (time.ticks_ms() > (mills + 1000)): + mills = time.ticks_ms() + counter += 1 + img.copy_to(osd_img) + del img + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + sys.print_exception(e) + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + sys.print_exception(e) + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/13-Snapshot/emboss_snapshot.py b/share/qtcreator/examples/13-Snapshot/emboss_snapshot.py new file mode 100644 index 00000000000..7a7a513bd25 --- /dev/null +++ b/share/qtcreator/examples/13-Snapshot/emboss_snapshot.py @@ -0,0 +1,77 @@ +# Emboss Snapshot Example +# +# Note: You will need an SD card to run this example. +# +# You can use your CanMV Cam to save modified image files. 
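+#
+# The morph() call below embosses by convolving with a 3x3 kernel whose
+# weights mirror with opposite sign across the center:
+#
+#    +2 +1  0
+#    +1 +1 -1
+#     0 -1 -2
+#
+# The weights sum to 1, so flat regions keep their brightness, while
+# intensity changes along the diagonal are pushed bright or dark -- which
+# reads as relief lighting from the top-left.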
+ +from media.camera import * +from media.display import * +from media.media import * +import time, os + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def camera_init(): + # use hdmi for display + display.init(LT9611_1920X1080_30FPS) + # init default sensor + camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + # set chn0 output size + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + # set chn0 output format + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # create meida source device + globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) + # create meida sink device + globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) + # create meida link + media.create_link(meida_source, meida_sink) + # set display plane with video channel + display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) + # set chn1 output rgb888 + camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) + camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) + # media buffer init + media.buffer_init() + # start stream for camera device0 + camera.start_stream(CAM_DEV_ID_0) + +def camera_deinit(): + # stop stream for camera device0 + camera.stop_stream(CAM_DEV_ID_0) + # deinit display + display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # destroy media link + media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) + # deinit media buffer + media.buffer_deinit() + +def capture_picture(): + time.sleep(1) + rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) + try: + img = rgb888_img.to_rgb565() + img.morph(1, [+2, +1, +0, + +1, +1, -1, + +0, -1, -2]) # Emboss the image. + img.save("/sdcard/snapshot_emboss.jpg") + print("save image ok") + except Exception as e: + print("save image fail: ", e) + # release image for dev and chn + camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + print("camera init") + camera_init() + print("camera capture") + capture_picture() + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/13-Snapshot/snapshot.py b/share/qtcreator/examples/13-Snapshot/snapshot.py new file mode 100644 index 00000000000..9656f264693 --- /dev/null +++ b/share/qtcreator/examples/13-Snapshot/snapshot.py @@ -0,0 +1,73 @@ +# Snapshot Example +# +# Note: You will need an SD card to run this example. +# +# You can use your CanMV Cam to save image files. 
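+#
+# If the save fails, the most common cause is the SD card not being mounted.
+# A quick check (sketch; the mount point name is an assumption and may vary
+# by firmware build):
+#
+#   import os
+#   print(os.listdir("/"))  # expect an "sdcard" entry before saving there
+#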
+
+from media.camera import *
+from media.display import *
+from media.media import *
+import time, os
+
+DISPLAY_WIDTH = ALIGN_UP(1920, 16)
+DISPLAY_HEIGHT = 1080
+
+def camera_init():
+    # use hdmi for display
+    display.init(LT9611_1920X1080_30FPS)
+    # init default sensor
+    camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR)
+    # set chn0 output size
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
+    # set chn0 output format
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420)
+    # create media source device
+    globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0)
+    # create media sink device
+    globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1)
+    # create media link
+    media.create_link(meida_source, meida_sink)
+    # set display plane with video channel
+    display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1)
+    # set chn1 output rgb888
+    camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT)
+    camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888)
+    # media buffer init
+    media.buffer_init()
+    # start stream for camera device0
+    camera.start_stream(CAM_DEV_ID_0)
+
+def camera_deinit():
+    # stop stream for camera device0
+    camera.stop_stream(CAM_DEV_ID_0)
+    # deinit display
+    display.deinit()
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    time.sleep_ms(100)
+    # destroy media link
+    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
+    # deinit media buffer
+    media.buffer_deinit()
+
+def capture_picture():
+    time.sleep(1)
+    rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
+    try:
+        rgb888_img.to_jpeg().save("/sdcard/snapshot.jpg")
+        print("save image ok")
+    except Exception as e:
+        print("save image fail: ", e)
+    # release image for dev and chn
+    camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+
+def main():
+    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
+    print("camera init")
+    camera_init()
+    print("camera capture")
+    capture_picture()
+    print("camera deinit")
+    camera_deinit()
+
+if __name__ == "__main__":
+    main()
diff --git a/share/qtcreator/examples/14-Socket/http_client.py b/share/qtcreator/examples/14-Socket/http_client.py
new file mode 100644
index 00000000000..4882afee386
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/http_client.py
@@ -0,0 +1,39 @@
+import socket
+
+
+def main(use_stream=True):
+    # create a socket
+    s = socket.socket()
+    # resolve the hostname and port to an address
+    ai = socket.getaddrinfo("www.baidu.com", 80)
+    #ai = socket.getaddrinfo("10.100.228.5", 8080)
+
+    print("Address infos:", ai)
+    addr = ai[0][-1]
+
+    print("Connect address:", addr)
+    # connect to the server
+    s.connect(addr)
+
+    if use_stream:
+        # MicroPython socket objects support stream (aka file) interface
+        # directly, but the line below is needed for CPython.
+        s = s.makefile("rwb", 0)
+        # send an HTTP request
+        s.write(b"GET /index.html HTTP/1.0\r\n\r\n")
+        # print the response
+        print(s.read())
+    else:
+        # send an HTTP request
+        s.send(b"GET /index.html HTTP/1.0\r\n\r\n")
+        # print the response
+        print(s.recv(4096))
+        #print(s.read())
+    # close the socket
+    s.close()
+
+
+#main()
+main(use_stream=True)
+main(use_stream=False)
+
diff --git a/share/qtcreator/examples/14-Socket/http_server.py b/share/qtcreator/examples/14-Socket/http_server.py
new file mode 100644
index 00000000000..8bb647116bb
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/http_server.py
@@ -0,0 +1,81 @@
+# port from micropython/examples/network/http_server.py
+import socket
+import network
+import time
+# print(network.LAN().ifconfig()[0])
+# print("Listening, connect your browser to http://%s:8081/" % (network.LAN().ifconfig()[0]))
+
+CONTENT = b"""\
+HTTP/1.0 200 OK
+
+Hello #%d from k230 canmv MicroPython!
+"""
+
+
+def main(micropython_optimize=True):
+    # create a socket
+    s = socket.socket()
+    # resolve the local address and port
+    # Binding to all interfaces - server will be accessible to other hosts!
+    ai = socket.getaddrinfo("0.0.0.0", 8081)
+    print("Bind address info:", ai)
+    addr = ai[0][-1]
+    # set socket options
+    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    # bind to the address
+    s.bind(addr)
+    # start listening
+    s.listen(5)
+    print("Listening, connect your browser to http://%s:8081/" % (network.LAN().ifconfig()[0]))
+
+    counter = 0
+    while True:
+        # accept a client connection
+        res = s.accept()
+        client_sock = res[0]
+        client_addr = res[1]
+        print("Client address:", client_addr)
+        print("Client socket:", client_sock)
+        # non-blocking mode
+        client_sock.setblocking(False)
+        if not micropython_optimize:
+            # To read line-oriented protocol (like HTTP) from a socket (and
+            # avoid short read problem), it must be wrapped in a stream (aka
+            # file-like) object. That's how you do it in CPython:
+            client_stream = client_sock.makefile("rwb")
+        else:
+            # .. but MicroPython socket objects support stream interface
+            # directly, so calling .makefile() method is not required. If
+            # you develop application which will run only on MicroPython,
+            # especially on a resource-constrained embedded device, you
+            # may take this shortcut to save resources.
+            client_stream = client_sock
+
+        print("Request:")
+        # read the request
+        req = client_stream.read()
+        print(req)
+
+        while True:
+            # read the remaining header lines
+            h = client_stream.read()
+            if h == b"" or h == b"\r\n":
+                break
+            print(h)
+        # send the response
+        client_stream.write(CONTENT % counter)
+        #time.sleep(0.5)
+        # close the client stream
+        client_stream.close()
+        # if not micropython_optimize:
+        #     client_sock.close()
+        counter += 1
+        if counter > 20:
+            print("http server exit!")
+            # close the listening socket
+            s.close()
+            break
+
+
+main()
diff --git a/share/qtcreator/examples/14-Socket/iperf3.py b/share/qtcreator/examples/14-Socket/iperf3.py
new file mode 100755
index 00000000000..996836396e9
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/iperf3.py
@@ -0,0 +1,582 @@
+"""
+Pure Python, iperf3-compatible network performance test tool.
+
+MIT license; Copyright (c) 2018-2019 Damien P. George
+
+Supported modes: server & client, TCP & UDP, normal & reverse
+
+Usage:
+    import iperf3
+    iperf3.server()
+    iperf3.client('192.168.1.5')
+    iperf3.client('192.168.1.5', udp=True, reverse=True)
+"""
+
+import sys, struct
+import time, select, socket
+import json
+
+# Provide a urandom() function, supporting devices without os.urandom().
+try: + from os import urandom +except ImportError: + from random import randint + + def urandom(n): + return bytes(randint(0, 255) for _ in range(n)) + + +#DEBUG = False +DEBUG = True + +# iperf3 cookie size, last byte is null byte +COOKIE_SIZE = 37 + +# iperf3 commands +TEST_START = 1 +TEST_RUNNING = 2 +TEST_END = 4 +PARAM_EXCHANGE = 9 +CREATE_STREAMS = 10 +EXCHANGE_RESULTS = 13 +DISPLAY_RESULTS = 14 +IPERF_DONE = 16 + +if DEBUG: + cmd_string = { + TEST_START: "TEST_START", + TEST_RUNNING: "TEST_RUNNING", + TEST_END: "TEST_END", + PARAM_EXCHANGE: "PARAM_EXCHANGE", + CREATE_STREAMS: "CREATE_STREAMS", + EXCHANGE_RESULTS: "EXCHANGE_RESULTS", + DISPLAY_RESULTS: "DISPLAY_RESULTS", + IPERF_DONE: "IPERF_DONE", + } + + +def fmt_size(val, div): + for mult in ("", "K", "M", "G"): + if val < 10: + return "% 5.2f %s" % (val, mult) + elif val < 100: + return "% 5.1f %s" % (val, mult) + elif mult == "G" or val < 1000: + return "% 5.0f %s" % (val, mult) + else: + val /= div + + +class Stats: + def __init__(self, param): + self.pacing_timer_us = param["pacing_timer"] * 1000 + self.udp = param.get("udp", False) + self.reverse = param.get("reverse", False) + self.running = False + + def start(self): + self.running = True + self.t0 = self.t1 = ticks_us() + self.nb0 = self.nb1 = 0 # num bytes + self.np0 = self.np1 = 0 # num packets + self.nm0 = self.nm1 = 0 # num lost packets + if self.udp: + if self.reverse: + extra = " Jitter Lost/Total Datagrams" + else: + extra = " Total Datagrams" + else: + extra = "" + print("Interval Transfer Bitrate" + extra) + + def max_dt_ms(self): + if not self.running: + return -1 + return max(0, (self.pacing_timer_us - ticks_diff(ticks_us(), self.t1)) // 1000) + + def add_bytes(self, n): + if not self.running: + return + self.nb0 += n + self.nb1 += n + self.np0 += 1 + self.np1 += 1 + + def add_lost_packets(self, n): + self.np0 += n + self.np1 += n + self.nm0 += n + self.nm1 += n + + def print_line(self, ta, tb, nb, np, nm, extra=""): + dt = tb - ta + print( + " %5.2f-%-5.2f sec %sBytes %sbits/sec" + % (ta, tb, fmt_size(nb, 1024), fmt_size(nb * 8 / dt, 1000)), + end="", + ) + if self.udp: + if self.reverse: + print( + " %6.3f ms %u/%u (%.1f%%)" % (0, nm, np, 100 * nm / (max(1, np + nm))), end="" + ) + else: + print(" %u" % np, end="") + print(extra) + + def update(self, final=False): + if not self.running: + return + t2 = ticks_us() + dt = ticks_diff(t2, self.t1) + if final or dt > self.pacing_timer_us: + ta = ticks_diff(self.t1, self.t0) * 1e-6 + tb = ticks_diff(t2, self.t0) * 1e-6 + self.print_line(ta, tb, self.nb1, self.np1, self.nm1) + self.t1 = t2 + self.nb1 = 0 + self.np1 = 0 + self.nm1 = 0 + + def stop(self): + self.update(True) + self.running = False + self.t3 = ticks_us() + dt = ticks_diff(self.t3, self.t0) + print("- " * 30) + self.print_line(0, dt * 1e-6, self.nb0, self.np0, self.nm0, " sender") + + def report_receiver(self, stats): + st = stats["streams"][0] + + # iperf servers pre 3.2 do not transmit start or end time, + # so use local as fallback if not available. 
+        dt = ticks_diff(self.t3, self.t0)
+
+        self.print_line(
+            st.get("start_time", 0.0),
+            st.get("end_time", dt * 1e-6),
+            st["bytes"],
+            st["packets"],
+            st["errors"],
+            " receiver",
+        )
+
+
+def recvn(s, n):
+    data = b""
+    while len(data) < n:
+        data += s.recv(n - len(data))
+    return data
+
+
+def recvinto(s, buf):
+    if hasattr(s, "readinto"):
+        return s.readinto(buf)
+    else:
+        return s.recv_into(buf)
+
+
+def recvninto(s, buf):
+    if hasattr(s, "readinto"):
+        n = s.readinto(buf)
+        assert n == len(buf)
+    else:
+        mv = memoryview(buf)
+        off = 0
+        while off < len(buf):
+            off += s.recv_into(mv[off:])
+
+
+def make_cookie():
+    cookie_chars = b"abcdefghijklmnopqrstuvwxyz234567"
+    cookie = bytearray(COOKIE_SIZE)
+    for i, x in enumerate(urandom(COOKIE_SIZE - 1)):
+        cookie[i] = cookie_chars[x & 31]
+    return cookie
+
+
+def server_once():
+    # Listen for a connection
+    ai = socket.getaddrinfo("0.0.0.0", 5201)
+    ai = ai[0]
+    print("Server listening on", ai[-1])
+    s_listen = socket.socket(ai[0], socket.SOCK_STREAM)
+    s_listen.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    s_listen.bind(ai[-1])
+    s_listen.listen(1)
+    s_ctrl, addr = s_listen.accept()
+
+    # Read client's cookie
+    cookie = recvn(s_ctrl, COOKIE_SIZE)
+    if DEBUG:
+        print(cookie)
+
+    # Ask for parameters
+    s_ctrl.sendall(bytes([PARAM_EXCHANGE]))
+
+    # Get parameters
+    n = struct.unpack(">I", recvn(s_ctrl, 4))[0]
+    param = recvn(s_ctrl, n)
+    param = json.loads(str(param, "ascii"))
+    if DEBUG:
+        print(param)
+    reverse = param.get("reverse", False)
+
+    # Ask to create streams
+    s_ctrl.sendall(bytes([CREATE_STREAMS]))
+
+    if param.get("tcp", False):
+        # Accept stream
+        s_data, addr = s_listen.accept()
+        print("Accepted connection:", addr)
+        recvn(s_data, COOKIE_SIZE)
+    elif param.get("udp", False):
+        # Close TCP connection and open UDP "connection"
+        s_listen.close()
+        s_data = socket.socket(ai[0], socket.SOCK_DGRAM)
+        s_data.bind(ai[-1])
+        data, addr = s_data.recvfrom(4)
+        s_data.sendto(b"\x12\x34\x56\x78", addr)
+    else:
+        assert False
+
+    # Start test
+    s_ctrl.sendall(bytes([TEST_START]))
+
+    # Run test
+    s_ctrl.sendall(bytes([TEST_RUNNING]))
+
+    # Read data, and wait for client to send TEST_END
+    poll = select.poll()
+    poll.register(s_ctrl, select.POLLIN)
+    if reverse:
+        poll.register(s_data, select.POLLOUT)
+    else:
+        poll.register(s_data, select.POLLIN)
+    stats = Stats(param)
+    stats.start()
+    running = True
+    data_buf = bytearray(urandom(param["len"]))
+    while running:
+        for pollable in poll.poll(stats.max_dt_ms()):
+            if pollable_is_sock(pollable, s_ctrl):
+                cmd = recvn(s_ctrl, 1)[0]
+                if DEBUG:
+                    print(cmd_string.get(cmd, "UNKNOWN_COMMAND"))
+                if cmd == TEST_END:
+                    running = False
+            elif pollable_is_sock(pollable, s_data):
+                if reverse:
+                    n = s_data.send(data_buf)
+                    stats.add_bytes(n)
+                else:
+                    recvninto(s_data, data_buf)
+                    stats.add_bytes(len(data_buf))
+        stats.update()
+
+    # Need to continue writing so other side doesn't get blocked waiting for data
+    if reverse:
+        while True:
+            for pollable in poll.poll(0):
+                if pollable_is_sock(pollable, s_data):
+                    s_data.send(data_buf)
+                    break
+            else:
+                break
+
+    stats.stop()
+
+    # Ask to exchange results
+    s_ctrl.sendall(bytes([EXCHANGE_RESULTS]))
+
+    # Get client results
+    n = struct.unpack(">I", recvn(s_ctrl, 4))[0]
+    results = recvn(s_ctrl, n)
+    results = json.loads(str(results, "ascii"))
+    if DEBUG:
+        print(results)
+
+    # Send our results
+    results = {
+        "cpu_util_total": 1,
+        "cpu_util_user": 0.5,
+        "cpu_util_system": 0.5,
+        "sender_has_retransmits": 1,
+        "congestion_used": "cubic",
+        "streams": [
+            {
+                "id": 1,
+                "bytes": stats.nb0,
+                "retransmits": 0,
+                "jitter": 0,
+                "errors": 0,
+                "packets": stats.np0,
+                "start_time": 0,
+                "end_time": ticks_diff(stats.t3, stats.t0) * 1e-6,
+            }
+        ],
+    }
+    results = json.dumps(results)
+    s_ctrl.sendall(struct.pack(">I", len(results)))
+    s_ctrl.sendall(bytes(results, "ascii"))
+
+    # Ask to display results
+    s_ctrl.sendall(bytes([DISPLAY_RESULTS]))
+
+    # Wait for client to send IPERF_DONE
+    cmd = recvn(s_ctrl, 1)[0]
+    assert cmd == IPERF_DONE
+
+    # Close all sockets
+    s_data.close()
+    s_ctrl.close()
+    s_listen.close()
+
+
+def server():
+    while True:
+        server_once()
+
+
+def client(host, udp=False, reverse=False, bandwidth=10 * 1024 * 1024):
+    print("CLIENT MODE:", "UDP" if udp else "TCP", "receiving" if reverse else "sending")
+
+    param = {
+        "client_version": "3.6",
+        "omit": 0,
+        "parallel": 1,
+        "pacing_timer": 1000,
+        "time": 10,
+    }
+
+    if udp:
+        param["udp"] = True
+        param["len"] = 1500 - 42
+        param["bandwidth"] = bandwidth  # this should be the intended bits per second
+        udp_interval = 1000000 * 8 * param["len"] // param["bandwidth"]
+    else:
+        param["tcp"] = True
+        param["len"] = 3000
+
+    if reverse:
+        param["reverse"] = True
+
+    # Connect to server
+    ai = socket.getaddrinfo(host, 5201)[0]
+    print("Connecting to", ai[-1])
+    s_ctrl = socket.socket(ai[0], socket.SOCK_STREAM)
+
+    s_ctrl.connect(ai[-1])
+
+    # Send our cookie
+    cookie = make_cookie()
+    if DEBUG:
+        print(cookie)
+    s_ctrl.sendall(cookie)
+
+    # Object to gather statistics about the run
+    stats = Stats(param)
+
+    # Run the main loop, waiting for incoming commands and data
+    ticks_us_end = param["time"] * 1000000
+    poll = select.poll()
+    poll.register(s_ctrl, select.POLLIN)
+    buf = None
+    s_data = None
+    start = None
+    udp_packet_id = 0
+    udp_last_send = None
+    while True:
+        for pollable in poll.poll(stats.max_dt_ms()):
+            if pollable_is_sock(pollable, s_data):
+                # Data socket is writable/readable
+                t = ticks_us()
+                if ticks_diff(t, start) > ticks_us_end:
+                    if reverse:
+                        # Continue to drain any incoming data
+                        recvinto(s_data, buf)
+                    if stats.running:
+                        # End of run
+                        s_ctrl.sendall(bytes([TEST_END]))
+                        stats.stop()
+                else:
+                    # Send/receive data
+                    if udp:
+                        if reverse:
+                            recvninto(s_data, buf)
+                            udp_in_sec, udp_in_usec, udp_in_id = struct.unpack_from(">III", buf, 0)
+                            # print(udp_in_sec, udp_in_usec, udp_in_id)
+                            if udp_in_id != udp_packet_id + 1:
+                                stats.add_lost_packets(udp_in_id - (udp_packet_id + 1))
+                            udp_packet_id = udp_in_id
+                            stats.add_bytes(len(buf))
+                        else:
+                            # print('UDP send', udp_last_send, t, udp_interval)
+                            if t - udp_last_send > udp_interval:
+                                udp_last_send += udp_interval
+                                udp_packet_id += 1
+                                struct.pack_into(
+                                    ">III", buf, 0, t // 1000000, t % 1000000, udp_packet_id
+                                )
+                                n = s_data.sendto(buf, ai[-1])
+                                stats.add_bytes(n)
+                    else:
+                        if reverse:
+                            recvninto(s_data, buf)
+                            n = len(buf)
+                        else:
+                            # print('TCP send', len(buf))
+                            n = s_data.send(buf)
+                        stats.add_bytes(n)
+
+            elif pollable_is_sock(pollable, s_ctrl):
+                # Receive command
+                cmd = recvn(s_ctrl, 1)[0]
+                if DEBUG:
+                    print(cmd_string.get(cmd, "UNKNOWN_COMMAND"))
+                if cmd == TEST_START:
+                    if reverse:
+                        # Start receiving data now, because data socket is open
+                        poll.register(s_data, select.POLLIN)
+                        start = ticks_us()
+                        stats.start()
+                elif cmd == TEST_RUNNING:
+                    if not reverse:
+                        # Start sending data now
+                        poll.register(s_data, select.POLLOUT)
+                        start = ticks_us()
+                        if udp:
+                            udp_last_send = start - udp_interval
+                        stats.start()
+                elif cmd == PARAM_EXCHANGE:
+                    param_j = json.dumps(param)
+                    s_ctrl.sendall(struct.pack(">I", len(param_j)))
+                    s_ctrl.sendall(bytes(param_j, "ascii"))
+                elif cmd == CREATE_STREAMS:
+                    if udp:
+                        s_data = socket.socket(ai[0], socket.SOCK_DGRAM)
+                        time.sleep(0.5)
+                        s_data.sendto(struct.pack("<I", 123456789), ai[-1])
+                        recvn(s_data, 4)  # get dummy response back (potentially queued for later)
+                    else:
+                        s_data = socket.socket(ai[0], socket.SOCK_STREAM)
+                        s_data.connect(ai[-1])
+                        s_data.sendall(cookie)
+                    buf = bytearray(urandom(param["len"]))
+                elif cmd == EXCHANGE_RESULTS:
+                    # Close data socket now that server knows we are finished, to prevent it flooding us
+                    poll.unregister(s_data)
+                    s_data.close()
+                    s_data = None
+
+                    results = {
+                        "cpu_util_total": 1,
+                        "cpu_util_user": 0.5,
+                        "cpu_util_system": 0.5,
+                        "sender_has_retransmits": 1,
+                        "congestion_used": "cubic",
+                        "streams": [
+                            {
+                                "id": 1,
+                                "bytes": stats.nb0,
+                                "retransmits": 0,
+                                "jitter": 0,
+                                "errors": stats.nm0,
+                                "packets": stats.np0,
+                                "start_time": 0,
+                                "end_time": ticks_diff(stats.t3, stats.t0) * 1e-6,
+                            }
+                        ],
+                    }
+                    results = json.dumps(results)
+                    s_ctrl.sendall(struct.pack(">I", len(results)))
+                    s_ctrl.sendall(bytes(results, "ascii"))
+
+                    n = struct.unpack(">I", recvn(s_ctrl, 4))[0]
+                    results = recvn(s_ctrl, n)
+                    results = json.loads(str(results, "ascii"))
+                    stats.report_receiver(results)
+
+                elif cmd == DISPLAY_RESULTS:
+                    s_ctrl.sendall(bytes([IPERF_DONE]))
+                    s_ctrl.close()
+                    time.sleep(1)  # delay so server is ready for any subsequent client connections
+                    return
+
+        stats.update()
+
+# Measured throughput on this port:
+#   udp client ok -- 187 Kbits/sec
+#   tcp client ok -- 249 Kbits/sec
+#   server tcp    -- 250 Kbits/sec
+#   server udp    -- 187 Kbits/sec
+def main():
+    # Options are hardcoded here; see the commented-out argv parsing below
+    # for the equivalent command line flags.
+    opt_mode = "-c"         # -s for server, -c for client
+    opt_host = "10.10.1.94"
+    opt_udp = False         # True for UDP, False for TCP
+    opt_reverse = False     # True to receive instead of send
+
+    # sys.argv.pop(0)
+    # while sys.argv:
+    #     opt = sys.argv.pop(0)
+    #     if opt == "-R":
+    #         opt_reverse = True
+    #     elif opt == "-u":
+    #         opt_udp = True
+    #     elif opt == "-s":
+    #         opt_mode = opt
+    #     elif opt == "-c":
+    #         opt_mode = opt
+    #         opt_host = sys.argv.pop(0)
+    #     else:
+    #         print("unknown option:", opt)
+    #         raise SystemExit(1)
+
+    if opt_mode == "-s":
+        server()
+    else:
+        client(opt_host, opt_udp, opt_reverse)
+
+
+# (On CPython/Linux, ticks_us/ticks_diff and pollable_is_sock would need
+# portable fallbacks; this port only keeps the MicroPython variants.)
+def pollable_is_sock(pollable, sock):
+    return sock is not None and pollable[0] == sock
+
+from time import ticks_us, ticks_diff
+
+main()
diff --git a/share/qtcreator/examples/14-Socket/network_lan.py b/share/qtcreator/examples/14-Socket/network_lan.py
new file mode 100644
index 00000000000..5bbda94a410
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/network_lan.py
@@ -0,0 +1,38 @@
+import network
+
+
+def main():
+    # get the LAN interface
+    a = network.LAN()
+    # check whether the interface is active
+    print(a.active())
+    # disable the interface
+    print(a.active(0))
+    # enable the interface
+    print(a.active(1))
+    # show the current IP, netmask, gateway and DNS configuration
+    print(a.ifconfig())
+    # set a static IP, netmask, gateway and DNS configuration
+    print(a.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8')))
+    # show the configuration again
+    print(a.ifconfig())
+    # switch the interface to DHCP mode
+    print(a.ifconfig("dhcp"))
+    # show the DHCP-assigned configuration
+    print(a.ifconfig())
+    # show the MAC address
+    print(a.config("mac"))
+    # set the MAC address
+    print(a.config(mac="42:EA:D0:C2:0D:83"))
+    # show the MAC address again
+    print(a.config("mac"))
+    # switch back to DHCP mode
+    print(a.ifconfig("dhcp"))
+    # show the final configuration
+    print(a.ifconfig())
+
+
+main()
diff --git a/share/qtcreator/examples/14-Socket/tcp_client.py b/share/qtcreator/examples/14-Socket/tcp_client.py
new file mode 100644
index 00000000000..007fcda380a
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/tcp_client.py
@@ -0,0 +1,41 @@
+# use together with a TCP/UDP socket debugging tool
+import socket
+import time
+
+PORT = 60000
+
+def client():
+    # resolve the server address and port
+    ai = socket.getaddrinfo("10.100.228.5", PORT)
+    #ai = socket.getaddrinfo("10.10.1.94", PORT)
+    print("Address infos:", ai)
+    addr = ai[0][-1]
+
+    print("Connect address:", addr)
+    # create the socket
+    s = socket.socket(socket.AF_INET,
+                      socket.SOCK_STREAM, 0)
+    # connect to the server address
+    s.connect(addr)
+
+    for i in range(10):
+        msg = "K230 tcp client send test {0} \r\n".format(i)
+        print(msg)
+        #print(s.send(msg))
+        # send the string
+        print(s.write(msg))
+        time.sleep(0.2)
+        #time.sleep(1)
+        #print(s.recv(4096))
+        #print(s.read())
+    # wait one second before closing
+    time.sleep(1)
+    # close the socket
+    s.close()
+    print("end")
+
+
+#main()
+client()
+
diff --git a/share/qtcreator/examples/14-Socket/tcp_server.py b/share/qtcreator/examples/14-Socket/tcp_server.py
new file mode 100644
index 00000000000..b2b81ecd92a
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/tcp_server.py
@@ -0,0 +1,68 @@
+# use together with a TCP/UDP socket debugging tool
+import socket
+import network
+import time
+PORT = 60000
+
+
+CONTENT = b"""
+Hello #%d from k230 canmv MicroPython!
+"""
+
+
+def server():
+    counter = 1
+    # resolve the local address and port
+    #ai = socket.getaddrinfo("10.100.228.5", 8000)
+    ai = socket.getaddrinfo("0.0.0.0", PORT)
+    print("Address infos:", ai, PORT)
+    addr = ai[0][-1]
+
+    print("Connect address:", addr)
+    # create the socket
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+    # set socket options
+    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    # bind to the address
+    s.bind(addr)
+    # start listening
+    s.listen(5)
+    print("tcp server %s port:%d\n" % ((network.LAN().ifconfig()[0]), PORT))
+
+
+    while True:
+        # accept a client connection
+        res = s.accept()
+        client_sock = res[0]
+        client_addr = res[1]
+        print("Client address:", client_addr)
+        print("Client socket:", client_sock)
+        client_sock.setblocking(False)
+
+        client_stream = client_sock
+        # send the greeting string
+        client_stream.write(CONTENT % counter)
+
+        while True:
+            # read whatever the client sent
+            h = client_stream.read()
+            if h:
+                print(h)
+                # echo the content back
+                client_stream.write("recv :%s" % h)
+
+                if b"end" in h:
+                    # close the client socket
+                    client_stream.close()
+                    break
+
+        counter += 1
+        if counter > 10:
+            print("server exit!")
+            # close the listening socket
+            s.close()
+            break
+
+#main()
+server()
diff --git a/share/qtcreator/examples/14-Socket/udp_clinet.py b/share/qtcreator/examples/14-Socket/udp_clinet.py
new file mode 100644
index 00000000000..d3f03d29b8b
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/udp_clinet.py
@@ -0,0 +1,38 @@
+# use together with a TCP/UDP socket debugging tool
+import socket
+import time
+
+
+def udpclient():
+    # resolve the server address and port
+    ai = socket.getaddrinfo("10.100.228.5", 60000)
+    #ai = socket.getaddrinfo("10.10.1.94", 60000)
+    print("Address infos:", ai)
+    addr = ai[0][-1]
+
+    print("Connect address:", addr)
+    # create the socket
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+
+    for i in range(10):
+        msg = "K230 udp client send test {0} \r\n".format(i)
+        print(msg)
+        # send the string
+        print(s.sendto(msg, addr))
+        # short delay between packets
+        time.sleep(0.2)
+        #time.sleep(1)
+        #print(s.recv(4096))
+        #print(s.read())
+    # wait one second before closing
+    time.sleep(1)
+    # close the socket
+    s.close()
+    print("end")
+
+
+#main()
+udpclient()
diff --git a/share/qtcreator/examples/14-Socket/udp_server.py b/share/qtcreator/examples/14-Socket/udp_server.py
new file mode 100644
index 00000000000..68e0eed6283
--- /dev/null
+++ b/share/qtcreator/examples/14-Socket/udp_server.py
@@ -0,0 +1,45 @@
+# use together with a TCP/UDP socket debugging tool
+import socket
+import time
+import network
+PORT = 60000
+
+
+def udpserver():
+    # resolve the local address and port
+    ai = socket.getaddrinfo("0.0.0.0", PORT)
+    #ai = socket.getaddrinfo("10.10.1.94", 60000)
+    print("Address infos:", ai)
+    addr = ai[0][-1]
+
+    print("udp server %s port:%d\n" % ((network.LAN().ifconfig()[0]), PORT))
+    # create the socket
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    # set socket options
+    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    # bind to the address
+    s.bind(addr)
+    print("a")
+    # wait a moment
+    time.sleep(1)
+
+    for j in range(10):
+        try:
+            # receive a datagram
+            data, addr = s.recvfrom(800)
+ print("b") + except: + continue + #打印内容 + print("recv %d" % j,data,addr) + #回复内容 + s.sendto(b"%s have recv count=%d " % (data,j), addr) + #关闭 + s.close() + print("udp server exit!!") + + + + +#main() +udpserver() diff --git a/share/qtcreator/examples/15-Utils/db/readme.txt b/share/qtcreator/examples/15-Utils/db/readme.txt new file mode 100644 index 00000000000..a8f1f8dd323 --- /dev/null +++ b/share/qtcreator/examples/15-Utils/db/readme.txt @@ -0,0 +1 @@ +用于存放人脸数据库 diff --git a/share/qtcreator/examples/15-Utils/db_img/id_1.jpg b/share/qtcreator/examples/15-Utils/db_img/id_1.jpg new file mode 100644 index 00000000000..0cb764acf1a Binary files /dev/null and b/share/qtcreator/examples/15-Utils/db_img/id_1.jpg differ diff --git a/share/qtcreator/examples/15-Utils/db_img/id_2.png b/share/qtcreator/examples/15-Utils/db_img/id_2.png new file mode 100644 index 00000000000..95372a281bd Binary files /dev/null and b/share/qtcreator/examples/15-Utils/db_img/id_2.png differ diff --git a/share/qtcreator/examples/15-Utils/dict.txt b/share/qtcreator/examples/15-Utils/dict.txt new file mode 100755 index 00000000000..f43275c9b26 --- /dev/null +++ b/share/qtcreator/examples/15-Utils/dict.txt @@ -0,0 +1,6549 @@ +光 +环 +主 +营 +: +热 +干 +面 +原 +汤 +创 +亿 +8 +1 +0 +香 +花 +桥 +社 +区 +( +街 +道 +) +藍 +色 +经 +典 +承 +接 +国 +内 +外 +针 +梭 +织 +订 +单 +欢 +迎 +来 +料 +加 +工 +P +H +O +E +N +I +X +T +A +电 +话 +3 +6 +4 +9 +5 +B +7 +申 +滨 +路 +爱 +拓 +升 +密 +斯 +全 +屋 +售 +票 +对 +讲 +机 +元 +茶 +刷 +、 +纸 +巾 +无 +限 +极 +安 +得 +装 +饰 +九 +龙 +休 +闲 +足 +浴 +中 +心 +凭 +身 +份 +证 +领 +手 +信 +用 +能 +当 +现 +金 +人 +民 +财 +产 +保 +险 +股 +有 +公 +司 +美 +食 +餐 +厅 +厨 +卫 +韵 +达 +灯 +箱 +裙 +楼 +助 +教 +聚 +时 +地 +线 +2 +- +同 +乐 +坊 +l +n +t +h +e +a +m +o +f +v +秀 +沃 +尔 +玛 +帮 +万 +家 +企 +包 +宁 +波 +埃 +柯 +铜 +阀 +门 +联 +系 +车 +养 +护 +站 +方 +時 +空 +購 +物 +小 +牛 +肉 +萬 +事 +興 +奢 +缇 +郭 +氏 +生 +堂 +, +支 +付 +宝 +和 +微 +都 +可 +集 +团 +纺 +品 +销 +棉 +被 +您 +! +碧 +水 +缘 +座 +椅 +定 +制 +. 
+脚 +垫 +临 +富 +园 +烟 +酒 +业 +幼 +儿 +拼 +音 +寒 +暑 +假 +贺 +佳 +节 +福 +官 +学 +育 +世 +红 +璞 +男 +鞋 +始 +于 +C +点 +粥 +粉 +饭 +类 +满 +送 +板 +栗 +入 +太 +仓 +市 +优 +特 +利 +贸 +易 +麻 +木 +前 +列 +腺 +一 +果 +蜜 +婆 +嘉 +际 +大 +店 +洋 +河 +架 +丰 +鑫 +壁 +软 +背 +神 +童 +文 +具 +梦 +之 +星 +英 +语 +铁 +客 +代 +博 +技 +笑 +甲 +山 +豆 +剪 +发 +想 +成 +行 +旺 +明 +吉 +逸 +夫 +程 +館 +酸 +辣 +盲 +按 +摩 +疗 +祝 +健 +康 +泰 +兴 +场 +监 +督 +管 +理 +局 +城 +分 +间 +室 +所 +域 +冠 +京 +丽 +华 +便 +活 +动 +价 +。 +详 +情 +苑 +村 +南 +烩 +天 +连 +锁 +宏 +汇 +厦 +印 +象 +S +馆 +饮 +何 +叶 +馍 +锦 +标 +志 +上 +海 +黄 +浦 +化 +政 +执 +法 +广 +东 +老 +年 +共 +务 +研 +究 +武 +汉 +档 +案 +L +本 +油 +不 +使 +银 +卡 +德 +壹 +作 +多 +润 +滑 +U +V +W +尚 +约 +超 +越 +展 +港 +体 +彩 +液 +气 +折 +哈 +五 +暖 +哥 +烫 +甬 +涛 +建 +筑 +刚 +网 +纱 +窗 +帘 +风 +眠 +艺 +术 +由 +吧 +肤 +• +形 +设 +计 +羊 +火 +锅 +洛 +塔 +做 +喜 +雪 +诚 +正 +办 +夏 +傅 +西 +服 +双 +梅 +婚 +摄 +项 +租 +房 +沟 +炎 +灰 +指 +址 +二 +层 +速 +宾 +早 +唐 +精 +修 +洗 +衣 +冰 +柜 +琶 +洲 +派 +出 +R +d +u +澳 +投 +资 +号 +居 +介 +昊 +好 +下 +停 +高 +回 +铝 +G +Y +Z +窖 +轩 +苏 +· +御 +钵 +晾 +遇 +见 +祥 +如 +意 +洪 +府 +导 +才 +厂 +直 +沙 +泥 +配 +件 +党 +平 +李 +向 +轮 +周 +口 +颜 +就 +景 +韩 +霞 +医 +礼 +州 +白 +云 +古 +奥 +格 +汽 +新 +北 +烤 +y +长 +辉 +旅 +游 +左 +转 +米 +廣 +告 +焖 +鸡 +镇 +各 +F +s +i +牌 +策 +划 +推 +拉 +开 +锈 +钢 +防 +胎 +祭 +女 +招 +珍 +津 +粮 +维 +通 +子 +权 +交 +咨 +询 +位 +已 +谢 +晚 +末 +百 +友 +低 +至 +傲 +帝 +纪 +图 +徽 +纳 +住 +材 +庄 +b +p +伊 +甸 +劳 +遣 +艾 +灸 +幸 +狐 +狸 +桂 +林 +科 +野 +批 +零 +囍 +鸭 +飞 +雲 +書 +畫 +恭 +头 +袖 +布 +庭 +智 +慧 +D +选 +铺 +烈 +王 +芳 +药 +习 +青 +打 +蜡 +呢 +商 +为 +快 +丁 +舞 +跆 +淀 +委 +备 +煲 +质 +量 +盖 +鲜 +盒 +部 +疆 +辰 +络 +会 +淋 +淮 +膳 +芝 +士 +绒 +衫 +杏 +槐 +院 +胖 +烧 +饼 +条 +寓 +侬 +瞰 +敏 +久 +把 +散 ++ +观 +翠 +阁 +型 +请 +陶 +专 +磊 +喇 +叭 +马 +瓦 +罐 +煨 +寳 +貿 +豪 +吊 +顶 +義 +藝 +術 +顺 +睫 +半 +永 +姐 +擀 +罡 +杨 +待 +阿 +腔 +颌 +晨 +副 +鱼 +移 +川 +床 +铣 +块 +乳 +K +呈 +禾 +扭 +伤 +关 +膜 +骨 +坏 +死 +力 +“ +评 +” +余 +记 +猪 +孕 +婴 +陈 +唯 +旧 +书 +M +羽 +调 +解 +员 +汾 +竹 +味 +雕 +垃 +圾 +走 +进 +益 +寿 +千 +里 +蒲 +庙 +榨 +落 +附 +众 +宫 +廷 +桃 +酥 +昌 +欣 +抗 +裂 +渗 +四 +季 +麦 +硕 +脑 +潮 +汕 +杂 +咸 +容 +纤 +俱 +拆 +迁 +处 +货 +阳 +自 +田 +J +静 +瑞 +日 +贷 +款 +估 +过 +户 +后 +刻 +名 +聘 +师 +费 +课 +夹 +® +况 +源 +税 +征 +收 +馈 +乡 +湖 +井 +靖 +江 +数 +卖 +刀 +郎 +三 +两 +七 +酉 +库 +土 +求 +到 +期 +湘 +妈 +妆 +喷 +画 +卤 +菜 +姚 +表 +驾 +校 +杭 +颐 +秋 +职 +苍 +梧 +球 +遍 +看 +淘 +默 +片 +写 +真 +绘 +亚 +克 +字 +善 +溢 +歡 +衡 +: +胡 +春 +榮 +济 +秸 +坎 +器 +械 +敬 +亭 +律 +皮 +担 +筋 +凉 +灌 +肠 +锐 +隐 +眼 +镜 +造 +免 +放 +盘 +邢 +台 +先 +的 +个 +性 +辅 +构 +消 +残 +r +恩 +饺 +宸 +宇 +露 +样 +炉 +深 +巷 +● +插 +者 +珠 +退 +菱 +鼻 +秘 +传 +统 +酵 +绿 +含 +添 +剂 +蚕 +丝 +套 +松 +虹 +踏 +农 +k +变 +劉 +响 +娟 +呵 +托 +雅 +迪 +合 +受 +十 +远 +立 +盗 +鹦 +鹉 +首 +排 +序 +境 +c +购 +守 +腰 +妹 +流 +鲢 +吃 +g +减 +兰 +额 +存 +预 +置 +悦 +签 +涮 +脆 +栋 +县 +— +影 +视 +听 +诊 +乾 +坤 +盛 +然 +宴 +石 +橱 +梯 +搓 +拖 +鹰 +实 +兄 +弟 +渔 +带 +徐 +頡 +范 +睿 +缆 +押 +简 +胜 +贝 +佛 +玩 +Ω +饯 +炒 +糖 +警 +月 +培 +基 +八 +义 +控 +稀 +闵 +陇 +第 +荣 +棋 +邹 +泉 +池 +甘 +贵 +钦 +巨 +总 +碗 +拿 +雄 +伟 +属 +结 +亮 +姿 +梵 +运 +息 +玺 +竞 +鲲 +鹏 +与 +冈 +瓷 +塑 +照 +重 +庆 +弗 +尼 +留 +铪 +弥 +饵 +镦 +巧 +兹 +宗 +馄 +饨 +码 +融 +釜 +佰 +業 +扑 +换 +屏 +蛋 +糕 +Λ +蔬 +瓜 +墙 +藏 +夜 +盟 +; +翻 +腐 +贴 +砖 +毛 +峰 +г +寄 +私 +边 +煌 +馨 +岭 +朝 +菲 +目 +避 +風 +塘 +式 +劵 +钟 +威 +胶 +z +底 +' +/ +次 +芙 +灵 +刺 +柿 +媒 +妇 +汗 +激 +示 +霍 +强 +题 +复 +哦 +, +霸 +j +欧 +俪 +鸿 +運 +围 +削 +榻 +蛟 +帐 +篷 +振 +反 +郑 +仕 +恒 +闸 +肯 +玻 +璃 +俗 +互 +相 +攀 +比 +滤 +军 +队 +离 +确 +凤 +纯 +枕 +只 +蓝 +筒 +整 +种 +螺 +母 +训 +凯 +省 +伪 +协 +清 +蛙 +频 +央 +奶 +冷 +择 +我 +们 +没 +错 +验 +雷 +杰 +辛 +啤 +谊 +己 +蝎 +组 +腾 +仔 +尾 +巴 +严 +君 +宜 +再 +鲁 +迅 +帽 +颖 +别 +致 +责 +任 +准 +罕 +启 +温 +需 +登 +杜 +良 +其 +湾 +测 +起 +丹 +斓 +感 +未 +鹅 +魅 +族 +妻 +肺 +角 +汪 +豫 +砂 +柳 +磚 +血 +病 +航 +枫 +叉 +浇 +焗 +怡 +稻 +槎 +宽 +串 +综 +治 +报 +勿 +吸 +蒸 +扶 +扁 +沪 +草 +勇 +琪 +丶 +伞 +紫 +虾 +嘿 +冬 +薪 +咖 +啡 +诉 +拌 +炖 +趣 +班 +伸 +缩 +q +酬 +朗 +蔡 +莲 +卷 +圣 +痛 +在 +违 +章 +犬 +姜 +驻 +群 +净 +效 +你 +麒 +麟 +郡 +常 +酷 +享 +补 +》 +今 +蜘 +蛛 +梳 +鼎 +耒 +玥 +莎 +坛 +墅 +暴 +笼 +缔 +敖 +肥 +寰 +浩 +粒 +也 +芦 +敢 +追 +卓 +供 +耳 +焊 +氩 +弧 +赖 +召 +捷 +钣 +虎 +崇 +寺 +H +Z +L +酱 +页 +赁 +稞 +宠 +孚 +寸 +炭 +念 +锋 +雀 +巢 +思 +冻 +羲 +输 +歌 +毂 +改 +曲 +她 +彭 +荒 +坪 +愿 +帆 +洁 +止 +辆 +参 +颈 +鹿 +漆 +森 +骏 +晓 +铭 +这 +是 +裤 +知 
+度 +泵 +谷 +旗 +舰 +漕 +够 +更 +衔 +岁 +还 +犹 +? +刘 +遥 +蹈 +長 +规 +菊 +递 +陕 +萍 +齐 +翡 +赌 +要 +找 +版 +盐 +禁 +充 +豊 +着 +最 +奇 +讯 +读 +鸣 +昕 +诺 +莉 +雨 +熟 +w +荟 +瓶 +缝 +纫 +检 +损 +张 +少 +樱 +箭 +钻 +此 +若 +船 +芯 +& +兵 +割 +摸 +叔 +幅 +震 +讠 +根 +饸 +伴 +唱 +戏 +载 +披 +萨 +蟹 +茜 +均 +翼 +账 +慎 +谐 +兼 +帛 +诱 +惑 +媚 +匕 +貢 +棒 +沂 +禅 +蚊 +趟 +等 +弄 +摆 +兽 +宵 +幻 +】 +突 +破 +扣 +畅 +潢 +瘾 +盆 +算 +隆 +虫 +睛 +鹤 +曼 +尖 +埔 +将 +授 +菘 +驰 +牙 +练 +壶 +岗 +午 +应 +磁 +汝 +財 +進 +俊 +鸟 +绣 +颗 +醋 +贤 +叮 +咚 +粤 +蜂 +播 +铮 +燕 +树 +嘴 +惠 +完 +勤 +钥 +匙 +继 +续 +裕 +Q +幢 +x +奋 +柏 +查 +洱 +途 +录 +轿 +榕 +圆 +磐 +认 +刮 +痧 +斤 +压 +潜 +宿 +咕 +噜 +喵 +覆 +浆 +骜 +仰 +历 +盈 +栏 +镶 +馒 +皇 +铃 +仪 +像 +碟 +诗 +倩 +牢 +國 +步 +缴 +以 +言 +黛 +援 +甜 +麗 +柠 +檬 +→ +买 +宋 +每 +六 +挖 +屯 +掘 +圈 +忆 +蒙 +扯 +汁 +触 +及 +巫 +率 +注 +册 +湯 +厉 +! +赛 +遗 +审 +荨 +祖 +恶 +魔 +瑜 +伽 +他 +链 +尊 +磨 +冀 +孩 +聪 +崧 +澜 +弯 +轨 +劲 +廊 +奠 +晶 +槽 +纹 +镀 +锌 +郸 +疹 +荘 +驿 +耀 +郏 +丨 +桑 +剔 +茴 +朋 +返 +炫 +箐 +嵩 +探 +独 +抛 +猫 +裁 +澄 +嵌 +齿 +蘭 +麺 +臻 +晋 +賀 +值 +罗 +捐 +赠 +靜 +學 +妍 +妙 +桶 +車 +卉 +邮 +婷 +倪 +泾 +钱 +擎 +彪 +痕 +咪 +邀 +伦 +非 +拳 +舍 +绍 +挂 +靠 +丫 +娃 +试 +潭 +苹 +闪 +琴 +声 +睡 +醇 +¥ +冒 +戒 +廉 +棚 +娱 +考 +级 +故 +è +蔚 +泡 +顾 +琳 +杠 +舒 +适 +绕 +荷 +悠 +肝 +胆 +胃 +横 +依 +慕 +势 +袭 +媳 +界 +弘 +眉 +泸 +贰 +傻 +旭 +茂 +茅 +染 +固 +靓 +增 +扫 +燃 +灶 +毒 +烙 +髪 +俏 +紅 +開 +炸 +寻 +% +椹 +酿 +核 +蓉 +绅 +因 +急 +啊 +祯 +宣 +施 +紧 +抵 +邯 +翔 +另 +滋 +瀚 +借 +氢 +沫 +槟 +榔 +览 +玲 +厘 +丸 +亨 +沥 +混 +凝 +肩 +胸 +那 +莊 +梨 +幕 +葛 +黑 +莱 +凡 +剑 +荆 +旋 +从 +臨 +門 +肌 +献 +赏 +许 +丢 +失 +概 +负 +漫 +鮮 +铸 +苗 +乘 +玫 +铂 +嗨 +席 +毅 +岛 +匠 +邦 +農 +肴 +湃 +瑰 +怀 +3 +菠 +萝 +蒂 +泛 +昆 +邻 +右 +勁 +畸 +刑 +辩 +而 +议 +喝 +榞 +莞 +断 +霖 +辽 +乌 +怕 +滩 +奕 +橡 +隔 +圳 +咬 +芬 +馅 +涡 +封 +釉 +飘 +マ +シ +サ +ジ +泊 +扎 +甩 +斬 +访 +稳 +恋 +當 +佩 +黎 +奈 +烘 +棕 +券 +椒 +醛 +引 +裱 +旦 +盱 +眙 +旁 +穗 +赔 +尧 +赵 +难 +绳 +陪 +锭 +卢 +冲 +绝 +揽 +〇 +脸 +拔 +孙 +爆 +饹 +寶 +楚 +岳 +氪 +篇 +捞 +斋 +栓 +端 +扒 +钜 +侨 +桌 +几 +詩 +帕 +絲 +爽 +茗 +编 +發 +救 +孤 +困 +玉 +杯 +涌 +提 +袋 +汛 +署 +褔 +匾 +奖 +煮 +晟 +觅 +罚 +狗 +龍 +氧 +資 +忠 +乖 +馋 +让 +瑩 +繡 +障 +泳 +椎 +蓄 +泽 +兑 +蝶 +击 +描 +吴 +茸 +窝 +柔 +種 +仁 +圃 +笔 +仙 +顿 +舟 +宰 +给 +杆 +亲 +遮 +毯 +陽 +丘 +除 +害 +骑 +韶 +坚 +功 +显 +说 +演 +坐 +産 +冯 +弹 +韦 +橘 +晒 +澡 +斗 +尝 +取 +橙 +摇 +蕊 +殊 +魏 +樊 +模 +束 +卜 +宛 +素 +墓 +积 +透 +鲈 +孟 +枪 +荔 +舌 +坦 +状 +篓 +袜 +虑 +患 +纠 +纷 +崽 +笋 +蓬 +跌 +渐 +翟 +籽 +碍 +疼 +腿 +脊 +轴 +嬰 +翅 +瑾 +丄 +搬 +跃 +伐 +宅 +仟 +岩 +葱 +蘸 +睐 +战 +孝 +( +) +寨 +檀 +楠 +煎 +贫 +饲 +陵 +普 +熙 +宙 +翰 +钅 +袁 +郊 +昶 +捆 +擦 +圪 +硫 +脲 +桐 +矫 +秦 +硅 +藻 +态 +誉 +猛 +腩 +渝 +拾 +挥 +侠 +痔 +瘘 +挡 +堡 +烽 +贾 +華 +采 +予 +辊 +沌 +坝 +堆 +梁 +牡 +熨 +耕 +鹌 +鹑 +豹 +履 +植 +觉 +鲤 +醉 +菇 +筝 +蜻 +蜓 +莫 +闯 +涯 +乃 +剧 +墨 +革 +雾 +掌 +煤 +肾 +扦 +藕 +命 +齋 +漏 +芭 +荧 +創 +偉 +順 +納 +湿 +鸥 +即 +弦 +驶 +疾 +纂 +闺 +察 +枞 +浪 +碳 +盾 +姻 +锥 +滏 +禹 +畵 +闽 +缓 +邝 +桦 +又 +渡 +瘦 +啦 +逍 +爪 +壽 ++ +娌 +繁 +纟 +柴 +翁 +垂 +钓 +促 +沐 +龄 +短 +溶 +淼 +去 +熏 +漾 +咀 +嚼 +壳 +騰 +肚 +了 +敲 +膏 +艇 +卿 +绞 +冚 +肿 +胀 +楷 +瀛 +嫂 +诞 +湛 +灾 +募 +… +漂 +奔 +葡 +萄 +搏 +伍 +曹 +慈 +; +牧 +淞 +熊 +穿 +孔 +沧 +绸 +丧 +葬 +孛 +赢 +聊 +段 +貴 +堵 +滁 +沈 +馥 +冮 +婦 +羅 +废 +荤 +倍 +耐 +姓 +瀘 +痘 +鱿 +仿 +差 +降 +峡 +斜 +慢 +恢 +切 +番 +茄 +薇 +脉 +驭 +尿 +耗 +朱 +疯 +狂 +储 +虔 +砍 +旨 +珊 +萊 +堰 +牵 +阖 +曾 +涎 +蠡 +捕 +莺 +凰 +据 +咏 +悍 += +悟 +夷 +跟 +妊 +枣 +什 +么 +拍 +稽 +炮 +粘 +脱 +樂 +谨 +溪 +董 +氟 +芒 +爵 +吞 +抄 +扬 +识 +Ⓡ +恺 +倾 +妮 +貂 +阪 +赣 +炙 +★ +撕 +焙 +猬 +岸 +腱 +尃 +斑 +頭 +举 +近 +揭 +甫 +必 +橄 +榄 +薯 +叠 +毓 +兆 +⊥ +芊 +朵 +锨 +淳 +糯 +抓 +钧 +闭 +异 +佑 +篮 +丑 +怪 +玖 +腹 +鼠 +赐 +隍 +鳝 +倡 +惊 +阜 +枇 +杷 +㸃 +鸽 +鲫 +沼 +睦 +芜 +绽 +狮 +滬 +瘀 +疚 +秤 +缺 +襄 +鳳 +藥 +凌 +抚 +丞 +栈 +硬 +谭 +亍 +巡 +判 +蒋 +岚 +映 +初 +敌 +曙 +逢 +肘 +筷 +濠 +伯 +葉 +鏡 +菌 +蘇 +尤 +谱 +乔 +貝 +祛 +h +殡 +暨 +殿 +腊 +厕 +迈 +趋 +淇 +桔 +尺 +媄 +奓 +娄 +祺 +希 +望 +叁 +袍 +缸 +挑 +辭 +舊 +歲 +飲 +姣 +艳 +俄 +宦 +塾 +茱 +【 +戴 +玄 +践 +邱 +斌 +候 +弍 +虚 +醒 +镂 +碎 +锤 +# +妥 +《 +鉴 +辑 +骝 +約 +烛 +冶 +乎 +钝 +陂 +愛 +吹 +穆 +辐 +谦 +疮 +粽 +E +V +R +暗 +隹 +亻 +筏 +~ +弱 +索 +娜 +拇 +筛 +杀 +陆 +淡 +兜 +往 +藤 +萃 +榜 +贡 +飾 +經 +綸 +钰 +贞 +颛 +症 +嘻 +褥 +帅 +奉 +盔 +澈 +锯 +灡 +泓 +哪 +彬 +癌 +峩 +芽 +锡 +絮 +鄂 +『 +泗 +砭 +』 +迷 +遁 +羿 +臣 +搭 +饿 +莆 +瀑 +| +笨 +略 +驳 +禧 +簧 +猴 +優 +币 +碱 +熹 +粑 +铰 +辫 +卧 +杉 +危 +豐 +鞭 +記 +兿 +聖 +似 +乙 +胚 +茭 +吻 +碚 +舜 +赫 +否 +鳯 +答 +疑 +磅 +刁 +框 +亏 +柱 
+浮 +归 +撑 +迦 +尘 +銀 +渎 +葵 +偿 +潘 +垣 +终 +忘 +颂 +∧ +И +炔 +氮 +祸 +黔 +侧 +疏 +浚 +嚞 +糊 +句 +扌 +勘 +争 +咅 +圗 +尽 +涂 +胗 +幺 +疤 +嘀 +嗒 +滚 +痣 +逗 +節 +髙 +随 +懋 +畜 +敦 +令 +坑 +栽 +蝴 +跳 +伏 +裹 +懿 +璜 +烨 +匹 +蚝 +偏 +禽 +史 +努 +细 +昇 +晴 +‘ +貌 +缤 +珂 +蕾 +閏 +鞍 +肖 +钉 +島 +の +珑 +璇 +庵 +厝 +戈 +粱 +倒 +嶺 +妞 +赤 +父 +姨 +飙 +狼 +轻 +號 +枢 +纽 +幽 +° +掏 +誠 +闻 +猜 +G +I +A +俭 +皆 +匆 +忙 +贯 +彻 +葒 +蕃 +晏 +柘 +纶 +∶ +喱 +缅 +累 +專 +ㆍ +氣 +跑 +曜 +占 +姆 +蔓 +惦 +倫 +愉 +垦 +洽 +娇 +滘 +泷 +郅 +焕 +顔 +槿 +澧 +箔 +浙 +朕 +衰 +俺 +逆 +é +捧 +奎 +焦 +稷 +铅 +矿 +忄 +韓 +燎 +濤 +钊 +蕙 +携 +榆 +沉 +鳶 +潍 +蹄 +皖 +啵 +铄 +夕 +汥 +乓 +炬 +棵 +衤 +菏 +饶 +’ +问 +賓 +喻 +閣 +薬 +攻 +兔 +熬 +钎 +呱 +谌 +吒 +乱 +邪 +煞 +耄 +梓 +摊 +幂 +豌 +「 +」 +樟 +脖 +苦 +荞 +喽 +攸 +熘 +竺 +蔻 +获 +咻 +薰 +駿 +挝 +镯 +坠 +尹 +萧 +陡 +坡 +輕 +" +窃 +戚 +撒 +煜 +蹦 +颁 +皙 +椰 +愚 +很 +憨 +馬 +壕 +译 +昂 +延 +俞 +茵 +棱 +谈 +桢 +蛳 +炝 +钭 +唇 +點 +審 +喊 +樓 +榭 +琉 +呷 +哺 +③ +巩 +乒 +婕 +蒜 +厚 +媛 +滙 +哲 +沿 +▏ +渭 +硼 +阴 +持 +東 +决 +筹 +并 +隽 +忧 +邑 +骄 +诵 +夬 +沁 +蜀 +卦 +礻 +懒 +浅 +阅 +卯 +炕 +藜 +汐 +莜 +碣 +雞 +艮 +洞 +← +逹 +郝 +乍 +鲨 +湟 +迮 +竿 +葫 +誌 +劝 +浑 +儒 +彦 +燚 +喏 +酪 +極 +抢 +般 +禮 +墩 +珀 +簡 +廖 +稚 +芃 +纵 +灿 +網 +電 +枝 +粟 +吗 +妃 +麵 +催 +著 +仃 +揚 +汀 +绵 +剛 +堅 +▪ +赞 +佬 +该 +萱 +阻 +颊 +羔 +淑 +呼 +铕 +坞 +綢 +盼 +漢 +勐 +晰 +孬 +楊 +徒 +崔 +` +豚 +脯 +酝 +溜 +厢 +沽 +龟 +励 +鳄 +涉 +邺 +笙 +谋 +唢 +呐 +伙 +磷 +溝 +栖 +秃 +肛 +裟 +菩 +绎 +阎 +庚 +彝 +佐 +拨 +勒 +個 +靴 +蜕 +喆 +吕 +狱 +辜 +且 +嫁 +裳 +逊 +丛 +棍 +抽 +叫 +烹 +饪 +键 +粗 +吾 +滇 +喉 +ä +嘎 +芸 +仲 +瓮 +允 +跨 +犀 +煦 +凿 +寬 +刃 +肢 +陳 +猎 +來 +骓 +债 +師 +範 +涤 +锣 +侯 +皂 +棠 +萌 +哒 +摘 +匝 +浓 +骞 +樸 +碑 +耘 +勋 +疣 +叻 +潼 +弓 +须 +趙 +欠 +| +瞳 +堤 +瘤 +輪 +際 +團 +刹 +射 +祎 +驴 +佧 +崎 +礦 +遂 +骆 +驼 +污 +仆 +[ +] +@ +莓 +潞 +腕 +泪 +拐 +菁 +呆 +陟 +诠 +佗 +函 +箕 +浒 +翘 +亘 +酌 +郫 +麓 +鄉 +場 +緣 +璐 +浜 +內 +奂 +揸 +愧 +诸 +届 +凳 +扇 +灏 +佤 +達 +臭 +慶 +嫚 +蚁 +谛 +ɔ +妯 +薛 +娘 +捏 +旱 +蟠 +昔 +課 +挺 +扳 +桩 +籁 +駕 +匯 +亞 +笠 +荠 +郦 +隧 +吓 +禄 +称 +箫 +鋪 +孫 +彼 +韭 +赋 +丙 +昭 +舶 +璟 +憾 +掉 +渣 +煊 +奴 +则 +卅 +秒 +挞 +铖 +颍 +栅 +括 +撸 +鲸 +¥ +驹 +镁 +钛 +覃 +邓 +帜 +鄺 +寝 +涨 +鲍 +郁 +薄 +寇 +咳 +> +琥 +靛 +規 +劃 +會 +顽 +癣 +飯 +垛 +或 +悔 +楽 +徳 +爬 +浏 +燈 +晗 +媽 +馿 +晕 +繪 +圖 +標 +焱 +躁 +麋 +论 +刊 +灞 +傣 +榴 +龚 +罩 +醫 +É +疫 +绥 +拦 +卸 +鎮 +垒 +硚 +連 +较 +窕 +蕉 +勝 +蔷 +裝 +虞 +胳 +膊 +痒 +涵 +粪 +裘 +\ +瀾 +醚 +覺 +烂 +贩 +Ⅱ +亩 +抿 +逐 +參 +З +毡 +肽 +敷 +吖 +滴 +例 +鈴 +抖 +閑 +隋 +骗 +鲮 +玑 +撼 +郴 +[ +撤 +從 +園 +搂 +壤 +遒 +坋 +纬 +讼 +皓 +渊 +虽 +踪 +盤 +嘣 +峨 +嵋 +瑶 +荐 +殖 +鞘 +覌 +蓓 +拢 +涩 +呦 +腌 +苪 +墟 +埠 +喬 +姬 +畐 +砀 +𣇉 +昀 +铵 +夺 +衬 +棺 +窦 +腻 +洒 +枉 +間 +嬉 +舫 +檔 +耍 +鯨 +粼 +珞 +娣 +俩 +圭 +杼 +鸳 +鸯 +現 +鳞 +拥 +絡 +復 +恂 +顏 +軒 +茨 +厄 +雍 +倚 +雁 +苔 +藓 +& +柚 +亢 +逃 +眸 +馐 +矾 +鹭 +爸 +缎 +耶 +偷 +镗 +氛 +醪 +糟 +溉 +剐 +錦 +偶 +憩 +镐 +錧 +庞 +賢 +戶 +鹃 +兮 +钩 +邨 +覚 +屈 +娅 +俵 +敟 +瘊 +阶 +遵 +曦 +妤 +茉 +滞 +珏 +啰 +径 +勃 +篱 +扩 +姑 +厮 +職 +㐂 +垚 +癫 +痫 +茯 +苓 +K +N +膝 +鷹 +熔 +窑 +镖 +螃 +泌 +础 +錢 +锂 +侣 +桉 +霜 +魂 +凉 +役 +塗 +凬 +糙 +粹 +纲 +滕 +濮 +闿 +毕 +昵 +鄭 +哚 +椿 +馫 +蚨 +莹 +泼 +ü +仞 +肇 +砼 +枱 +屌 +跖 +佘 +抱 +見 +芷 +砚 +岢 +晖 +糁 +濟 +翌 +瘢 +疙 +瘩 +噢 +拜 +箩 +蚂 +菀 +芹 +劈 +矸 +痨 +嫩 +鼓 +鲱 +怎 +虏 +靡 +皱 +释 +鴻 +滿 +糍 +㐱 +烊 +霆 +骐 +桼 +沾 +苟 +徕 +碾 +邵 +崛 +潤 +揪 +佶 +雎 +臊 +瞬 +岐 +棟 +锻 +❋ +惩 +淄 +# +蛤 +瑙 +逅 +凹 +凸 +茬 +咽 +彤 +劇 +瓣 +侈 +惜 +咔 +讨 +孖 +氨 +酯 +賴 +漳 +嘟 +竖 +礴 +芮 +雯 +词 +塞 +柒 +趾 +趴 +锘 +銘 +朴 +歸 +弋 +〉 +琼 +蕴 +符 +谣 +肃 +谥 +荥 +氵 +脂 +崃 +㙱 +挣 +瑭 +绶 +漯 +鬼 +骋 +姗 +崖 +壬 +祠 +織 +唔 +堑 +肪 +‧ +炊 +笃 +產 +苝 +埌 +竭 +析 +琅 +穴 +棘 +铎 +戀 +亦 +栾 +睢 +邸 +珈 +朽 +刨 +褂 +啃 +操 +丈 +機 +構 +魚 +汶 +閃 +膽 +陋 +哮 +喘 +帼 +澤 +綫 +氰 +胺 +些 +呀 +紗 +飬 +勺 +荻 +叙 +嘢 +霉 +│ +胞 +熠 +踩 +臂 +犯 +罪 +婁 +態 +陌 +窄 +伈 +壮 +杖 +跪 +帥 +衢 +燦 +燘 +仅 +扮 +闷 +悸 +铱 +循 +剃 +哎 +茹 +闫 +① +蘑 +钙 +⑪ +爍 +_ +應 +粵 +挽 +䝉 +尓 +枸 +杞 +握 +濑 +鏢 +卑 +蛇 +沔 +撬 +碶 +簸 +耦 +颠 +② +醴 +遠 +谅 +F +T +窨 +哨 +拱 +笛 +硒 +糜 +㎡ +瞿 +喀 +寕 +夀 +唛 +哆 +雙 +訓 +孵 +挎 +闰 +谚 +嗽 +戋 +烜 +茫 +護 +膚 +迹 +莘 +既 +獭 +鎖 +2 +輝 +講 +複 +荫 +a +r +黃 +琦 +廰 +懮 +務 +幛 +哇 +杳 +辖 +褪 +栎 +/ +挫 +述 +炼 +懂 +误 +* +歇 +籍 +抑 +舱 +捡 +矩 +毫 +诈 +株 +嘛 +抹 +渠 +它 +- +拟 +挤 +穷 +廿 +羞 +谁 +截 +钳 +搞 +填 +但 +焰 +炳 +匀 +蜗 +饱 +酶 +奏 +± +吨 +× +某 +悬 +暇 +礁 +辈 +毁 +契 +亡 +悉 +稍 +绑 +骤 +尴 +尬 +澎 +韧 +烷 +~ +堪 +詹 +搜 +妾 +祁 +1 +惧 +酮 +蚌 +恨 +谜 +绩 +叹 +侦 +$ +朔 +阵 +惯 
+烁 +绪 +堇 +燥 +灭 +忽 +彰 +哟 +歧 +败 +烦 +恼 +逾 +. +肆 +虐 +枚 +距 +喔 +翎 +伶 +措 +却 +帖 +竟 +拒 +浸 +褐 +圩 +勾 +埋 +驱 +吐 +阡 +柑 +骚 +氯 +磺 +仍 +啥 +匪 +臀 +蛮 +咋 +剥 +孜 +硝 +钮 +潇 +砸 +無 +遭 +暂 +痴 +梗 +挪 +赴 +胁 +惨 +衍 +霾 +℃ +扰 +溴 +酰 +轧 +弃 +瑕 +苞 +踢 +迫 +妖 +畏 +² +恤 +缕 +厌 +逻 +枯 +稣 +? +迩 +挚 +擅 +删 +摔 +岔 +唤 +庐 +宪 +隙 +忍 +勉 +陀 +摧 +矮 +耽 +剩 +榛 +蚀 +峻 +※ +烯 +囊 +侵 +愈 +雇 +亳 +泄 +欲 +浣 +讽 +噶 +瑚 +瑟 +羚 +赶 +拯 +阔 +淆 +雌 +坂 +恰 +哭 +慌 +碌 +酚 +祈 +琢 +慰 +骂 +羯 +悲 +裸 +筱 +替 +欺 +碰 +桓 +躺 +稿 +螨 +矛 +孢 +恐 +怖 +镊 +敛 +惟 +甚 +C +拽 +俯 +叛 +诬 +陷 +鸦 +< +爷 +黏 +噪 +仑 +璨 +仗 +辟 +闹 +褚 +咯 +贼 +捉 +唑 +锰 +钠 +秉 +苣 +秩 +聿 +罢 +僧 +嫌 +☆ +钒 +乏 +阮 +痿 +拘 +肋 +漠 +婿 +癸 +髓 +璧 +渍 +喂 +镍 +隶 +疲 +炜 +旬 +哑 +耿 +斥 +膀 +轰 +惭 +恳 +瘪 +哝 +嗓 +泣 +忸 +怩 +劫 +捂 +嚎 +悄 +蹲 +黯 +咧 +贻 +搅 +瞧 +柬 +蠢 +垮 +怒 +睬 +扛 +颤 +眯 +蟆 +吼 +窘 +吆 +滔 +凶 +狠 +愤 +佣 +聂 +跻 +迟 +脾 +凑 +绉 +抬 +吵 +瞌 +耸 +牺 +牲 +瞪 +膨 +惹 +揉 +懦 +犟 +憋 +绰 +惋 +咙 +蔼 +躲 +狡 +黠 +淌 +辕 +辙 +搁 +呃 +吩 +咐 +嚷 +庸 +咱 +噩 +斩 +哧 +歹 +昏 +绊 +歪 +妨 +吟 +啼 +慷 +慨 +忿 +眨 +漓 +轶 +扔 +贪 +譬 +谎 +捶 +哼 +窍 +啪 +绢 +冤 +屁 +屉 +惕 +擂 +坟 +屡 +辨 +蔑 +脏 +嗯 +擒 +谍 +馁 +愁 +忌 +狭 +怔 +晃 +蜷 +辞 +庇 +擞 +兢 +屑 +拎 +蠕 +睁 +沸 +衷 +矢 +吁 +豁 +曝 +怅 +踮 +坯 +敞 +婉 +狈 +憎 +哐 +跺 +踹 +婶 +羡 +耻 +挨 +搔 +佼 +唠 +鄙 +哀 +梆 +犒 +舅 +妄 +佯 +嘘 +柄 +舆 +荡 +惴 +夸 +瞒 +霹 +雳 +嗬 +瞻 +挠 +缠 +馊 +踱 +愣 +堕 +讳 +漉 +潦 +拭 +歉 +趁 +翩 +僵 +摞 +侄 +怵 +魁 +簿 +怨 +叽 +怜 +瞥 +嘲 +揍 +抡 +唬 +赦 +咄 +逼 +侮 +蹑 +霎 +眶 +诀 +篆 +傍 +瘫 +魄 +捅 +叨 +噼 +祟 +哄 +捣 +仇 +嗷 +娑 +啸 +弛 +捺 +恪 +殷 +缭 +锲 +爹 +拄 +炯 +辱 +讶 +撇 +诡 +睽 +掀 +沮 +岂 +昨 +宕 +趔 +颓 +斟 +蜇 +掖 +诧 +箍 +羹 +诌 +唉 +嚓 +懈 +岑 +趄 +奚 +沓 +懑 +沛 +○ +贬 +忐 +忑 +聋 +蚤 +誓 +怂 +恿 +砰 +拙 +冥 +谓 +襟 +掺 +楞 +咒 +鞠 +诩 +裆 +蹚 +髦 +劣 +匿 +抒 +垢 +嗦 +扉 +苛 +贿 +赂 +搡 +蹭 +鲛 +攥 +舵 +塌 +胫 +琐 +癞 +晦 +崩 +夭 +搽 +咛 +嗖 +褒 +悚 +诲 +怦 +懊 +踊 +揶 +揄 +躬 +懵 +渴 +悯 +喳 +垠 +撞 +吱 +叼 +熄 +吝 +骷 +髅 +趿 +俘 +僻 +猾 +赚 +曰 +抠 +阐 +倦 +楔 +迂 +逛 +铆 +凛 +奸 +逝 +寞 +嘤 +昧 +哗 +癖 +坷 +寂 +姊 +汲 +惮 +呗 +皈 +霄 += +鲶 +褀 +芋 +張 +臺 +< +> +鳌 +汴 +j +昼 +剁 +侃 +體 +硂 +啄 +_ +薏 +砌 +\ +羌 +鹞 +碁 +樽 +畔 +疱 +" +辋 +毽 +↑ +尕 +稠 +獨 +玮 +桁 +莅 +卵 +捌 +鱬 +忻 +醍 +铧 +№ +扞 +涧 +牟 +锚 +浔 +傢 +俬 +] +垧 +涇 +糠 +麸 +掂 +蔗 +笺 +隅 +瘁 +巍 +氙 +葆 +霏 +@ +叢 +秆 +鶏 +缪 +峪 +斐 +缙 +甄 +钯 +` +胭 +眩 +統 +鄱 +盅 +嵊 +铲 +菓 +❤ +呕 +泻 +龅 +圧 +洺 +呛 +π +翊 +餘 +翥 +芎 +铬 +沱 +婵 +裔 +橦 +暢 +煸 +唻 +腋 +莽 +镭 +穂 +樵 +蕲 +乞 +暄 +剖 +狄 +蓟 +捍 +鼾 +珙 +拷 +屠 +茁 +堃 +裴 +皋 +炽 +屹 +頔 +剎 +迺 +廬 +兒 +膠 +脐 +颢 +畈 +竣 +卒 +缦 +蟾 +艰 +夢 +崋 +萘 +撮 +倌 +晞 +荪 +痹 +炤 +娓 +傳 +琨 +筠 +蕳 +髮 +樹 +溃 +疡 +瑪 +峯 +恕 +勑 +洼 +④ +蘘 +鰻 +裡 +夙 +昱 +谕 +钞 +赊 +琵 +坨 +哓 +寅 +邂 +锟 +鷄 +環 +縣 +痰 +矶 +飛 +昽 +痤 +皲 +霓 +馕 +娲 +冉 +赉 +匍 +瓢 +廠 +岙 +親 +稼 +勅 +锄 +^ +為 +戎 +麯 +绮 +* +铠 +設 +浠 +榈 +瞄 +芥 +钿 +泺 +氽 +鴿 +涞 +諾 +麾 +蔺 +祙 +仨 +芪 +囧 +蝇 +褊 +溧 +姥 +嚏 +嗅 +嗡 +崂 +癜 +喨 +扙 +‰ +颅 +恍 +惚 +侍 +↓ +汊 +岽 +涕 +峙 +甏 +溸 +枭 +億 +掩 +蒄 +涫 +拴 +扪 +爺 +補 +歆 +嶪 +遐 +囱 +灼 +札 +邳 +袄 +〈 +岱 +籣 +鐵 +锴 +豉 +鍋 +鋒 +赎 +砋 +垓 +頤 +蓮 +崴 +} +炀 +{ +様 +趵 +飚 +耙 +粢 +泩 +皿 +於 +哔 +匡 +枋 +柛 +溯 +砥 +砺 +係 +犇 +灣 +靳 +禺 +饦 +區 +铛 +鄯 +滟 +鲩 +鈺 +黍 +跷 +楂 +潔 +埗 +靈 +徜 +徉 +嘬 +戊 +铞 +鳅 +秭 +í +猗 +聲 +旌 +酩 +馏 +憬 +實 +鹊 +總 +衖 +矗 +蝉 +囡 +疥 +缨 +ʌ +聞 +钾 +廟 +沅 +娠 +骁 +鍊 +葳 +耵 +沣 +鄢 +笈 +圜 +胪 +啾 +瑁 +蒡 +廓 +夯 +臼 +湓 +煋 +丼 +珺 +瘙 +仺 +矽 +動 +Ƨ +汆 +宓 +弈 +翱 +³ +処 +専 +賣 +跤 +镒 +娽 +蔽 +雏 +琛 +巅 +虱 +龢 +滢 +踺 +壓 +阙 +и +绨 +铍 +斛 +龛 +缐 +飨 +忱 +韬 +嵘 +類 +聯 +嗝 +′ +町 +梏 +蛊 +瑛 +伢 +囗 +淅 +協 +爾 +讴 +菡 +桖 +嗲 +囿 +湄 +诛 +憶 +荃 +棛 +鵬 +屿 +夌 +俨 +弊 +亟 +娶 +缥 +缈 +― +舐 +忏 +瞭 +觑 +岌 +尸 +乩 +砝 +傀 +儡 +汰 +妓 +淫 +轼 +阗 +曳 +岿 +莒 +僚 +掷 +绠 +浊 +眢 +矣 +殃 +哉 +晤 +婢 +鼋 +鳏 +曩 +湫 +偃 +袱 +嶷 +惫 +帚 +悼 +倘 +腑 +撺 +﹒ +涸 +祀 +遏 +猿 +辄 +俟 +绛 +绡 +叩 +呻 +迄 +寐 +逵 +涅 +蒿 +滥 +艘 +镣 +铐 +焉 +惆 +窥 +} +妗 +诘 +簪 +氆 +氇 +兀 +匈 +唾 +渺 +冕 +舷 +僮 +笥 +怼 +伺 +溥 +杵 +捽 +叱 +贱 +袤 +■ +寡 +慑 +馔 +匮 +寥 +捭 +颇 +噤 +媪 +垄 +檄 +顷 +暮 +藉 +莠 +髻 +疵 +窜 +啖 +漱 +溟 +孰 +惬 +旎 +骘 +恙 +瘥 +氓 +黝 +豕 +痢 +鸢 +嫔 +韪 +讷 +磕 +狲 +睹 +︰ +辗 +跬 +瘸 +欷 +抨 +掠 +撩 +睒 +蟒 +涣 +骇 +嫉 +妒 +啬 +驯 +颔 +◆ +舁 +娥 +椭 +诣 +膦 +帷 +摹 +嫦 +狎 +龋 +訾 +涝 +槛 +蛹 +滦 +盏 +胧 +妪 +龁 +凄 +盎 +劾 +觇 +{ +渲 +髯 +衾 +孑 +婺 +萦 +谒 +惰 +桎 +婊 +鳃 +褫 +怙 +à +迥 +鼍 +赀 +─ +甾 +裨 +遽 +瘟 +娡 +陛 +囚 +哩 +浐 +扈 +慵 +桅 +兖 +酊 +舄 +蹴 +佟 +缢 
+啜 +吏 +哌 +赡 +掣 +萎 +悴 +屦 +扃 +缚 +棹 +奘 +闱 +唆 +逶 +迤 +绷 +苡 +纨 +郜 +辇 +蒗 +阑 +簇 +怠 +雉 +嘱 +悖 +娩 +殆 +腥 +咫 +阱 +驮 +焚 +瞅 +佻 +聆 +藩 +嗪 +捋 +袴 +泯 +揣 +拂 +袈 +衅 +瞑 +愕 +彀 +蹊 +榇 +滓 +敝 +颦 +咎 +斧 +笞 +脓 +驺 +麂 +腮 +濒 +迭 +翦 +辔 +汹 +隘 +伉 +臧 +癀 +觊 +觎 +琏 +霁 +丕 +龈 +赍 +谴 +谏 +苎 +睾 +祚 +庥 +瘠 +耎 +缜 +秽 +沦 +咤 +⑾ +钏 +抟 +砧 +戍 +衙 +攫 +藿 +愦 +盂 +鸩 +眷 +喧 +瘗 +瞽 +幔 +恃 +苯 +袒 +劬 +踞 +匣 +俚 +眈 +碉 +啻 +〔 +〕 +撰 +戌 +豳 +銮 +亵 +吮 +谬 +镑 +綦 +箸 +褶 +痪 +缉 +嗤 +濡 +膛 +睚 +腆 +糗 +髭 +尉 +锹 +骸 +赟 +殉 +攒 +饷 +茎 +铿 +歼 +挟 +鲠 +峭 +靶 +笄 +碘 +瞩 +涔 +鳖 +抉 +貉 +睇 +嫖 +娼 +衲 +⑦ +讹 +禀 +倭 +徙 +叟 +趺 +毙 +伫 +鸾 +朦 +濯 +怏 +蹙 +玳 +偕 +粳 +驸 +旷 +卞 +爇 +猝 +溺 +喙 +瞠 +□ +昙 +檐 +窠 +蟋 +禳 +逡 +攘 +诫 +穹 +磋 +奄 +踝 +骅 +噬 +彊 +榷 +⑵ +夤 +筐 +璀 +忤 +赧 +篝 +豺 +徘 +徊 +晌 +孺 +丐 +戟 +飕 +蒽 +褓 +遶 +嗥 +纭 +溲 +褴 +庶 +辍 +篡 +剿 +畀 +逮 +酆 +猖 +闾 +犁 +纣 +镌 +怯 +墉 +酣 +溅 +胱 +酋 +铀 +觥 +舛 +唏 +鏖 +肮 +婪 +遹 +呶 +嫡 +倔 +幌 +殂 +戮 +侑 +憧 +赘 +赃 +筵 +呜 +饥 +锷 +鬓 +诮 +诋 +瞎 +祜 +毋 +廛 +迕 +恝 +峥 +颚 +缗 +遴 +蓖 +唁 +恬 +桀 +骠 +獐 +踵 +霭 +剽 +洟 +姝 +雹 +锢 +霣 +溷 +髫 +寤 +惶 +歔 +吭 +俸 +哂 +濂 +厥 +皎 +骡 +喹 +篙 +扼 +咆 +敕 +伎 +嶂 +盯 +狩 +殴 +镪 +蛆 +镳 +骛 +坌 +邴 +谙 +飒 +琮 +Р +П +О +旖 +俑 +饕 +⑤ +糅 +撵 +牯 +蹿 +砣 +婧 +姮 +甥 +紊 +踌 +躇 +谲 +掐 +璋 +谀 +噱 +缄 +嗜 +贮 +嗔 +蚡 +髋 +迸 +侏 +箦 +椽 +蹰 +醮 +萤 +邈 +橐 +栉 +猕 +珰 +恻 +臾 +祷 +兕 +奁 +赈 +蚓 +骼 +澹 +伛 +偻 +俎 +傩 +纾 +鬣 +烬 +钗 +揖 +⑧ +怿 +暧 +鲧 +瞟 +袅 +β +呓 +赇 +蜈 +拮 +谑 +樯 +囤 +氤 +氲 +阕 +宥 +喋 +卮 +娈 +嘶 +圹 +嬖 +诏 +酞 +罄 +恹 +淹 +锏 +蜚 +矜 +蚣 +邃 +鸠 +疸 +掼 +栩 +洮 +耜 +毗 +頫 +畴 +痞 +躯 +悒 +孽 +梃 +绯 +嘈 +诿 +骰 +鬟 +崭 +铙 +斡 +袂 +彷 +渤 +疋 +痉 +挛 +眦 +芈 +啕 +纰 +刍 +忒 +祗 +膺 +畲 +獬 +豸 +Ⅲ +﹐ +庖 +啧 +壑 +襁 +痈 +恸 +诟 +楹 +吠 +痊 +荼 +樾 +苌 +讪 +蹬 +贳 +娴 +潸 +搪 +倨 +纥 +醺 +忡 +拣 +蟭 +鑙 +蜿 +蜒 +酡 +罹 +谩 +◎ +溍 +锗 +麽 +遨 +亥 +泮 +樗 +捎 +伧 +牦 +憔 +幡 +煽 +郪 +眺 +俐 +冗 +漪 +蚩 +痂 +椟 +漩 +嗟 +诽 +谤 +枷 +饽 +棣 +卬 +幞 +帔 +镬 +牍 +诳 +詈 +阆 +掇 +荏 +觌 +逑 +稔 +歙 +缮 +盹 +儋 +厍 +睨 +畦 +酗 +栀 +逞 +徇 +蚯 +摒 +炷 +鹜 +鹂 +谶 +绚 +臬 +罔 +枥 +瓯 +甑 +亓 +庠 +唿 +拗 +谗 +窟 +噎 +岫 +凋 +叵 +牒 +簋 +蛰 +噔 +拚 +鸮 +岖 +蹂 +躏 +徨 +掳 +涪 +屎 +绾 +箧 +拈 +茕 +殒 +黟 +薜 +噉 +嫣 +戛 +涟 +冁 +邋 +遢 +菽 +悭 +囔 +彘 +徼 +⑨ +锺 +顼 +掬 +廪 +捻 +俾 +愆 +窒 +釂 +後 +嚣 +腓 +哽 +铉 +靥 +颧 +疟 +雒 +搀 +汞 +阂 +杲 +唧 +佞 +娆 +妩 +浃 +叡 +⑩ +蹇 +祐 +缀 +谄 +梢 +臆 +胰 +蠹 +胤 +━ +狙 +谳 +俛 +翕 +瓠 +盥 +咂 +衿 +鐘 +惇 +な +て +诅 +畿 +枳 +跛 +泫 +孳 +巉 +飓 +迨 +垩 +焘 +– +恚 +箴 +疽 +讣 +窈 +妳 +噫 +魍 +魉 +爨 +╱ +诙 +狰 +狞 +踣 +汜 +尻 +缁 +⑥ +犷 +闼 +珩 +觞 +鸵 +蝠 +擢 +拧 +蓼 +晁 +瘴 +槊 +邕 +粜 +縯 +豭 +媵 +佚 +衩 +阊 +坳 +湍 +⑴ +铨 +俅 +嚬 +粕 +罘 +畑 +瞀 +瓻 +蹶 +搴 +祧 +冢 +秧 +缰 +邬 +诃 +聩 +糌 +骈 +佈 +┌ +┐ +蜥 +蜴 +痍 +∈ +墀 +渥 +缧 +孥 +咿 +勖 +恫 +刽 +嗣 +郓 +惘 +羁 +蝗 +枵 +绦 +弩 +踰 +馓 +蓦 +黜 +苷 +胯 +遑 +侩 +铤 +惺 +桨 +诨 +砾 +磬 +龊 +骊 +喃 +鬲 +渖 +锉 +坍 +鲇 +苒 +腼 +狻 +猊 +廨 +诒 +蛾 +爰 +裾 +舔 +桧 +裀 +悻 +讫 +奭 +戳 +膘 +倬 +殚 +峤 +颉 +戾 +葺 +薮 +涓 +卺 +饴 +椁 +榧 +镵 +怆 +怛 +翳 +踉 +挈 +迢 +踧 +羸 +胄 +戗 +價 +浼 +喟 +菟 +驷 +俳 +簏 +僦 +桠 +Ⅰ +咦 +^ +掰 +彗 +蝼 +匳 +胥 +⑿ +弑 +冏 +愠 +陨 +罂 +倜 +傥 +搠 +镛 +傈 +僳 +嫜 +鬻 +噻 +鸬 +鹚 +腉 +摁 +欻 +牖 +鹄 +√ +腭 +缵 +肄 +唳 +÷ +钨 +◇ +鸨 +撝 +挹 +黾 +倏 +绐 +麝 +蝙 +睑 +儆 +牝 +猷 +≤ +裰 +啁 +甭 +聒 +蹋 +蓐 +耆 +闳 +骥 +渚 +锽 +逋 +贽 +跋 +獠 +虬 +铢 +嘹 +α +羟 +匐 +肱 +椀 +焯 +鳍 +潺 +殛 +妁 +傕 +蛀 +巿 +偎 +芫 +狝 +楮 +淤 +絷 +珅 +肓 +犊 +镕 +魇 +∩ +氦 +贲 +脔 +窭 +谟 +愫 +媲 +珥 +旃 +磔 +嶙 +峋 +陬 +喑 +琖 +徭 +峦 +摈 +猱 +蕨 +婀 +Ⅳ +舂 +夥 +藐 +驽 +骀 +帏 +谪 +弁 +襆 +镰 +⒀ +窬 +棂 +鞫 +诰 +皴 +玷 +跣 +恣 +绫 +钤 +怍 +篑 +腈 +涿 +姒 +冽 +埒 +巳 +獗 +啮 +阈 +绔 +媾 +簌 +钺 +侥 +砒 +劙 +峇 +阏 +榫 +旒 +偌 +罴 +钼 +坭 +纮 +劓 +刖 +缞 +绖 +苫 +苻 +猢 +脍 +徵 +燠 +冑 +帧 +茧 +罥 +幄 +踔 +愬 +瓒 +辘 +猥 +槃 +荀 +酹 +∵ +锱 +﹑ +囫 +囵 +戢 +愍 +縠 +屣 +忝 +≠ +揆 +崚 +犴 +蹒 +佃 +楫 +獾 +嗾 +窿 +苇 +薨 +绌 +荚 +蕤 +逦 +锵 +耋 +佝 +桡 +晡 +钲 +槌 +檠 +鬃 +讥 +訇 +搢 +泠 +歘 +泅 +暾 +孪 +淬 +妲 +殓 +愎 +祇 +厩 +剜 +蛔 +俦 +迓 +藁 +凇 +跄 +萸 +嗄 +哙 +舀 +珐 +刓 +赝 +噙 +缱 +绻 +遘 +鞚 +媸 +戆 +嵯 +骶 +圯 +仡 +鳜 +赅 +愀 +殇 +膈 +辎 +馀 +滂 +▲ +乂 +襦 +葩 +壅 +砷 +巽 +瀹 +蹀 +躞 +嗫 +嚅 +筮 +瘳 +楗 +诤 +嗳 +皤 +柩 +剌 +忖 +殪 +髑 +钹 +嗌 +蹩 +缒 +囹 +圄 +讦 +钡 +蜍 +臛 +喁 +偈 +氡 +阍 +殁 +淙 +枰 +棰 +轲 +楣 +陲 +蛎 +悌 +岬 +邰 +臃 +搒 +讙 +⒈ +粲 +邛 +粝 +欤 +髀 +豨 +凫 +苋 +榼 +飧 +姹 +阃 +墠 +榱 +畎 +忪 +衽 +腴 +耨 +扢 +氅 +谧 +搐 +罅 +絜 +顒 +诂 +悞 +殄 +≥ +堙 +噭 +橇 +眛 +缳 +釐 +泞 +菅 +汭 +爲 +骧 +湮 +捱 +暹 +噗 +镞 +斫 +仫 
+娉 +铳 +碓 +夔 +嫱 +跸 +蛐 +拊 +绀 +疃 +跚 +蓍 +谯 +柢 +钚 +茆 +蜃 +鹳 +刭 +锑 +暲 +篾 +饬 +蒺 +啶 +隗 +燔 +趸 +弼 +燹 +胾 +旻 +浍 +踽 +阉 +衮 +眇 +芍 +绁 +浞 +祓 +嘌 +刈 +葭 +鹫 +揩 +鄄 +⑶ +煅 +炟 +觖 +贇 +魑 +弭 +瓴 +圉 +竽 +莴 +撷 +疝 +镉 +鹘 +觐 +甙 +縢 +嬛 +柈 +僖 +舡 +谮 +孱 +岷 +荜 +坼 +鞑 +⒊ +湎 +赭 +嗑 +碴 +啐 +鲑 +鸱 +芘 +闟 +慙 +焜 +汩 +隳 +芡 +茏 +踟 +赪 +侗 +狒 +窣 +谇 +瞾 +郯 +螫 +纔 +澍 +泱 +瘐 +伻 +蹻 +烝 +燮 +咣 +旄 +鹗 +擘 +酢 +篁 +△ +悫 +淖 +猩 +皑 +戕 +恽 +纻 +髃 +镝 +碛 +侪 +绺 +癔 +谆 +懜 +朐 +阚 +鈇 +刎 +苕 +匏 +蹉 +稗 +郄 +虮 +蛩 +嬗 +儇 +蔫 +豢 +椐 +蚰 +昃 +柞 +峄 +蛲 +曷 +赳 +珮 +杪 +虢 +螂 +呤 +唶 +昴 +鄣 +茔 +仄 +劭 +鞣 +杓 +姁 +薤 +膻 +氐 +醵 +杌 +笫 +穰 +螭 +跂 +褛 +燧 +郢 +哏 +撂 +韂 +﹖ +痼 +琰 +脁 +隼 +穑 +槁 +羖 +僭 +蒯 +孀 +骖 +龌 +潴 +﹔ +盍 +莩 +讵 +跽 +觳 +垝 +橹 +钴 +缶 +鸷 +遛 +翮 +鹣 +汨 +珪 +祉 +鞅 +怫 +缯 +噌 +濞 +庑 +斲 +洙 +趹 +玕 +颀 +轵 +髡 +嘭 +讧 +笤 +⒉ +磛 +繇 +疴 +沬 +趱 +鲔 +铩 +⒆ +跹 +胝 +酤 +寖 +轫 +貔 +嬴 +玦 +滈 +瘵 +曛 +馎 +楸 +晔 +笆 +缟 +庾 +茀 +爻 +弢 +赜 +遫 +睪 +郧 +鲋 +帑 +璆 +驩 +縻 +踯 +鲐 +崆 +峒 +餍 +邠 +螳 +喾 +嗛 +⒂ +颡 +苴 +瞋 +闇 +膑 +帨 +躅 +觚 +刳 +逖 +犍 +掾 +诎 +轸 +揜 +殽 +猃 +狁 +皁 +⒄ +鄜 +郿 +亶 +洹 +荦 +蛭 +紬 +柰 +寘 +羑 +嫪 +侔 +纡 +徂 +鲰 +乜 +餮 +鄠 +氖 +嵬 +虺 +蝮 +锾 +⒅ +埤 +棓 +苄 +悝 +渑 +觜 +莨 +轺 +酎 +鹆 +郾 +秏 +狃 +殳 +瀍 +蟥 +郗 +凃 +淦 +蟀 +昝 +嵇 +檩 +鄞 +荸 +仵 +邗 +铡 +琊 +芗 +蓥 +酃 +嘁 +鲅 +陉 +妫 +蛉 +璩 +濉 +畹 +蚪 +蒉 +溆 +謇 +掸 +仝 +浉 +逯 +麴 +誊 +邙 +仉 +珲 +逄 +邡 +坻 +眭 +磴 +渌 +僪 +埇 +缑 +禚 +沭 +欎 +螟 +笪 +庹 +茌 +邶 +秕 +隰 +秫 +圻 +冼 +杈 +蚜 +祢 +埂 +猇 +浈 +佴 +蝌 +貊 + +© \ No newline at end of file diff --git a/share/qtcreator/examples/15-Utils/fist.bin b/share/qtcreator/examples/15-Utils/fist.bin new file mode 100644 index 00000000000..be382d13408 --- /dev/null +++ b/share/qtcreator/examples/15-Utils/fist.bin @@ -0,0 +1,1313 @@ +~w{quylpuimrfkoaei]af[`dZ^bW[`TY]TX]TX]UZ^X\`Z^c\`e]beaegeijkporwvx}}~tvw\_aMPUMPUKOUGKPBEK=AF8=CBCHGKMMNPPUWWeggorrvxx|||tttlllddd[[[RRRJJJBCC;=>369/28-08-08+/6),3&)1$'/!$,")'%%%$%%$%%& ' (!(")"' "#" %$"'&#('$)($)(%*)(-,,//011466<>>BEEHJJOPPWWW^^^fffnnnyyy}}}tttddeYXYSSSLLLBBB<<<666000+++%%% ''&'''(('''''(!* ",!#-#&/#&/$&0#&/"%.!#-#+"(!%!!!  !! !!!###%&&())++.//212444656999AA@KJJWVV`__hggtrr}|}xuuihhZZZNNNEED===333,,,(((#$$ "#$$%%%%&( *!+!#-!#-!", *)(%#" %%%./.565;<:CA@IHHUVUgfeywwxurhedTQPEB?<:66320..)('"!  $#%'&*,,),/ ")$%'&&&&%%&%%''&%$#!!     ! !# % $#    "! %#"''$/-*623>;;FEDTRRged~}|zwd^[LGD@=951/'#! +  !!"%%$(((-,-1137947;(*2 +!- #."-!,+*)))(())&#! "####$' (#"*##*##*!"( !&$!)'&A?>b``}}xwjc`TLIB;74-*-'%*&%*%$)%#($#&" #     !!$"%!&')+++++******('&%#""!!"##$&&&%%%%$####!%$$222===KKK`_appr}}xvkfd[TTMEE>75-'%  '##,((,))(%%$!!!!$#$&#$&!"$#$!$!$ #! $%%$$$##" ! ! !     +  + + +           !! !!  "!%./2<>ALOQ^_btv{}xymhiXSTIDD>996122,--('&! $!#&#&%#%! # ""#($'+ $( $!$ $$#!           +  +     +        !   + "&+.27:>FINX[`nqvc^_C>?9572.0%!#!""& $ ##! !!"#%%&%##"! !   + !!!! " " " " ! "!          $(('& $ ! +      +!#"##     "! $"' #&"  +!""''+036;ADIUX]qty½pkm\WYJFG511%!"!  !!$ !(!#*&"! $&%"   +   +  + +   +   "% +( +-/0121136"8$:&<&<&<$:#9!76!8#9!863/+'"      +    !!"#"#!( %+$*$! ! #&)/:=DNRWhkoýz{]XY@;<'$& (%(&%(#"&#"&#"&#"&#!'%"(%#)$$)$%+"$,($"""!"''$ !  + !%*- 27"='A",G&1L*5P.9T.9T1X.8Q*5L$.E&>!62/+ *'%"  !"   +#'(%####""&#+#+&#"$)025GHLcdg~niiRLM<78.)*% !! ! $"$%$($"#"       +$ +& ' '#  % +-';#1E&5I+;O4DX=LcFUlP^uXh`rj{ou~wp}hv]kQ`zFUn>Mf6E]+:S#2I(@": 0 " + !# !  !"# +$ +# %)",!&/$)3!'0)!#!( $*$'+)*./16<>AVUWvuwibcSNM=79%!"#$!&%#)$#,(%%%$#  + &* , . -*'(0&<"0G,;T;JcGVnL]uRd|^ogzpztk}`rSeEWr7Hc->Y$3N%?876 3 +/,)&" !  +$* , ,,(! %"+&!'(,@@Ca`blkoYWYJEG<672,-("$!$'""-%&1#%0 #/ .+'# +  ( +1 :'A#0J,;T9HaIYrUd~_oo|tfx\mQbGZw=Ok1B^%6R+E!:. +## ('%%%!  $ *"+ #+"(""0/2IGHgeeonrJIM0/3"$!)#(+&+)%*#%+%&4"%3!10.-+) '$# " #/">%3O7EaGUrXhhxw{fzRf>Sp.B].I 7+ +$ " # #$%$! #((% ! 
0'B,7S7B^@KfIVpVc}htxlv`j~T_qBK]3  !%$ "!)8<@egjNOR "' &"   !%"(5I`owBRm(@$%-'  + +#$ +    $00>BBQ=>L()8#  #%" !(.* $ " +& * +* & $* 1:*D)6P7D^ESkTa{eqmzu~sybhPVmAG]3:N'/B$9/* %  +  !$& " + + +  ! & ( ) ($ +* 6(B&8SThN[v*C* ((  +!"*$+ %QTXoqr58; +$ )&   + *=MaQay /F) '"0(   &+'  + +%(->NRdmqznsIN`$5 +&/2231, +$ ! "' .5%=!,H$0L'5P2@\?LhHTrP^{]ljxs{xly^lVcOZxBJf9AY4;R.3J#'?4+ # +  %))' "    % *&#" ! ,*D0B\CUpWkfq'0J +" " #)( + '#*$ %6:?sv{OQS  #*#&.'    #2Dj{hx4CZ2 " '&  &+, # "1*2IOXnzW_v2:P 6 &   $%&-1. & # $ +!   ' +19'B'4O3@Z=KeGVqPazYjcuhzo|zsjv`mWdMYyBNn6Cb)6V".M%C0 % $ #   % ( ' & & ' ' & "  !!#+ 4&@(:TGXtewzuAKf-%'' #  #+! (-2Z_cnno#$( !%$'-'     0KZk{qCRi+A*$$ "  +  &' %  ##<;Fa`j{dn?I`&>% !%% + ! !(.1/* '&' & % $ +" !!'+ +/4!8%<)@-D 0H&6M*:R.>U3CZ8H_;Kb?OfETkJXnLZqO\tQ\uS_wXcz\f~\g[fXcT`|P[xMWuIUqHSpGQoFQnDPlBMk@Ki=If;Fc8B`5A]1KeO_yduyLWt",G  %($ +!!((- BHK==@!&# +! +  +%*:JWgyK[r#3H2 * ) &  $ !  +""<>KhkywQ]w6?W$'>- # " ! +    "'+,* ' ( * +)'& % $ # $ $%&'&&'((((((), +/ 0 1 24 6"9%;'=)@+A!,C%.F(0G(1J'1L$/K -H)E&C$@#?!>!< ;;:74 2 +1 0/-,+ * ( (%$ *-* * + ) ( ) * + ,. . ) " #'('&$"##*"8+6MDPg]ju}PZx+3P1 %(# "%,#,1#'&03hnq\[\113!"& + +! /6EVkzfw.=T/% +*- #  # ! 28Gcn~zdmELb+.D3( + + $& $ + ! " ' %# $ # " #%'* +, . + +''& $ # " " ! "$&'(())*+ ++ -./10145443 2 1 1 +/. . +.- ) +& % +&%#""! ! -50. , ' +# """ # && +#! +" # #%), 1#:!/E6BXNXpep{Zg0:W7 +# ! %(    $$-23<@U^a{{{MMO014"! +  +  *:SauVf~+? + +&, "( ! %#0M_pt{UWo==T''=)&)* %  '++ + * +* ) +) +* +* + ,* ) ** &# ! +  + + +   !#" !       +   ! $ % % % +% # "$# +# % ' % & & # "# # +# & ' ( +, . - ++ ) +&#!!!$ +& ' ' (+/ +10+ % +$(4&>!1J7E_N\vkxq}>Ig"? % + & $( +    +&04R[^nop?BC"    ! *=J]ouAOf0  & & "'& # +$0#0JL\zbc~CD^//J"#=1 ' ! +# +" " " #$ ' ( * +* +* +) +' +% +$ "    !"# +$ & % #"!! ! !  +  +# %&& '&% ' &&)*)*+('(&&(++ &#& +' ( * * + + * ) ( +' % +& ( ** ) +& '1'B,AZ?SmTfq}HSq)D , # %% !%  +   ?HLUWW(*+  "  "$1DETiwQay'4J"  ('   "- &-3@XWg~fhHJh23Q%(E;1 ' +"%& ( ' +   + ! "$&''&&%$%& % % % +$ ! #&&')))+++, ,+(&'((''&%$ $&&!!% )- 1 4 4 2. * +'$$$% &+3'C+@\H]yh~~P[x(B # '-) !  !"" 17>nuznppSrQfh|}]i1=X1 ! % !  $)$ !'.JQW}BDE" % $+& + ( $ "7>NfuARm-F 7)!    +!,+&>LZt~gsS^zHVpANh5A['5M,C$; 5 0 +- *&#    !## " " +        # % % +& &&$#"" ! !   ! ! ! !#$$##"" ! ! !#%)0 5 9=??@C$F,N*7[/=`8IjUej{tes-Qb|l~*;W 3 +. ++$ $+ +% 4@Xm{}n~\nM_w>Ph1D[%8O,C&;3 ,&"  +  +   + ! +# +# " +" + + +       "&0 9B(M(3W1=`7DhAMrKY|Yhjyujz;Ie: " $)'&%#  (,3ehpNPQ!$'"#&*"'   +"$# #/>Xi{[m*F &' +* &   +# & ' !/S^xp`vPf?Vm5La0CX*:P%2H"+@%80* ' +% +% $#!     #"      "$# # ( *.5"9%=!+I(2V2>bALqP[_lkyy_o2A]'C +0&  + +!'*#-!*  $)OSXwyz68:"&!&&   , ,"P.;M#0B&8#5 42/ - . +- ,-+*+ + +) +) +* +) * -.034 4$:!)?#,A%.D)2J.9N4>T;D\BLbIShQ[rT^vXc}grs}yu2A^ 6 / 10&  +#&#* %'+.IMPPRS!$$+$   -/  -'7RP`{q>On :" "()$ (+ '$ )2>I`|vqlzftds`nP_sCRe@Ob>La9E^5A[2?X/;V-9U,8S*5R'3O&2N)5O-7O,7N-7N-7P-8P/9Q2] 7 +% "&(',, %%2'1HV`y{xurn|lziwhvgteqboaocpepepfqgrhsjumxozq|utwq?Nj,G7 +-$& ' #$ )$) &+*FJHtyufhi?AA#%'"(!%-'#" +!#%''61@YiyL\}.M 6, % % %& + +$%!8Vjxn}(8X0 + '" &, +& ! $$0HutFPm(D( !  *$1!+#+$+0!  ,1-MQK~:9< $%"   +!((&"9@Mhn{l{)8W/ '" +%, +, +) " #&)5MxkwCNj *E+ $ +(  ,&2!*!)$*.!.20KPJvyrùede113%&"   # $ +% ,$.HMZvp)8V )&%" $ &' ($& +.:Rwfp;Ea&A , %) &!  #%! !$!"(-*KOKtxqXWY"!$!#($!      ++*6P\hu+:W )$ +& +$ +% $ $('( ./@&&)%&)#&#  +   #73>X]hy.Sd}n{HTm)2I-  + ##"$&"*-2IJOoprTPS)')%&# +    !#6@J_t~AQi&=($( -( !# !8BTl~an6B[$: (   + '*&   !'IKOz|{}KIJ&%'##") '"  $ ! "(;HTh}N]t#2I+  "*) !$ +  +3+@XcxXf0Oi"0I +) %%##&&#&!$*!(!"+/5SU[cbc<;=! 
$"$($*# *#0,  !1CQbyR`r*= +#("#%$" &=[kap6EZ+?/   !%$!#)#+' "048TY\qpqEDF%$' %!   (!., +9LXgp|p|(6F ' ' " +& %""! ! 5=Kao}jzO]q5BU'7) #  +!"%$).$*. + 046bghihi<;<  "&&&&# &(3@FP]fp}w7BP%4$2! +% % !#% +#/DO\q{Wh{5BT%6&    "# $#&!  *//Z_____333  #))#  !&%.;IR_hq~ajw=FS$.<$2!! $! # ( +%!-0 -" +  !! + !!'$(-'*/#&+ #&*=ACaefKOT(-2" &%*0#(."  "'&  +(%   +  $$   # &$&$: +& +#$   !"!%%(*&)+ #% "(*-FHKjnotw{INQ058 #(#&*'+0!$) "#*& + "*"+    +  *,&#6BPi~|cvK\s4EZ+>$  !'!$' %'(-/0:<=QSTuwx}RVY/15 %!   #$ +!*!&.##    +  -!4*";P^yoN`z1D\(= .)!  +  ")#("$&%()#&'#&&*+,:9;TUWy||{}GGK##( + ##*#!   %* +# +#.#6-  +6QfszZk;Jc%2H2 $ ')& ! "$).#'* !!"# !"*,+DCDjikutvFFG(*+ !!$%"&'!#&!  +  % -,+-. ) "46D_crap>Ne 0H / +&#."*4'-6(.4%)-!%##$$%%"""BCAwvv|||PPQ557*+-"%&   +    " .#2(   +( * $& 96D_lyhzL\t1?W&;*    $($+"'-$(."%( %$#0/-766FDD\[[_^`>>?%&'"!!"   !"  +)/ +"(2 ,%'$>DQmudvIYs0@W.C 5(## +$ $%'&$ ,*)CA?ca`iii?@@())#$$#%% #$! +  + #$'+$    " +!  +$ * )( %()DDTpiyg{N`|2A\)C 1)"  "*!+$&#* '  "#%)**.-,754NLKkiglkkKKK555$%% "" +  ""  + + !"%(& #(%@>Lhiyz`s?Om,9V/H8.(&# +#&%""!%!,,+EDCecc¿|||UUU332$%% ! ""$&' "  "      +# +$$$ # +" 'BFUrwdvL^|.@] +3#''' +' % #  + +$+$  ')+023(*+ A?>nlkWWW566!"!! "#   +     + ! #& & %# " !,*FBNmo{dvGXw)9W!= - & &&&"   + #%*/(,1 #(  !!-..CBA`^]^__578&()!#$!! +   "%')) &$ !  " & (-%C>Iho|z_oDTo)9U">- +,2- )"    $##"''+/,/2$',"##JII~|{npqILM,./"     #&()( & +#! ! " $*-7".KDPmp~s\gBNl)7R"<, +% +$,/'# %&%##%'&!!%&'*,-.0=??^]\}MOP')+ !   !$$% $ +# "! !%)-++ /'C@MjjwzlwWb|>Ic)3N!< +. $ " +,0 '" "#%'( )'# &()>@A^]_^ad?BG,/3!! #(!  + +      !!! !& ** % *(F?Kidrn{]iMWp>G_,4K6 +( " +! +! +! +! ! +%+*%#$%$ %#&'')(*+578UVXzy{}OSW(+.     + +  +   !#$ + # + - +*.6'3POZxuZg>Kg/;V$.F1$ + ',--.-,)))!"-#%. "))&(<:<^^_VY]259 $   "      ! +# +% +% + " +,24 +1*"-$.MS_}o}S`|8C_ ,H 4*    $'*,/10,)((!)!)##!#;8:NKMmjljns>BF"&+! % )(%"!  !" # % +$ +$ (, *% $(,-*+:'4PDRnm{v`nKYv;Hd1=Y%2N&A5*" %('$      %,,&! !%335RQRywwsw|NRW8<@*-5"&. *('%"   ! # "! +& + +' #! $*-- 1 6&B7C_^khwTa@Mj)6R#? +2 .,*' % $ "  ( . .)"  +    (+'$$#&*025GILiikruyUY^8;B #-&%! !!    +  &* )% ! ! +# $ '+3#=,8PKXokzjwSa}CPl8F_0$   ",02<(*5$ "%&#      #'8-:K;IYHWiZj{gxtuitahY^tINc6Ie2=V'.D %92* +" +          +&)%%(#!-'&0+)432Jd3AX)6M*D#=4$   ! $% $ +!       +  $%%" "! +21CBLLKUZYckjstv{YZaFGN78@()2( "%'$   !%&$ !  +   !#' ),1%8$.A-9M1?T6F[@TjJazPgWmbxls{{ri{_qWiO`HYu?Ok7Fb3?Z.:S&4M-F$=4 / /1. +)# +  +      +    !#()'%&&$! !!(!&-"'.'*2<(A*C,F!.J$1M'5O)7Q)8Q)8Q*9R*9R+:S+9S+9S)8Q(7P'5N$2K"0J .H,E+D)C)B&?#< 9 5 0-) # #,2 +-*)() + +, ++ (&$       "&&$$%)!+ ", )$  +    $(*-25:=7:7242-/-'*' %!! !!"""!"" !$&&$"!#$$%#"    ! $$%)**.01599=<>BABFMMQVUX]\^gfhsrv{~rlnibd_Y[XRTQLMJFGC?@;683/1.+-*'*($'%!$#"  !!%%"(%")#"' $ %%% &!&&%$" !"#$% $$$&(()))*++-..143599;BADMMOYZ[gfinnqxwz|zzsnohfe_^]XUWQOPJHIECDB?@>;<97:5382040.2-,/++--+./*0-*.*),)$*% %!! $##(!$'"#'##("$'!#'"!' "&!$$"!!"&$%*%(*'(,**/-/313777<9;?77:68:89;98<89;9:<;:>9;=9;<<;?::=:<>HHKRQURTUUUXZZ]^`bcfgkkopqtuwy}|}~}|{|{xzwvxvvxvvxwvywwyxxzzy|zz|{{}~}~}þ¾ \ No newline at end of file diff --git a/share/qtcreator/examples/15-Utils/five.bin b/share/qtcreator/examples/15-Utils/five.bin new file mode 100644 index 00000000000..4cfe3920579 --- /dev/null +++ b/share/qtcreator/examples/15-Utils/five.bin @@ -0,0 +1,999 @@ +y|ilqY[aMNVBDL:=G<@JCFQIMVOS[[^flnvihlCEJ26;(+2"  +  #%(!%016>NQXqt|{~_aiMRZKOR~ikqGJQ47>*.6 +"  ')% "$".(/99>ILOYdfpBBI#  + **  +# !!  3:8MSN}~ghnBEK%)2 + !   +  "01PfEZqE[t?Wo1F^'9T*D +4( "" +!!   (08bhoBIR$."   !  . 
diff --git a/share/qtcreator/examples/15-Utils/prior_data_320.bin b/share/qtcreator/examples/15-Utils/prior_data_320.bin
new file mode 100644
index 00000000000..4d8aab972a7
Binary files a/share/qtcreator/examples/15-Utils/prior_data_320.bin and b/share/qtcreator/examples/15-Utils/prior_data_320.bin differ
diff --git a/share/qtcreator/examples/15-Utils/shear.bin b/share/qtcreator/examples/15-Utils/shear.bin
new file mode 100644
index 00000000000..a739569dc53
Binary files a/share/qtcreator/examples/15-Utils/shear.bin and b/share/qtcreator/examples/15-Utils/shear.bin differ
diff --git a/share/qtcreator/examples/15-Utils/wozai.wav b/share/qtcreator/examples/15-Utils/wozai.wav
new file mode 100644
index 00000000000..bef54be0109
Binary files a/share/qtcreator/examples/15-Utils/wozai.wav and b/share/qtcreator/examples/15-Utils/wozai.wav differ