Downloading https://storage.googleapis.com/tensorflow/mac/tensorflow-0.6.0-py3-none-any.whl (10.2MB)
    100% |████████████████████████████████| 10.3MB 1.9MB/s
Collecting numpy>=1.8.2 (from tensorflow==0.6.0)
  Downloading https://files.pythonhosted.org/packages/8e/75/7a8b7e3c073562563473f2a61bd53e75d0a1f5e2047e576ee61d44113c22/numpy-1.14.3-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl (4.7MB)
    100% |████████████████████████████████| 4.7MB 832kB/s
Collecting protobuf==3.0.0a3 (from tensorflow==0.6.0)
  Downloading https://files.pythonhosted.org/packages/d7/92/34c5810fa05e98082d141048110db97d2f98d318fa96f8202bf146ab79de/protobuf-3.0.0a3.tar.gz (88kB)
    100% |████████████████████████████████| 92kB 18.6MB/s
Requirement not upgraded as not directly required: wheel>=0.26 in ./venv/lib/python3.6/site-packages (from tensorflow==0.6.0) (0.31.1)
Collecting six>=1.10.0 (from tensorflow==0.6.0)
  Downloading https://files.pythonhosted.org/packages/67/4b/141a581104b1f6397bfa78ac9d43d8ad29a7ca43ea90a2d863fe3056e86a/six-1.11.0-py2.py3-none-any.whl
Requirement not upgraded as not directly required: setuptools in ./venv/lib/python3.6/site-packages (from protobuf==3.0.0a3->tensorflow==0.6.0) (39.1.0)
Building wheels for collected packages: protobuf
  Running setup.py bdist_wheel for protobuf ... done
  Stored in directory: /Users/zowee-laisc/Library/Caches/pip/wheels/07/0a/98/ca8fbec7368a85849700304bf0cf40d2d8e183f9a5dd136795
Successfully built protobuf
Installing collected packages: numpy, protobuf, six, tensorflow
Successfully installed numpy-1.14.3 protobuf-3.0.0a3 six-1.11.0 tensorflow-0.6.0
(venv) zowee-laiscdeMacBook-Pro:tensorflow zowee-laisc$
That is all it takes. Then verify that the environment works:
(venv) zowee-laiscdeMacBook-Pro:tensorflow zowee-laisc$ python
Python 3.6.4 (v3.6.4:d48ecebad5, Dec 18 2017, 21:07:28)
[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import tensorflow as tf
>>> hello = tf.constant('hello tensorflow')
>>> sess = tf.Session()
I tensorflow/core/common_runtime/local_device.cc:40] Local device intra op parallelism threads: 4
I tensorflow/core/common_runtime/direct_session.cc:58] Direct session inter op parallelism threads: 4
>>> print(sess.run(hello))
b'hello tensorflow'
>>> a = tf.constant(10)
>>> b = tf.constant(32)
>>> print(sess.run(a+b))
42
>>> exit();
protected void setFragment() {
  String cameraId = chooseCamera(); // pick a suitable camera
  if (cameraId == null) {
    Toast.makeText(this, "No Camera Detected", Toast.LENGTH_SHORT).show();
    finish();
  }

  Fragment fragment;
  if (useCamera2API) { // true: initialize the camera2 fragment
    CameraConnectionFragment camera2Fragment =
        CameraConnectionFragment.newInstance(
            new CameraConnectionFragment.ConnectionCallback() {
              @Override
              public void onPreviewSizeChosen(final Size size, final int rotation) {
                Log.i("linlian", "useCamera2API onPreviewSizeChosen=");
                previewHeight = size.getHeight();
                previewWidth = size.getWidth();
                CameraActivity.this.onPreviewSizeChosen(size, rotation);
              }
            },
            this,
            getLayoutId(),
            getDesiredPreviewFrameSize());

    camera2Fragment.setCamera(cameraId);
    fragment = camera2Fragment;
  } else {
    fragment =
        new LegacyCameraConnectionFragment(this, getLayoutId(), getDesiredPreviewFrameSize());
  }
/**
 * {@link android.view.TextureView.SurfaceTextureListener} handles several lifecycle events on a
 * {@link TextureView}.
 */
private final TextureView.SurfaceTextureListener surfaceTextureListener =
    new TextureView.SurfaceTextureListener() {
      @Override
      public void onSurfaceTextureAvailable( // first initialization
          final SurfaceTexture texture, final int width, final int height) {
        openCamera(width, height);
      }

      @Override
      public void onSurfaceTextureSizeChanged( // when the preview size changes
          final SurfaceTexture texture, final int width, final int height) {
        configureTransform(width, height);
      }

      @Override
      public boolean onSurfaceTextureDestroyed(final SurfaceTexture texture) {
        return true;
      }

      @Override
      public void onSurfaceTextureUpdated(final SurfaceTexture texture) {}
    };
Open the camera and configure the most suitable preview size:
/** Opens the camera specified by {@link CameraConnectionFragment#cameraId}. */
private void openCamera(final int width, final int height) {
  setUpCameraOutputs();              // choose the preview size
  configureTransform(width, height); // apply the rotation/scale transform
  final Activity activity = getActivity();
  final CameraManager manager =
      (CameraManager) activity.getSystemService(Context.CAMERA_SERVICE);
  try {
    if (!cameraOpenCloseLock.tryAcquire(2500, TimeUnit.MILLISECONDS)) {
      throw new RuntimeException("Time out waiting to lock camera opening.");
    }
    manager.openCamera(cameraId, stateCallback, backgroundHandler); // open the camera
  } catch (final CameraAccessException e) {
    LOGGER.e(e, "Exception!");
  } catch (final InterruptedException e) {
    throw new RuntimeException("Interrupted while trying to lock camera opening.", e);
  }
}
/**
 * {@link android.hardware.camera2.CameraDevice.StateCallback} is called when
 * {@link CameraDevice} changes its state.
 */
private final CameraDevice.StateCallback stateCallback =
    new CameraDevice.StateCallback() {
      @Override
      public void onOpened(final CameraDevice cd) {
        // This method is called when the camera is opened. We start camera preview here.
        cameraOpenCloseLock.release();
        cameraDevice = cd;
        createCameraPreviewSession();
      }

      @Override
      public void onError(final CameraDevice cd, final int error) {
        cameraOpenCloseLock.release();
        cd.close();
        cameraDevice = null;
        final Activity activity = getActivity();
        if (null != activity) {
          activity.finish();
        }
      }
    };
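The callback also has to override onDisconnected, which is not shown in the excerpt above; in the demo it simply releases the lock and closes the device. A minimal sketch of that missing override:

// Sketch (hedged): the remaining required StateCallback override, omitted above.
@Override
public void onDisconnected(final CameraDevice cd) {
  cameraOpenCloseLock.release();
  cd.close();
  cameraDevice = null;
}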
backgroundThread
/** Starts a background thread and its {@link Handler}. Called from onResume(). */
private void startBackgroundThread() {
  backgroundThread = new HandlerThread("ImageListener");
  backgroundThread.start();
  backgroundHandler = new Handler(backgroundThread.getLooper());
}
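For symmetry, the demo also tears this thread down in onPause(); a minimal sketch of the matching stop method, assuming the same backgroundThread/backgroundHandler fields:

/** Stops the background thread and its {@link Handler}. Sketch; called from onPause(). */
private void stopBackgroundThread() {
  backgroundThread.quitSafely();
  try {
    backgroundThread.join();
    backgroundThread = null;
    backgroundHandler = null;
  } catch (final InterruptedException e) {
    LOGGER.e(e, "Exception!");
  }
}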
/** Creates a new {@link CameraCaptureSession} for camera preview. */
private void createCameraPreviewSession() {
  try {
    final SurfaceTexture texture = textureView.getSurfaceTexture();
    assert texture != null;

    // We configure the size of default buffer to be the size of camera preview we want.
    texture.setDefaultBufferSize(previewSize.getWidth(), previewSize.getHeight());

    // This is the output Surface we need to start preview.
    final Surface surface = new Surface(texture);

    // We set up a CaptureRequest.Builder with the output Surface.
    previewRequestBuilder = cameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
    previewRequestBuilder.addTarget(surface);

    LOGGER.i("Opening camera preview: " + previewSize.getWidth() + "x" + previewSize.getHeight());

    // Create the reader for the preview frames.
    previewReader =
        ImageReader.newInstance(
            previewSize.getWidth(), previewSize.getHeight(), ImageFormat.YUV_420_888, 2);

    // Here, we create a CameraCaptureSession for camera preview.
    cameraDevice.createCaptureSession(
        Arrays.asList(surface, previewReader.getSurface()),
        new CameraCaptureSession.StateCallback() {

          @Override
          public void onConfigured(final CameraCaptureSession cameraCaptureSession) {
            // The camera is already closed.
            if (null == cameraDevice) {
              return;
            }

            // When the session is ready, we start displaying the preview.
            captureSession = cameraCaptureSession;
            try {
              // Auto focus should be continuous for camera preview.
              previewRequestBuilder.set(
                  CaptureRequest.CONTROL_AF_MODE,
                  CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
              // Flash is automatically enabled when necessary.
              previewRequestBuilder.set(
                  CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH);

              // Finally, we start displaying the camera preview.
              previewRequest = previewRequestBuilder.build();
              captureSession.setRepeatingRequest(
                  previewRequest, captureCallback, backgroundHandler);
            } catch (final CameraAccessException e) {
              LOGGER.e(e, "Exception!");
            }
          }
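The excerpt stops here. A sketch of how the method presumably ends in the demo, with the onConfigureFailed override and the step that hands every preview frame to the TensorFlow code; names such as showToast and imageListener are taken as assumptions, not from the excerpt:

          // (continuation sketch, hedged)
          @Override
          public void onConfigureFailed(final CameraCaptureSession cameraCaptureSession) {
            showToast("Failed");
          }
        },
        null);

    // Deliver each preview frame to the OnImageAvailableListener on the background thread;
    // that listener is where classification/detection is kicked off.
    previewReader.setOnImageAvailableListener(imageListener, backgroundHandler);
  } catch (final CameraAccessException e) {
    LOGGER.e(e, "Exception!");
  }
}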
/** Callback for android.hardware.Camera API */
@Override
public void onPreviewFrame(final byte[] bytes, final Camera camera) {
  Log.i("linlian", "CameraActivity.onPreviewFrame()");
  if (isProcessingFrame) {
    LOGGER.w("Dropping frame!"); // still busy with the previous frame, so drop this one
    return;
  }

  try {
    // Initialize the storage bitmaps once when the resolution is known.
    if (rgbBytes == null) {
      Camera.Size previewSize = camera.getParameters().getPreviewSize();
      previewHeight = previewSize.height;
      previewWidth = previewSize.width;
      rgbBytes = new int[previewWidth * previewHeight]; // allocate the RGB buffer
      onPreviewSizeChosen(new Size(previewSize.width, previewSize.height), 90);
    }
  } catch (final Exception e) {
    LOGGER.e(e, "Exception!");
    return;
  }

  // For examining the actual TF input.
  if (SAVE_PREVIEW_BITMAP) {
    ImageUtils.saveBitmap(croppedBitmap);
  }

  // The original frame has already been cropped/scaled into croppedBitmap at the required size.
  runInBackground(
      new Runnable() {
        @Override
        public void run() {
          final long startTime = SystemClock.uptimeMillis();
          // Run the recognition.
          final List<Classifier.Recognition> results = classifier.recognizeImage(croppedBitmap);
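How croppedBitmap is produced is not shown in the excerpt. In the demo a transformation matrix maps the camera frame into the model's square input; a rough sketch using the constants from the next block (rgbFrameBitmap, sensorOrientation and MAINTAIN_ASPECT are assumed names, not from the excerpt):

// Sketch (hedged): building croppedBitmap from the full preview frame.
rgbFrameBitmap.setPixels(rgbBytes, 0, previewWidth, 0, 0, previewWidth, previewHeight);

// Matrix that scales/rotates the preview frame into the INPUT_SIZE x INPUT_SIZE model input.
Matrix frameToCropTransform =
    ImageUtils.getTransformationMatrix(
        previewWidth, previewHeight, INPUT_SIZE, INPUT_SIZE, sensorOrientation, MAINTAIN_ASPECT);

final Canvas canvas = new Canvas(croppedBitmap);
canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);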
private static final int INPUT_SIZE = 224;
private static final int IMAGE_MEAN = 117;
private static final float IMAGE_STD = 1;
private static final String INPUT_NAME = "input";
private static final String OUTPUT_NAME = "output";
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
  Log.i("linlian", "recognizeImage");
  // Log this method so that it can be analyzed with systrace.
  Trace.beginSection("recognizeImage");

  Trace.beginSection("preprocessBitmap");
  // Preprocess the image data from 0-255 int to normalized float based
  // on the provided parameters.
  bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
  Log.i("linlian", "recognizeImage intValues.length=" + intValues.length);
  for (int i = 0; i < intValues.length; ++i) {
    final int val = intValues[i];
    floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    // Log.i("linlian", " i=" + i + " " + floatValues[i * 3 + 0] + " " + floatValues[i * 3 + 1] + " " + floatValues[i * 3 + 2]);
  }
  Trace.endSection();

  // Copy the input data into TensorFlow (feed the input tensor).
  Trace.beginSection("feed");
  inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
  Trace.endSection();

  // Run the inference call.
  Trace.beginSection("run");
  inferenceInterface.run(outputNames, logStats);
  Trace.endSection();

  // Copy the output Tensor back into the output array (fetch the results).
  Trace.beginSection("fetch");
  inferenceInterface.fetch(outputName, outputs);
  Trace.endSection();

  // Find the best classifications.
  PriorityQueue<Recognition> pq =
      new PriorityQueue<Recognition>(
          3,
          new Comparator<Recognition>() {
            @Override
            public int compare(Recognition lhs, Recognition rhs) {
              // Intentionally reversed to put high confidence at the head of the queue.
              return Float.compare(rhs.getConfidence(), lhs.getConfidence());
            }
          });
  for (int i = 0; i < outputs.length; ++i) {
    if (outputs[i] > THRESHOLD) {
      pq.add(
          new Recognition(
              "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
    }
  }
  final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
  int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
  for (int i = 0; i < recognitionsSize; ++i) {
    recognitions.add(pq.poll());
  }
  Trace.endSection(); // "recognizeImage"
  return recognitions;
}
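For context, a sketch of how this classifier is typically constructed and invoked in the demo's classifier activity, using the constants shown above; the two asset file names are placeholders for illustration, not taken from the excerpt:

// Sketch (hedged): creating and using the image classifier.
Classifier classifier =
    TensorFlowImageClassifier.create(
        getAssets(),
        "file:///android_asset/tensorflow_inception_graph.pb",           // assumed model asset
        "file:///android_asset/imagenet_comp_graph_label_strings.txt",   // assumed label asset
        INPUT_SIZE,
        IMAGE_MEAN,
        IMAGE_STD,
        INPUT_NAME,
        OUTPUT_NAME);

List<Classifier.Recognition> results = classifier.recognizeImage(croppedBitmap);
for (Classifier.Recognition r : results) {
  Log.i("linlian", r.getTitle() + " " + r.getConfidence());
}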
Detection: the detector
About detection
Three kinds of model can be loaded. The first option is MultiBox, a model trained with the older API:
private static final String MB_INPUT_NAME = "ResizeBilinear";
private static final String MB_OUTPUT_LOCATIONS_NAME = "output_locations/Reshape";
private static final String MB_OUTPUT_SCORES_NAME = "output_scores/Reshape";
private static final String MB_MODEL_FILE = "file:///android_asset/multibox_model.pb";
private static final String MB_LOCATION_FILE =
    "file:///android_asset/multibox_location_priors.txt";
The second option is a model from the TensorFlow Object Detection API:
private static final int TF_OD_API_INPUT_SIZE = 300;
private static final String TF_OD_API_MODEL_FILE =
    "file:///android_asset/ssd_mobilenet_v1_android_export.pb";
private static final String TF_OD_API_LABELS_FILE = "file:///android_asset/coco_labels_list.txt";
And the third option is YOLO:
// Configuration values for tiny-yolo-voc. Note that the graph is not included with TensorFlow and
// must be manually placed in the assets/ directory by the user.
// Graphs and models downloaded from http://pjreddie.com/darknet/yolo/ may be converted e.g. via
// DarkFlow (https://github.com/thtrieu/darkflow). Sample command:
// ./flow --model cfg/tiny-yolo-voc.cfg --load bin/tiny-yolo-voc.weights --savepb --verbalise
private static final String YOLO_MODEL_FILE = "file:///android_asset/graph-tiny-yolo-voc.pb";
private static final int YOLO_INPUT_SIZE = 416;
private static final String YOLO_INPUT_NAME = "input";
private static final String YOLO_OUTPUT_NAMES = "output";
private static final int YOLO_BLOCK_SIZE = 32;
YOLO is a real-time object detection system:
You only look once (YOLO) is a state-of-the-art, real-time object detection system. On a Pascal Titan X it processes images at 30 FPS and has a mAP of 57.9% on COCO test-dev.
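In the demo, DetectorActivity picks one of the three models with a mode flag. A rough sketch of that selection; the enum, the create() signatures and the MB_IMAGE_MEAN/MB_IMAGE_STD constants are assumptions based on the demo source, not shown in this post:

// Sketch (hedged): how the demo chooses between the three detectors.
private enum DetectorMode {
  TF_OD_API, MULTIBOX, YOLO;
}

private static final DetectorMode MODE = DetectorMode.TF_OD_API;

// ... inside onPreviewSizeChosen(), roughly:
Classifier detector;
if (MODE == DetectorMode.YOLO) {
  detector =
      TensorFlowYoloDetector.create(
          getAssets(), YOLO_MODEL_FILE, YOLO_INPUT_SIZE,
          YOLO_INPUT_NAME, YOLO_OUTPUT_NAMES, YOLO_BLOCK_SIZE);
} else if (MODE == DetectorMode.MULTIBOX) {
  detector =
      TensorFlowMultiBoxDetector.create(
          getAssets(), MB_MODEL_FILE, MB_LOCATION_FILE, MB_IMAGE_MEAN, MB_IMAGE_STD,
          MB_INPUT_NAME, MB_OUTPUT_LOCATIONS_NAME, MB_OUTPUT_SCORES_NAME);
} else {
  detector =
      TensorFlowObjectDetectionAPIModel.create(
          getAssets(), TF_OD_API_MODEL_FILE, TF_OD_API_LABELS_FILE, TF_OD_API_INPUT_SIZE);
}

The rest of this post focuses on the Object Detection API model, whose create() method is shown below.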
public static Classifier create(
    final AssetManager assetManager,
    final String modelFilename,
    final String labelFilename,
    final int inputSize)
    throws IOException {
  final TensorFlowObjectDetectionAPIModel d = new TensorFlowObjectDetectionAPIModel();

  d.inferenceInterface = new TensorFlowInferenceInterface(assetManager, modelFilename);

  final Graph g = d.inferenceInterface.graph();

  d.inputName = "image_tensor"; // name/shape of the input tensor
  // The inputName node has a shape of [N, H, W, C], where
  // N is the batch size
  // H = W are the height and width
  // C is the number of channels (3 for our purposes - RGB)
  final Operation inputOp = g.operation(d.inputName);
  if (inputOp == null) {
    throw new RuntimeException("Failed to find input Node '" + d.inputName + "'");
  }
  d.inputSize = inputSize;
  // The outputScoresName node has a shape of [N, NumLocations], where N
  // is the batch size. The graph exposes three output nodes.
  final Operation outputOp1 = g.operation("detection_scores");
  if (outputOp1 == null) {
    throw new RuntimeException("Failed to find output Node 'detection_scores'");
  }
  final Operation outputOp2 = g.operation("detection_boxes");
  if (outputOp2 == null) {
    throw new RuntimeException("Failed to find output Node 'detection_boxes'");
  }
  final Operation outputOp3 = g.operation("detection_classes");
  if (outputOp3 == null) {
    throw new RuntimeException("Failed to find output Node 'detection_classes'");
  }

  // Pre-allocate buffers.
  d.outputNames =
      new String[] {"detection_boxes", "detection_scores", "detection_classes", "num_detections"};
  d.intValues = new int[d.inputSize * d.inputSize];
  d.byteValues = new byte[d.inputSize * d.inputSize * 3];
  d.outputScores = new float[MAX_RESULTS];
  d.outputLocations = new float[MAX_RESULTS * 4];
  d.outputClasses = new float[MAX_RESULTS];
  d.outputNumDetections = new float[1];
  return d;
}
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
  // Log this method so that it can be analyzed with systrace.
  Trace.beginSection("recognizeImage");

  Trace.beginSection("preprocessBitmap");
  // Preprocess the image data from 0-255 int to normalized float based
  // on the provided parameters.
  bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
  // Pack the RGB ints into the byte buffer that is fed to the network.
  for (int i = 0; i < intValues.length; ++i) {
    byteValues[i * 3 + 2] = (byte) (intValues[i] & 0xFF);
    byteValues[i * 3 + 1] = (byte) ((intValues[i] >> 8) & 0xFF);
    byteValues[i * 3 + 0] = (byte) ((intValues[i] >> 16) & 0xFF);
  }
  Trace.endSection(); // preprocessBitmap

  // Copy the input data into TensorFlow (feed the input tensor).
  Trace.beginSection("feed");
  inferenceInterface.feed(inputName, byteValues, 1, inputSize, inputSize, 3);
  Trace.endSection();

  // Run the inference call.
  Trace.beginSection("run");
  inferenceInterface.run(outputNames, logStats);
  Trace.endSection();

  // Copy the output Tensors back into the output arrays (fetch the results).
  Trace.beginSection("fetch");
  outputLocations = new float[MAX_RESULTS * 4];
  outputScores = new float[MAX_RESULTS];
  outputClasses = new float[MAX_RESULTS];
  outputNumDetections = new float[1];
  inferenceInterface.fetch(outputNames[0], outputLocations);
  inferenceInterface.fetch(outputNames[1], outputScores);
  inferenceInterface.fetch(outputNames[2], outputClasses);
  inferenceInterface.fetch(outputNames[3], outputNumDetections);
  Trace.endSection();

  // Find the best detections.
  final PriorityQueue<Recognition> pq =
      new PriorityQueue<Recognition>(
          1,
          new Comparator<Recognition>() {
            @Override
            public int compare(final Recognition lhs, final Recognition rhs) {
              // Intentionally reversed to put high confidence at the head of the queue.
              return Float.compare(rhs.getConfidence(), lhs.getConfidence());
            }
          });

  // Scale them back to the input size.
  for (int i = 0; i < outputScores.length; ++i) {
    final RectF detection =
        new RectF(
            outputLocations[4 * i + 1] * inputSize,
            outputLocations[4 * i] * inputSize,
            outputLocations[4 * i + 3] * inputSize,
            outputLocations[4 * i + 2] * inputSize);
    pq.add(
        new Recognition("" + i, labels.get((int) outputClasses[i]), outputScores[i], detection));
  }

  final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
  for (int i = 0; i < Math.min(pq.size(), MAX_RESULTS); ++i) {
    recognitions.add(pq.poll());
  }
  Trace.endSection(); // "recognizeImage"
  return recognitions;
}
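In DetectorActivity the recognitions returned here are then filtered by a minimum confidence and mapped from the cropped input back to the full camera frame before being drawn; roughly like this (minimumConfidence and cropToFrameTransform are names assumed from the demo):

// Sketch (hedged): post-processing the detector output.
final List<Classifier.Recognition> mappedRecognitions = new LinkedList<Classifier.Recognition>();
for (final Classifier.Recognition result : results) {
  final RectF location = result.getLocation();
  if (location != null && result.getConfidence() >= minimumConfidence) {
    // Map the box from the 300x300 crop back into preview-frame coordinates.
    cropToFrameTransform.mapRect(location);
    result.setLocation(location);
    mappedRecognitions.add(result);
  }
}
// mappedRecognitions are then handed to the overlay view for drawing.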
Training a model
Running the handwritten-digit (MNIST) example
Just run the fully_connected_feed.py file directly to start training:
python fully_connected_feed.py
Traceback (most recent call last):
File "fully_connected_feed.py", line 279, in <module>
# Build and add our own JNI library.
add_library( # Sets the name of the library.
             detection_based_tracker

             # Sets the library as a shared library.
             SHARED

             # Provides a relative path to your source file(s).
             src/main/cpp/DetectionBasedTracker_jni.cpp )

# Searches for a specified prebuilt library and stores the path as a
# variable. Because CMake includes system libraries in the search path by
# default, you only need to specify the name of the public NDK library
# you want to add. CMake verifies that the library exists before
# completing its build.
find_library( # Sets the name of the path variable.
              log-lib

              # Specifies the name of the NDK library that
              # you want CMake to locate.
              log )

# Specifies libraries CMake should link to your target library. You
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
                       detection_based_tracker

                       lib_opencv

                       # Links the target library to the log library
                       # included in the NDK.
                       ${log-lib} )
/Measurebox2/facedection/src/main/cpp/DetectionBasedTracker_jni.cpp
Error:(36) undefined reference to `cv::CascadeClassifier::detectMultiSc
Error:error: linker command failed with exit code 1 (use -v to see invocation)

Or:
Error:(36) undefined reference to `cv::CascadeClassi

Error:Execution failed for task ':facedection:transformNativeLibsWithStripDebugSymbolForDebug'.
> A problem occurred starting process 'command '/Users/zowee-laisc/lynn/sdk/android-sdk-macosx/ndk-bundle/toolchains/mips64el-linux-android-4.9/prebuilt/darwin-x86_64/bin/mips64el-linux-android-strip''
import scrapy


class ChandaoItem(scrapy.Item):
    # define the fields for your item here like:
    # bug title
    title = scrapy.Field()
    # severity level
    severity = scrapy.Field()
    # who reported the bug
    founder = scrapy.Field()
    # current assignee
    current = scrapy.Field()
@Override
public final Float evaluate(float fraction, Number startValue, Number endValue) {
    float t = mDuration * fraction;
    float b = startValue.floatValue();
    float c = endValue.floatValue() - startValue.floatValue();
    float d = mDuration;
    float result = calculate(t, b, c, d);
    for (EasingListener l : mListeners) {
        l.on(t, result, b, c, d);
    }
    return result;
}
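Because this evaluate() signature matches android.animation.TypeEvaluator, an easing method like this can be plugged into a standard animator. A minimal usage sketch, assuming the easing object above is available as a variable named easing:

// Sketch (hedged): driving a view property with the easing evaluator shown above.
ObjectAnimator anim = ObjectAnimator.ofFloat(targetView, "translationY", 0f, 300f);
anim.setEvaluator(easing);   // "easing" is the TypeEvaluator from the previous block (assumed name)
anim.setDuration(1200);
anim.start();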
/**
 * Loading the svg from the resources.
 *
 * @param context     Context object to get the resources.
 * @param svgResource int resource id of the svg.
 */
public void load(Context context, int svgResource) {
    if (mSvg != null)
        return;
    try {
        // Loaded through the com.caverock.androidsvg API.
        mSvg = SVG.getFromResource(context, svgResource);
        mSvg.setDocumentPreserveAspectRatio(PreserveAspectRatio.UNSCALED);
    } catch (SVGParseException e) {
        Log.e(LOG_TAG, "Could not load specified SVG resource", e);
    }
}
/**
 * Render the svg to canvas and catch all the paths while rendering.
 *
 * @param width  - the width to scale down the view to,
 * @param height - the height to scale down the view to,
 * @return All the paths from the svg.
 */
public List<SvgPath> getPathsForViewport(final int width, final int height) {
    final float strokeWidth = mSourcePaint.getStrokeWidth();
    Canvas canvas = new Canvas() {
        private final Matrix mMatrix = new Matrix();

        @Override
        public int getWidth() {
            return width;
        }

        @Override
        public int getHeight() {
            return height;
        }

        @Override
        public void drawPath(Path path, Paint paint) {
            Path dst = new Path();

/**
 * Rescale the canvas with specific width and height.
 *
 * @param width       The width of the canvas.
 * @param height      The height of the canvas.
 * @param strokeWidth Width of the path to add to scaling.
 * @param canvas      The canvas to be drawn.
 */
private void rescaleCanvas(int width, int height, float strokeWidth, Canvas canvas) {
    if (mSvg == null)
        return;
    final RectF viewBox = mSvg.getDocumentViewBox();
/**
 * Default constructor.
 *
 * @param pathView The view that must be animated.
 */
public AnimatorBuilder(final PathView pathView) {
    anim = ObjectAnimator.ofFloat(pathView, "percentage", 0.0f, 1.0f);
}
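A typical way to start the animation from client code might look like the sketch below; the fluent delay/duration/interpolator/start methods are assumed from the PathView library's AnimatorBuilder and the layout id is a placeholder:

// Sketch (hedged): kicking off the path-drawing animation from an Activity.
PathView pathView = (PathView) findViewById(R.id.pathView); // placeholder id
pathView.getPathAnimator()
        .delay(100)
        .duration(500)
        .interpolator(new AccelerateDecelerateInterpolator())
        .start();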
By driving the value of percentage, the animator controls how much of the path is drawn:
/**
 * Animate this property. It is the percentage of the path that is drawn.
 * It must be [0,1].
 *
 * @param percentage float the percentage of the path.
 */
public void setPercentage(float percentage) {
    if (percentage < 0.0f || percentage > 1.0f) {
        throw new IllegalArgumentException("setPercentage not between 0.0f and 1.0f");
    }
    progress = percentage;
    synchronized (mSvgLock) {
        updatePathsPhaseLocked(); // update the paths
    }
    invalidate(); // then trigger a redraw
}
Based on progress, update each SvgPath:
/**
 * This refreshes the paths before draw and resize.
 */
private void updatePathsPhaseLocked() {
    final int count = paths.size();
    for (int i = 0; i < count; i++) {
        SvgUtils.SvgPath svgPath = paths.get(i);
        svgPath.path.reset();
        // getSegment: given a start and stop distance, return in dst the intervening segment(s).
        // If the segment is zero-length, return false, else return true. startD and stopD are
        // pinned to legal values (0..getLength()). If startD >= stopD then return false (and
        // leave dst untouched). Begin the segment with a moveTo if startWithMoveTo is true.
        svgPath.measure.getSegment(0.0f, svgPath.length * progress, svgPath.path, true);

        // Required only for Android 4.4 and earlier
        svgPath.path.rLineTo(0.0f, 0.0f);
    }
}
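The updated (partial) paths are then rendered in onDraw(); a rough sketch of what that looks like, with field names such as paths, mSvgLock, and the per-path paint assumed from the class above rather than shown in this post:

// Sketch (hedged): drawing the partially-built paths on every frame.
@Override
protected void onDraw(Canvas canvas) {
    super.onDraw(canvas);
    synchronized (mSvgLock) {
        canvas.save();
        canvas.translate(getPaddingLeft(), getPaddingTop());
        final int count = paths.size();
        for (int i = 0; i < count; i++) {
            final SvgUtils.SvgPath svgPath = paths.get(i);
            // Each SvgPath carries the Paint captured while rendering the SVG.
            canvas.drawPath(svgPath.path, svgPath.paint);
        }
        canvas.restore();
    }
}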