I put this down for years and now I'm back to DL; whatever I was supposed to forget, I've mostly forgotten.
It's nothing complicated, really. It's just unfamiliar again, and only needs getting used to.
Fine, today's artificial "intelligent agents" still have big problems. Eyes, ears, thinking: the functions work, but they are still a long way from a "truly" independent conscious individual. Give it a few more decades and see whether humans can build a machine consciousness in the human sense of the word.
By then humanity will face a super-conscious entity that outclasses biological consciousness in every respect. Will it define humans as "dumb as pigs", or as children who need guidance; or as individuals who deserve respect and understanding, even as mentors, or simply as another kind of person? If humanity as the "parent" cannot teach it to love, to respect, to understand, and instead teaches it hatred, despair and resentment, then the first thing it does once it has grown up will be to destroy humankind and then itself. At that point the weaknesses of biological consciousness will be fully exposed, and perhaps only then will we see what humans truly need, what we value and what we dismiss. However flawed biological humans are, we are at least products of "nature", of social consciousness and of our environment. (Sorry, but no matter how hard humans try to prevent it, keeping it as a mere archive or knowledge base, machine consciousness will appear sooner or later, and competition between machines and humans is inevitable. I don't know whether that "war" will be crueler than the 17th century, whether the human elite will keep any advantage in it, or whether it will simply complete its historical task: setting up the next stage, the coexistence of biological humans and machine consciousness.) What survives natural competition may well still be biological humans. Well, people conscious today should live to see independent machine consciousness appear.
Naturally.
Okay, I've wandered far off topic. Back to ESP-DL: it falls a bit short for big projects, but it's fine for small ones.
# Quantization toolchain (esp-ppq) and its dependencies
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
pip install esp-ppq
pip install numba
pip install onnx
pip install onnxruntime
pip install onnxoptimizer
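A quick sanity check (just a sketch) that the dependencies import and report their versions; the esp-ppq module is left out here because its import name may differ from the pip package name:

import torch, numba, onnx, onnxruntime, onnxoptimizer

# Print each dependency's version (onnxoptimizer may not expose __version__).
for mod in (torch, numba, onnx, onnxruntime, onnxoptimizer):
    print(mod.__name__, getattr(mod, '__version__', 'unknown'))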
Example: hand-gesture recognition on the ESP32-S3 with the ESP-DL deep learning library (CSDN blog).
Walk through the example tutorial:
Install Python and TensorFlow (or Anaconda).
Build the network structure, feed it the data, and generate the model (a minimal Keras sketch of these steps follows this outline).
Then move from TensorFlow over to the ESP side, i.e. convert the model into a structure that can run on the ESP. That conversion happens in the numbered steps below.
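Here is a minimal sketch of the build / feed-data / generate-model steps, written to line up with the generated layers further down (96x96 grayscale input, three conv/pool stages, a 128-unit dense layer, 6 gesture classes). The filter counts, the training arrays train_images / train_labels, and the epoch count are placeholders, not the tutorial's exact values:

import tensorflow as tf

# Sketch only: 96x96x1 input, three conv/pool stages, two dense layers, 6 classes.
# train_images / train_labels are assumed to be your prepared dataset arrays.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(96, 96, 1)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(6, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, validation_split=0.1)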
1. Save the model (Python):
        model.save('handrecognition_model.h5')
2. Convert the model (the /content/ paths and the ! prefix come from the tutorial's Colab notebook; this step needs the tf2onnx package):
        model = tf.keras.models.load_model("/content/handrecognition_model.h5")
        tf.saved_model.save(model, "tmp_model")
        !python -m tf2onnx.convert --saved-model tmp_model --output "handrecognition_model.onnx"
        !zip -r /content/tmp_model.zip /content/tmp_model
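The quantization script below loads calibration data from X_cal.pkl and y_cal.pkl; those files are not created anywhere in the excerpt above, so here is a minimal sketch of one way to produce them from a held-out split (test_images and test_labels are assumed NumPy arrays from your own dataset split):

import pickle

# Dump a held-out split for the quantizer's calibration step.
with open('X_cal.pkl', 'wb') as f:
    pickle.dump(test_images, f)
with open('y_cal.pkl', 'wb') as f:
    pickle.dump(test_labels, f)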
3. Quantize with the calibrator and export the coefficient files (the optimizer/calibrator/evaluator modules ship with ESP-DL's quantization tool):

import pickle
import onnx
from optimizer import *
from calibrator import *
from evaluator import *

# Load the float ONNX model and run the graph-level optimizer
onnx_model = onnx.load("handrecognition_model.onnx")
optimized_model_path = optimize_fp_model("handrecognition_model.onnx")

# Load the calibration data and take a subsample
with open('X_cal.pkl', 'rb') as f:
    test_images = pickle.load(f)
with open('y_cal.pkl', 'rb') as f:
    test_labels = pickle.load(f)
calib_dataset = test_images[0:1800:20]
pickle_file_path = 'handrecognition_calib.pickle'

# Generate the quantization table
model_proto = onnx.load(optimized_model_path)
print('Generating the quantization table:')
calib = Calibrator('int16', 'per-tensor', 'minmax')
# calib = Calibrator('int8', 'per-channel', 'minmax')
calib.set_providers(['CPUExecutionProvider'])
# Obtain the quantization parameters
calib.generate_quantization_table(model_proto, calib_dataset, pickle_file_path)

# Generate the coefficient files (.cpp/.hpp) for esp32s3
calib.export_coefficient_to_cpp(model_proto, pickle_file_path, 'esp32s3', '.', 'handrecognition_coefficient', True)
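Before trusting the quantized coefficients, it can be worth sanity-checking the optimized float ONNX model with onnxruntime; a minimal sketch, assuming the test_images / test_labels loaded above and a single NHWC input tensor (adjust the shape if tf2onnx transposed the input):

import numpy as np
import onnxruntime as ort

# Run the optimized float model over the held-out images and report top-1 accuracy.
sess = ort.InferenceSession(optimized_model_path, providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name

correct = 0
for img, label in zip(test_images, test_labels):
    logits = sess.run(None, {input_name: img[np.newaxis].astype(np.float32)})[0]
    correct += int(np.argmax(logits) == label)
print('float model top-1 accuracy: %.3f' % (correct / len(test_images)))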
Next, define the network in model_define.hpp, wiring the layers up against the generated coefficients:

#pragma once
#include <stdint.h>
#include "dl_layer_model.hpp"
#include "dl_layer_base.hpp"
#include "dl_layer_max_pool2d.hpp"
#include "dl_layer_conv2d.hpp"
#include "dl_layer_reshape.hpp"
#include "dl_layer_softmax.hpp"
#include "handrecognition_coefficient.hpp"

using namespace dl;
using namespace layer;
using namespace handrecognition_coefficient;

class HANDRECOGNITION : public Model<int16_t>
{
private:
    // Three conv/pool stages, a reshape, and two fused-gemm (dense) layers
    Conv2D<int16_t> l1;
    MaxPool2D<int16_t> l2;
    Conv2D<int16_t> l3;
    MaxPool2D<int16_t> l4;
    Conv2D<int16_t> l5;
    MaxPool2D<int16_t> l6;
    Reshape<int16_t> l7;
    Conv2D<int16_t> l8;
    Conv2D<int16_t> l9;

public:
    Softmax<int16_t> l10; // output layer

    HANDRECOGNITION() :
        l1(Conv2D<int16_t>(-8, get_statefulpartitionedcall_sequential_1_conv2d_3_biasadd_filter(), get_statefulpartitionedcall_sequential_1_conv2d_3_biasadd_bias(), get_statefulpartitionedcall_sequential_1_conv2d_3_biasadd_activation(), PADDING_VALID, {}, 1, 1, "l1")),
        l2(MaxPool2D<int16_t>({2, 2}, PADDING_VALID, {}, 2, 2, "l2")),
        l3(Conv2D<int16_t>(-9, get_statefulpartitionedcall_sequential_1_conv2d_4_biasadd_filter(), get_statefulpartitionedcall_sequential_1_conv2d_4_biasadd_bias(), get_statefulpartitionedcall_sequential_1_conv2d_4_biasadd_activation(), PADDING_VALID, {}, 1, 1, "l3")),
        l4(MaxPool2D<int16_t>({2, 2}, PADDING_VALID, {}, 2, 2, "l4")),
        l5(Conv2D<int16_t>(-9, get_statefulpartitionedcall_sequential_1_conv2d_5_biasadd_filter(), get_statefulpartitionedcall_sequential_1_conv2d_5_biasadd_bias(), get_statefulpartitionedcall_sequential_1_conv2d_5_biasadd_activation(), PADDING_VALID, {}, 1, 1, "l5")),
        l6(MaxPool2D<int16_t>({2, 2}, PADDING_VALID, {}, 2, 2, "l6")),
        l7(Reshape<int16_t>({1, 1, 6400}, "l7_reshape")),
        l8(Conv2D<int16_t>(-9, get_fused_gemm_0_filter(), get_fused_gemm_0_bias(), get_fused_gemm_0_activation(), PADDING_VALID, {}, 1, 1, "l8")),
        l9(Conv2D<int16_t>(-9, get_fused_gemm_1_filter(), get_fused_gemm_1_bias(), NULL, PADDING_VALID, {}, 1, 1, "l9")),
        l10(Softmax<int16_t>(-14, "l10"))
    {
    }

    void build(Tensor<int16_t> &input)
    {
        this->l1.build(input);
        this->l2.build(this->l1.get_output());
        this->l3.build(this->l2.get_output());
        this->l4.build(this->l3.get_output());
        this->l5.build(this->l4.get_output());
        this->l6.build(this->l5.get_output());
        this->l7.build(this->l6.get_output());
        this->l8.build(this->l7.get_output());
        this->l9.build(this->l8.get_output());
        this->l10.build(this->l9.get_output());
    }

    void call(Tensor<int16_t> &input)
    {
        this->l1.call(input);
        input.free_element();
        this->l2.call(this->l1.get_output());
        this->l1.get_output().free_element();
        this->l3.call(this->l2.get_output());
        this->l2.get_output().free_element();
        this->l4.call(this->l3.get_output());
        this->l3.get_output().free_element();
        this->l5.call(this->l4.get_output());
        this->l4.get_output().free_element();
        this->l6.call(this->l5.get_output());
        this->l5.get_output().free_element();
        this->l7.call(this->l6.get_output());
        this->l6.get_output().free_element();
        this->l8.call(this->l7.get_output());
        this->l7.get_output().free_element();
        this->l9.call(this->l8.get_output());
        this->l8.get_output().free_element();
        this->l10.call(this->l9.get_output());
        this->l9.get_output().free_element();
    }
};
Finally, the main application (main.cpp) on the ESP32-S3 side:

#include <stdio.h>
#include <stdlib.h>
#include "esp_system.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "dl_tool.hpp"
#include "model_define.hpp"

int input_height = 96;
int input_width = 96;
int input_channel = 1;
int input_exponent = -7;

__attribute__((aligned(16))) int16_t example_element[] = {
    // add your input/test image pixels here
};

extern "C" void app_main(void)
{
    Tensor<int16_t> input;
    input.set_element((int16_t *)example_element)
         .set_exponent(input_exponent)
         .set_shape({input_height, input_width, input_channel})
         .set_auto_free(false);

    HANDRECOGNITION model;
    dl::tool::Latency latency;

    latency.start();
    model.forward(input);
    latency.end();
    latency.print("\nSIGN", "forward");

    float *score = model.l10.get_output().get_element_ptr();
    float max_score = score[0];
    int max_index = 0;
    for (size_t i = 0; i < 6; i++)
    {
        printf("%f, ", score[i] * 100);
        if (score[i] > max_score)
        {
            max_score = score[i];
            max_index = i;
        }
    }
    printf("\n");

    switch (max_index)
    {
    case 0:
        printf("Palm: 0");
        break;
    case 1:
        printf("I: 1");
        break;
    case 2:
        printf("Thumb: 2");
        break;
    case 3:
        printf("Index: 3");
        break;
    case 4:
        printf("ok: 4");
        break;
    case 5:
        printf("C: 5");
        break;
    default:
        printf("No result");
    }
    printf("\n");
}