mnn - YingkunZhou/transfer-learning GitHub Wiki

/* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/VulkanRuntime.cpp:206 */
static bool gResistor = []() {
    MNNInsertExtraRuntimeCreator(MNN_FORWARD_VULKAN, new VulkanRuntimeCreator, false);
    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:50 */
    bool MNNInsertExtraRuntimeCreator(MNNForwardType type, const RuntimeCreator* creator, bool needCheck) {
        auto& gExtraCreator = GetExtraCreator();
        gExtraCreator.insert(std::make_pair(type, std::make_pair(creator, needCheck)));
    }
    return false;
}();
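
// Aside: this is the static-initializer self-registration idiom: a file-scope
// bool is initialized by an immediately-invoked lambda, so the Vulkan creator
// lands in the global registry before main() runs, with no explicit init call.
// A minimal standalone sketch of the idiom (illustrative names, not MNN's API):
#include <map>
struct Creator {};
static std::map<int, Creator*>& registry() {
    static std::map<int, Creator*> gMap;   // constructed on first use
    return gMap;
}
static bool gRegistered = []() {
    registry().emplace(/*type=*/1, new Creator);  // runs during static init
    return false;                                 // the bool itself is unused
}();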

std::shared_ptr<MNN::Interpreter> net(MNN::Interpreter::createFromFile(model_file.c_str()));

/* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:91 */
Interpreter* Interpreter::createFromFile(const char* file) {
    Content* net = loadModelFile(file);
    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:63 */
    static Content* loadModelFile(const char* file) {
        std::unique_ptr<FileLoader> loader(new FileLoader(file));
        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/FileLoader.cpp:14 */
        FileLoader::FileLoader(const char* file) {
            mFile = fopen(file, "rb");
        }
        bool result = loader->read();
        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/FileLoader.cpp:45 */
        bool FileLoader::read() {
            auto block = MNNMemoryAllocAlign(gCacheSize, MNN_MEMORY_ALIGN_DEFAULT);
            size = fread(block, 1, gCacheSize, mFile);
            mTotalSize += size;
            mBlocks.push_back(std::make_pair(size, block));
        }
        bool success = loader->merge(net->buffer);
        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/FileLoader.cpp:103 */
        bool FileLoader::merge(AutoStorage<uint8_t>& buffer) {
            buffer.reset((int)mTotalSize);
            ::memcpy(dst + offset, iter.second, iter.first);
        }
        loader.reset();
    }
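    // Aside: loadModelFile is a two-phase read: FileLoader::read() pulls the
    // file in gCacheSize-sized chunks into a (size, block) list, and merge()
    // then concatenates the list into one contiguous buffer. Standalone sketch
    // of the same pattern (plain C++, kCache is an illustrative constant):
    #include <cstdio>
    #include <cstdint>
    #include <cstdlib>
    #include <cstring>
    #include <vector>
    std::vector<uint8_t> readAll(FILE* fp) {
        const size_t kCache = 4096;
        std::vector<std::pair<size_t, void*>> blocks;
        size_t total = 0;
        for (;;) {
            void* block = malloc(kCache);
            size_t n = fread(block, 1, kCache, fp);
            if (n == 0) { free(block); break; }
            blocks.emplace_back(n, block);
            total += n;
        }
        std::vector<uint8_t> merged(total);
        size_t offset = 0;
        for (auto& b : blocks) {
            memcpy(merged.data() + offset, b.second, b.first);
            offset += b.first;
            free(b.second);
        }
        return merged;
    }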
    return createFromBufferInternal(net, true);
    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:115 */
    Interpreter* Interpreter::createFromBufferInternal(Content* net, bool enforceAuth) {
        VerifyNetBuffer(verify)
        net->net = GetNet(net->buffer.get());
        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/schema/current/MNN_generated.h:8328 */
        inline const MNN::Net *GetNet(const void *buf) {
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/3rd_party/flatbuffers/include/flatbuffers/flatbuffers.h:1900 */
            const MNN::Net* flatbuffers::GetRoot<MNN::Net>(const void* buf) {
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/3rd_party/flatbuffers/include/flatbuffers/flatbuffers.h:1893 */
                template<typename T> T *GetMutableRoot(void *buf)
            }
         }
        int opSize = net->net->oplists()->size();
        auto op = net->net->oplists()->GetAs<Op>(i);
        return new Interpreter(net);
        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:226 */
        Interpreter::Interpreter(Content* net) {
            mNet->bizCode = std::string(mNet->net->bizCode() ? mNet->net->bizCode()->c_str() : "");
            mNet->uuid = std::string(mNet->net->mnn_uuid() ? mNet->net->mnn_uuid()->c_str() : "");
        }
    }
}
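
// Aside: GetNet() is zero-copy flatbuffers access: nothing is parsed or
// unpacked, GetRoot<T> just returns a typed view anchored at the root offset.
// Minimal sketch, assuming the flatc-generated MNN_generated.h is on the
// include path:
#include <cstdint>
#include <vector>
#include "MNN_generated.h"
const MNN::Net* viewNet(const std::vector<uint8_t>& buffer) {
    // Returns a pointer into buffer's bytes; buffer must outlive the view.
    return flatbuffers::GetRoot<MNN::Net>(buffer.data());
}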

net->setSessionMode(MNN::Interpreter::Session_Release);

/* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:156 */
void Interpreter::setSessionMode(SessionMode mode) {
    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/include/MNN/Interpreter.hpp:134 */
}

auto session = net->createSession(config);

/* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:351 */
Session* Interpreter::createSession(const ScheduleConfig& config) {
    return createMultiPathSession({config});
    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:249 */
    Session* Interpreter::createMultiPathSession(const std::vector<ScheduleConfig>& configs) {
        RuntimeInfo runtime = createRuntime(configs);
        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:614 */
        RuntimeInfo Interpreter::createRuntime(const std::vector<ScheduleConfig>& configs) {
            compute.type      = Schedule::getApprociateType(config);
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Schedule.cpp:31 */
            MNNForwardType Schedule::getApprociateType(const ScheduleConfig& config) {
                auto creator = MNNGetExtraRuntimeCreator(type);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:30 */
                const RuntimeCreator* MNNGetExtraRuntimeCreator(MNNForwardType type) {
                    registerBackend();
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BackendRegister.cpp:31 */
                    void registerBackend() {
                        registerCPURuntimeCreator();
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:515 */
                        void registerCPURuntimeCreator() {
                            CPUBackend::initCreatorMap();
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:202 */
                            void CPUBackend::initCreatorMap() {
                                gCreator = new std::map<OpType, CPUBackend::Creator*>;
                            }
                            registerCPUOps();
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUOPRegister.cpp:71 */
                            void registerCPUOps() {
                                ___CPUCropAndResizeCreator__OpType_CropAndResize__();
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUCropAndResize.cpp:175 */
                                REGISTER_CPU_OP_CREATOR(CPUCropAndResizeCreator, OpType_CropAndResize);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:206 */
                                bool CPUBackend::addCreator(OpType t, Creator* c)
                                ...
                            }
                            MNNCoreFunctionInit();
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/compute/CommonOptFunction.cpp:2922 */
                            void MNNCoreFunctionInit() {
                                gCoreFunction->... ...
                                cpuinfo_arm_init(&gCPUInfo);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPURuntime.cpp:1333 */
                                void cpuinfo_arm_init(struct cpuinfo_arm_isa* cpuinfo_isa) {
                                    memset(cpuinfo_isa, 0, sizeof(struct cpuinfo_arm_isa));
                                    isa_features = (uint32_t)getauxval(AT_HWCAP);
                                    (isa_features & CPUINFO_ARM_LINUX_FEATURE_ASIMDDP) --> cpuinfo_isa->dot = true;
                                    const uint32_t fp16arith_mask = CPUINFO_ARM_LINUX_FEATURE_FPHP | CPUINFO_ARM_LINUX_FEATURE_ASIMDHP;
                                    ((isa_features & fp16arith_mask) == fp16arith_mask) --> cpuinfo_isa->fp16arith = true;
                                    <<MNN_PRINT("The device support i8sdot:%d, support fp16:%d, support i8mm: %d\n", cpuinfo_isa->dot, cpuinfo_isa->fp16arith, cpuinfo_isa->i8mm);
                                }
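                                // Aside: this probe is plain Linux auxiliary-vector inspection.
                                // Standalone sketch for aarch64 Linux (the HWCAP_* constants come
                                // from <asm/hwcap.h>):
                                #include <sys/auxv.h>
                                #include <asm/hwcap.h>
                                #include <cstdio>
                                int main() {
                                    unsigned long hwcap = getauxval(AT_HWCAP);
                                    bool dot = (hwcap & HWCAP_ASIMDDP) != 0;    // int8 dot product (sdot)
                                    const unsigned long fp16Mask = HWCAP_FPHP | HWCAP_ASIMDHP;
                                    bool fp16 = (hwcap & fp16Mask) == fp16Mask; // fp16 scalar + vector arith
                                    printf("support i8sdot:%d, support fp16:%d\n", (int)dot, (int)fp16);
                                    return 0;
                                }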
                                MNNCoreInt8FunctionInit();
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/compute/Int8FunctionsOpt.cpp:2019 */
                                void MNNCoreInt8FunctionInit() {
                                    gCoreFunc = new CoreInt8Functions;
                                    auto core = MNNGetCoreFunctions(); {
                                        return gCoreFunction;
                                    }
                                    // do nothing
                                    MNNInt8FunctionInit();
                                }
                                // Do nothing
                                MNNFunctionInit();
                            }
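                            // Aside: gCoreFunction is a function-pointer table filled once at
                            // startup, so hot kernels dispatch on detected CPU features without
                            // per-call branching. Minimal sketch of the pattern (illustrative
                            // names, not MNN's real table):
                            #include <cstddef>
                            struct CoreFunctions { void (*addFloat)(float*, const float*, size_t); };
                            static CoreFunctions* gCore = nullptr;
                            static void addScalar(float* d, const float* s, size_t n) {
                                for (size_t i = 0; i < n; ++i) d[i] += s[i];
                            }
                            void coreInit(bool hasSIMD) {
                                gCore = new CoreFunctions;
                                gCore->addFloat = addScalar;   // default scalar kernel
                                if (hasSIMD) { /* install a SIMD variant here instead */ }
                            }
                            CoreFunctions* getCore() { return gCore; }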
                            MNNInsertExtraRuntimeCreator(MNN_FORWARD_CPU, new CPURuntimeCreator);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:50 */
                            bool MNNInsertExtraRuntimeCreator(MNNForwardType type, const RuntimeCreator* creator, bool needCheck) {
                                auto& gExtraCreator = GetExtraCreator(); {
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:24 */
                                    std::call_once(gInitFlag, [&]() { gExtraCreator = new std::map<MNNForwardType, std::pair<const RuntimeCreator*, bool>>; });
                                }
                                gExtraCreator.insert(std::make_pair(type, std::make_pair(creator, needCheck)));
                            }
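                            // Aside: GetExtraCreator() lazily allocates the global map exactly
                            // once; std::call_once makes concurrent first uses safe. Sketch of
                            // the pattern:
                            #include <map>
                            #include <mutex>
                            static std::map<int, const void*>* gExtra = nullptr;
                            static std::once_flag gInitFlag;
                            static std::map<int, const void*>& extraCreators() {
                                std::call_once(gInitFlag, [] { gExtra = new std::map<int, const void*>; });
                                return *gExtra;
                            }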

                        }
                        SizeComputerSuite::init();
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/SizeComputer.cpp:26 */
                        void SizeComputerSuite::init() {
                            gInstance = new SizeComputerSuite;
                            gInstance->mRegistry.resize(OpType_MAX + 1);
                            ::memset(gInstance->mRegistry.data(), 0, gInstance->mRegistry.size() * sizeof(SizeComputer*));
                            registerShapeOps();
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/ShapeRegister.cpp:113 */
                            void registerShapeOps() {
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/ShapeShape.cpp:37 */
                                ___ShapeSizeComputer__OpType_Shape__();
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/ShapeShape.cpp:15 */
                            }
                        }
                        GeometryComputer::init()
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryComputer.cpp:212 */
                        void GeometryComputer::init() {
                            GeometryComputerManager::init();
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryComputer.cpp:183 */
                            static void init() {
                                gInstance = new GeometryComputerManager;
                                gInstance->mTable.resize(OpType_MAX + 1);
                                gInstance->mLoopTable.resize(OpType_MAX + 1);
                            }
                            registerGeometryOps();
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryOPRegister.cpp:45 */
                            void registerGeometryOps() {
                                ___GeometryShape___create__();
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryShape.cpp:283 */
                                REGISTER_GEOMETRY(GeometryShape, _create);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryShape.cpp:271 */
                                static void _create() {
                                    std::shared_ptr<GeometryComputer> comp(new GeometryShape);
                                    GeometryComputer::registerGeometryComputer(comp, {OpType_Shape});
                                    std::shared_ptr<GeometryComputer> comp1(new GeometryRank);
                                    GeometryComputer::registerGeometryComputer(comp1, {OpType_Rank});
                                    std::shared_ptr<GeometryComputer> comp2(new GeometryPriorBox);
                                    GeometryComputer::registerGeometryComputer(comp2, {OpType_PriorBox});
                                    std::shared_ptr<GeometryComputer> comp3(new GeometrySize);
                                    GeometryComputer::registerGeometryComputer(comp3, {OpType_Size});
                                    std::shared_ptr<GeometryComputer> comp4(new GeometryRaster);
                                    GeometryComputer::registerGeometryComputer(comp4, {OpType_Raster});
                                }
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryPermute.cpp:227 */
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryPermute.cpp:223 */
                            }
                        }
                    }
                    auto& gExtraCreator = GetExtraCreator();
                    auto iter           = gExtraCreator.find(type);
                    return iter->second.first;
                }
            }
            mRuntimes.find(compute.type) == mRuntimes.end() -->
            auto newBn = RuntimeFactory::create(compute);
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/RuntimeFactory.cpp:15 */
            Runtime* RuntimeFactory::create(const Backend::Info& info) {
                auto creator = MNNGetExtraRuntimeCreator(info.type);
                auto runtime = creator->onCreate(info);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/VulkanRuntime.cpp:190 */
                virtual Runtime* onCreate(const Backend::Info& info) const {
                    InitVulkan()
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/vulkan_wrapper.cpp:314 */
                    int InitVulkan(void) {
                        std::call_once(gFlag, [] {gSuccess = InitVulkanOnce();});
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/vulkan_wrapper.cpp:41 */
                        int InitVulkanOnce(void) {
                            for (const auto& s : gVulkan_library_paths)
                            libvulkan = dlopen(s.c_str(), RTLD_NOW | RTLD_LOCAL);
                            vkCreateInstance  = reinterpret_cast<PFN_vkCreateInstance>(MNN_DLSYM(libvulkan, "vkCreateInstance"));
                            vkDestroyInstance = reinterpret_cast<PFN_vkDestroyInstance>(MNN_DLSYM(libvulkan, "vkDestroyInstance"));
                            ... ...
                        }
                    }
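                    // Aside: MNN never links libvulkan directly; it dlopen()s the driver at
                    // runtime and resolves each entry point via dlsym, so the Vulkan backend
                    // simply reports failure on machines without a driver. Sketch of the
                    // loading pattern ("libvulkan.so.1" is the usual Linux soname, one of
                    // several paths MNN tries):
                    #include <dlfcn.h>
                    #include <vulkan/vulkan.h>
                    static PFN_vkCreateInstance pCreateInstance = nullptr;
                    static bool loadVulkan() {
                        void* lib = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL);
                        if (!lib) return false;   // no Vulkan driver installed
                        pCreateInstance =
                            reinterpret_cast<PFN_vkCreateInstance>(dlsym(lib, "vkCreateInstance"));
                        return pCreateInstance != nullptr;
                    }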
                    _testVulkan()
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/VulkanRuntime.cpp:171 */
                    static bool _testVulkan() {
                        std::unique_ptr<VulkanInstance> instance(new VulkanInstance());
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/VulkanRuntime.cpp:171 */
                        VulkanInstance::VulkanInstance() : mOwner(true), mInstance(VK_NULL_HANDLE)
                    }
                    return new VulkanRuntime(info);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/VulkanRuntime.cpp:38 */
                    VulkanRuntime::VulkanRuntime(const Backend::Info& info) {
                        static std::map<std::string, float> gFlopsMap
                        std::string deviceName = dev.proty().deviceName;
                        deviceName.find("Mali") == std::string::npos
                        deviceName.find("Adreno") == std::string::npos
                    }
                }
            }
            _getDefaultBackend(res);
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:602 */
            static void _getDefaultBackend(RuntimeInfo& rt) {
                info.type      = defaultType;
                rt.second.reset(RuntimeFactory::create(info));
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/RuntimeFactory.cpp:15 */
                Runtime* RuntimeFactory::create(const Backend::Info& info) {
                    auto creator = MNNGetExtraRuntimeCreator(info.type);
                    auto runtime = creator->onCreate(info);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:503 */
                    virtual Runtime* onCreate(const Backend::Info& info) const override {
                        return new CPURuntime(info);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:53 */
                        CPURuntime::CPURuntime(const Backend::Info& info) {
                            mFlops = MNNGetCPUFlops(mThreadNumber);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPURuntime.cpp:357 */
                            float MNNGetCPUFlops(uint32_t number) {
                                // only for #ifdef __ANDROID__
                            }
                        }
                    }
                }
            }
        }
        runtime.second->setExternalFile(mNet->externalFile);
        return createMultiPathSession(configs, std::move(runtime));
        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Interpreter.cpp:259 */
        Session* Interpreter::createMultiPathSession(const std::vector<ScheduleConfig>& configs, const RuntimeInfo& runtime) {
            auto success = Schedule::schedule(info, mNet->net, configs, runtime);
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Schedule.cpp:205 */
            bool Schedule::schedule(ScheduleInfo& scheduleInfo, const Net* net, const std::vector<ScheduleConfig>& configs, const RuntimeInfo& runtimeInfo) {
                scheduleInfo.defaultBackend.reset(runtimeInfo.second->onCreate(&defaultConfig));
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:120 */
                Backend* CPURuntime::onCreate(const BackendConfig* config) const {
                    flags == MNN_CPU_USE_DEFAULT_BACKEND
                    return new CPUBackend(this, precision, memory, MNN_FORWARD_CPU, 0);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:215 */
                    CPUBackend::CPUBackend(const CPURuntime* runtime, BackendConfig::PrecisionMode precision, BackendConfig::MemoryMode memory, MNNForwardType type, size_t flags) : Backend(type) {
                        mCoreFunctions = MNNGetCoreFunctions();
                        mInt8CoreFunctions = MNNGetInt8CoreFunctions();
                        mCache = new CPUResizeCache;
                    }
                }
                initConstTensors(scheduleInfo.allTensors, net, scheduleInfo.defaultBackend.get(), code);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/utils/InitNet.cpp:22 */
                bool initConstTensors(std::vector<std::shared_ptr<Tensor>>& tensors, const Net* net, Backend* defaultBackend, ErrorCode& code) {
                    TensorUtils::getDescribe(tensors[index].get())->index = index;
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/TensorUtils.cpp:19 */
                    auto res = defaultBackend->onAcquireBuffer(output, Backend::STATIC);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:76 */
                    bool Backend::onAcquireBuffer(const Tensor* tensor, StorageType storageType) {
                        auto mem = this->onAcquire(tensor, storageType);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:315 */
                        Backend::MemObj* CPUBackend::onAcquire(const MNN::Tensor* nativeTensorConst, StorageType storageType) {
                            auto size = getTensorSize(nativeTensor, true);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:341 */
                            int CPUBackend::getTensorSize(const Tensor* tensor, bool multiBytes) const {
                                auto des = TensorUtils::getDescribe(tensor);
                            }
                            return allocBuffer(size, nativeTensor, storageType);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:262 */
                            Backend::MemObj* CPUBackend::allocBuffer(int size, Tensor* dest, StorageType storageType) {
                                auto originMem = TensorUtils::getDescribe(dest)->mem.get();
                                auto& buffer = dest->buffer();
                                auto des = TensorUtils::getDescribe(dest);
                                case STATIC: points = mStaticAllocator->alloc(size, false);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:70 */
                                std::pair<void*, size_t> BufferAllocator::alloc(size_t size, bool separate, size_t align) {
                                    pointer = getFromFreeList(&mFreeList, size, true, align);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:209 */
                                    std::pair<void*, size_t> BufferAllocator::getFromFreeList(FREELIST* list, size_t size, bool permiteSplit, size_t align)
                                    pointer = mAllocator->onAlloc(size, align);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:24 */
                                    virtual std::pair<void*, size_t> onAlloc(size_t size, size_t align) {
                                        return std::make_pair(MNNMemoryAllocAlign(size, MNN_MEMORY_ALIGN_DEFAULT), 0);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/MNNMemoryUtils.cpp:19 */
                                        extern "C" void *MNNMemoryAllocAlign(size_t size, size_t alignment) {
                                            void **origin = (void **)malloc(size + sizeof(void *) + alignment);
                                            void **aligned = alignPointer(origin + 1, alignment);
                                            aligned[-1]    = origin;
                                            return aligned;
                                        }
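                                        // Aside: the trick above over-allocates, rounds the pointer up
                                        // to the alignment, and stashes malloc's original pointer one
                                        // slot before the aligned address so the matching free can
                                        // recover it. Standalone sketch (assumes alignment is a power
                                        // of two):
                                        #include <cstdlib>
                                        #include <cstdint>
                                        void* allocAlign(size_t size, size_t alignment) {
                                            void** origin = (void**)malloc(size + sizeof(void*) + alignment);
                                            if (origin == nullptr) return nullptr;
                                            uintptr_t raw  = (uintptr_t)(origin + 1);
                                            void** aligned = (void**)((raw + alignment - 1) & ~(uintptr_t)(alignment - 1));
                                            aligned[-1] = origin;   // remember where malloc's block starts
                                            return aligned;
                                        }
                                        void freeAlign(void* aligned) {
                                            if (aligned != nullptr) free(((void**)aligned)[-1]);
                                        }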
                                    }
                                    SharedPtr<Node> node(new Node);
                                }
                                res = new CPUMemObj(mStaticAllocator.get(), points, size); | Backend::MemObj* res = nullptr;
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:244 */
                                CPUMemObj(BufferAllocator* allocator, std::pair<void*, int> points, int size)
                            }
                        }
                        TensorUtils::getDescribe(tensor)->mem.reset(mem);
                    }
                    OpCommonUtils::loadBlobData(defaultBackend, op, output->host<char>(), output->size());
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/OpCommonUtils.cpp:18 */
                    void OpCommonUtils::loadBlobData(Backend* backend, const Op* op, char* ptr, int size) {
                        result = (void*)b->float32s()->Data();
                        ::memcpy(ptr, result, size);
                    }
                }
                bool valid = initTensors(scheduleInfo.allTensors, net);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/utils/InitNet.cpp:110 */
                bool initTensors(std::vector<std::shared_ptr<Tensor>>& tensors, const Net* net) {
                    auto describes = net->extraTensorDescribe();
                    // Init all tensor except for const
                    // Set Input Tensor, if the type of input is not the same with ExtraTensorDescribe, use input parameter
                    tensor->setType(inputParam->dtype());
                    TensorUtils::getDescribe(tensor)->dimensionFormat = inputParam->dformat();
                    TensorUtils::setLinearLayout(tensor);
                    net->usage() != Usage_INFERENCE_STATIC
                }
                auto oplists      = _scheduleUnit(net, config, allTensors);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Schedule.cpp:197 */
                static vector<Schedule::OpCacheInfo> _scheduleUnit(const Net* net, const ScheduleConfig& configs, const vector<shared_ptr<Tensor>>& allTensors) {
                    generateScheduleGraph(ops, net, configs, allTensors);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Schedule.cpp:83 */
                    static void generateScheduleGraph(vector<const Op*>& ops, const Net* net, const ScheduleConfig& configs, const vector<shared_ptr<Tensor>>& allTensors) {
                        ops.clear();
                        ops.reserve(net->oplists()->size());
                        auto op = net->oplists()->GetAs<Op>(i);
                        ops.emplace_back(op);
                    }
                    initPipelineInfosFromOps(oplists, ops, allTensors);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/utils/InitNet.cpp:219 */
                    void initPipelineInfosFromOps(std::vector<Schedule::OpCacheInfo>& infos, std::vector<const Op*>& ops, const std::vector<std::shared_ptr<Tensor>>& allTensors) {
                        Schedule::OpCacheInfo opInfo;
                        opInfo.outputs.push_back(allTensors[data[j]].get());
                        opInfo.inputs.push_back(allTensors[data[j]].get());
                        needComputeOp(op)
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/utils/InitNet.cpp:16 */
                        bool needComputeOp(const Op* op) {
                            (op->type() != OpType_Input && op->type() != OpType_Const && op->type() != OpType_TrainableParam)
                        } -->
                        infos.emplace_back(std::move(opInfo));
                    }
                }
                cache.info = std::move(compute); | Schedule::BackendCache cache;
                result.emplace_back(std::make_pair(cache, std::move(oplists)));
                scheduleInfo.pipelineInfo = std::move(result);
                // get all used op's output, drop unused op, won't change op order. always insert all Input Ops
                oplists.push_back(info.op); | std::vector<const Op*> oplists;
                // set tensors' input/output usage by oplists info
                setInputOutputForOps(allTensors, oplists, net->usage() == Usage_INFERENCE_STATIC);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/utils/InitNet.cpp:243 */
                void setInputOutputForOps(std::vector<std::shared_ptr<Tensor>>& allTensors, const std::vector<const Op*>& ops, bool isStatic) {
                    // 1. insert all output/input index in outputIndexes/inputIndexes
                    outputIndexes.insert(data[j]); | auto data = op->outputIndexes()->data();
                    inputIndexes.insert(data[j]); | auto data = op->inputIndexes()->data();
                    // 2. an index in outputIndexes but not in inputIndexes is a model output
                    //    (produced but never consumed); conversely, an index in inputIndexes
                    //    but not in outputIndexes is a model input (consumed but never produced)
                    std::set_difference(outputIndexes.begin(), outputIndexes.end(), inputIndexes.begin(), inputIndexes.end(), std::inserter(output, output.begin()));
                    std::set_difference(inputIndexes.begin(), inputIndexes.end(), outputIndexes.begin(), outputIndexes.end(), std::inserter(input, input.begin()));
                    // 3. set usage for Tensor by index
                }
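                // Aside: the two set differences fall out naturally: an index some op
                // produces but no op consumes must be a model output, and an index some op
                // consumes but no op produces must be a model input. Standalone sketch:
                #include <set>
                #include <vector>
                #include <algorithm>
                #include <iterator>
                static std::vector<int> setMinus(const std::set<int>& a, const std::set<int>& b) {
                    std::vector<int> out;
                    std::set_difference(a.begin(), a.end(), b.begin(), b.end(),
                                        std::back_inserter(out));
                    return out;
                }
                // modelOutputs = setMinus(outputIndexes, inputIndexes);
                // modelInputs  = setMinus(inputIndexes,  outputIndexes);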
                // add output index by config info and outputName
                std::unordered_map<std::string, int> tensorNameIndexMap;
                tensorNameIndexMap[net->tensorName()->Get(i)->str()] = i;
                scheduleInfo.outputTensor.insert(std::make_pair(net->tensorName()->GetAsString(iter->second)->c_str(), t));
                // add input/output tensor to schedule's input/output
                usage == Tensor::InsideDescribe::INPUT --> scheduleInfo.inputTensors.insert(std::make_pair(net->tensorName()->GetAsString(index)->c_str(), t));
                usage == Tensor::InsideDescribe::OUTPUT && (!userSetOutput) --> scheduleInfo.outputTensor.insert(std::make_pair(net->tensorName()->GetAsString(index)->c_str(), t));
            }
            auto newSession = std::unique_ptr<Session>(new Session(std::move(info), mNet->modes, std::move(rt)));
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Session.cpp:49 */
            Session::Session(Schedule::ScheduleInfo&& info, const ModeGroup& mode, RuntimeInfo&& runtime) {
                _createPipelineBackend(iter, mRuntime);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Session.cpp:26 */
                static void _createPipelineBackend(Schedule::PipelineInfo& iter, RuntimeInfo& runtime) {
                    auto rt    = runtime.first.find(iter.first.info.type)->second.get();
                    auto cpuRuntime = runtime.second;
                    iter.first.cache.first.reset(rt->onCreate(iter.first.info.user));
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/VulkanRuntime.cpp:150 */
                    Backend* VulkanRuntime::onCreate(const BackendConfig* config) const {
                        return new VulkanBackend(this, mInfo);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:59 */
                        VulkanBackend::VulkanBackend(const VulkanRuntime* runtime, const Backend::Info& info) : Backend(MNN_FORWARD_VULKAN) {
                            mDynamicMemoryPool.reset(new VulkanMemoryPool(runtime->mMemoryPool.get()));
                            auto& dev              = device();
                            mFence                 = std::make_shared<VulkanFence>(dev);
                            mDirect --> mInitBuffer.reset(runtime->mCmdPool->allocBuffer());
                        }
                    }
                    iter.first.cache.first->type() != MNN_FORWARD_CPU --> iter.first.cache.second.reset(cpuRuntime->onCreate(&defaultConfig));
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:120 */ {
                        MNN_PRINT("cpu backend was created by runtime:%p\n", this);
                        MNN_PRINT("cpu backend create\n");
                    }
                    std::shared_ptr<Pipeline> newPipeline(new Pipeline(std::move(iter), mode.inputMode == Interpreter::Session_Input_Inside, mode.outputMode == Interpreter::Session_Output_User, attr, rt, cpuRuntime.get()));
                    mCallBackMode = mode.callBackMode;
                    mMemoryUsageMode = mode.memoryUsageMode;
                    mCodegenMode = mode.codegenMode;
                }
            }
            auto result = newSession.get();
            (validForResize && mNet->modes.inputMode == Session_Input_Inside && mNet->modes.resizeMode == Session_Resize_Direct) -->
            result->resize();
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Session.cpp:177 */
            ErrorCode Session::resize() {
                auto error = iter->encode(debug, permitCodegen);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:228 */
                ErrorCode Pipeline::encode(bool supportDebug, bool permitCodegen) {
                    mInfo.first.needComputeGeometry -->
                    mContext.clear();
                    // Size Compute and compute Const Begin
                    auto res = GeometryComputerUtils::shapeComputeAndGeometryTransform(mInfo.second, mContext, mInfo.first.cache.second, mUseGeometry, false, permitCodegen);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryComputerUtils.cpp:146 */
                    ErrorCode GeometryComputerUtils::shapeComputeAndGeometryTransform(...) {
                        // Size Compute and compute Const Begin
                        GeometryComputer::Context ctx(backupBackend);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryComputer.cpp:21 */
                        GeometryComputer::Context::Context(std::shared_ptr<Backend> allocBackend, MNNForwardType type, BackendConfig::PrecisionMode precision) {
                            flatbuffers::FlatBufferBuilder builder(32);
                            OpBuilder opBuilder(builder);
                            opBuilder.add_type(OpType_Raster);
                            auto lastOffset = opBuilder.Finish();
                            builder.Finish(lastOffset);
                            mRasterOp.reset(new BufferStorage);
                            mRasterOp->storage = builder.ReleaseRaw(mRasterOp->allocated_size, mRasterOp->offset);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/3rd_party/flatbuffers/include/flatbuffers/flatbuffers.h:1021 */
                        }
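                        // Aside: the context builds one tiny OpType_Raster flatbuffer up front
                        // and keeps the raw bytes for reuse on every raster command. Sketch of
                        // the FlatBufferBuilder lifecycle, assuming the flatc-generated
                        // MNN_generated.h:
                        #include "MNN_generated.h"
                        void buildRasterOp() {
                            flatbuffers::FlatBufferBuilder builder(32);  // 32 bytes is plenty here
                            MNN::OpBuilder opBuilder(builder);
                            opBuilder.add_type(MNN::OpType_Raster);
                            builder.Finish(opBuilder.Finish());          // the Op table is the root
                            size_t size = 0, offset = 0;
                            uint8_t* raw = builder.ReleaseRaw(size, offset); // take ownership of bytes
                            // ... stash (raw, size, offset); the valid Op starts at raw + offset
                        }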
                        // Size Compute and compute Const
                        (info.type != Schedule::CONSTANT && usage != Tensor::InsideDescribe::TRAINABLE) -->
                        TensorUtils::getDescribeOrigin(t)->mContent->setBackend(nullptr);
                        TensorUtils::getDescribeOrigin(t)->mContent->mem.reset(nullptr);
                        skipShapeCompute -->
                        auto res = SizeComputer::computeOutputSize(info.op, info.inputs, info.outputs);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/SizeComputer.cpp:121 */
                        bool SizeComputer::computeOutputSize(const MNN::Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
                            auto computer = computeFactory->search(op->type());
                            bool ret = computer->onComputeSize(op, inputs, outputs);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/ShapeBinaryOp.cpp:38 */
                            virtual bool onComputeSize(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) const override {
                                MNN_ASSERT(2 == inputs.size());
                                MNN_ASSERT(1 == outputs.size());
                                const auto opType = op->main_as_BinaryOp()->opType();
                                TensorUtils::getDescribe(output)->dimensionFormat = TensorUtils::getDescribe(input0)->dimensionFormat;
                                return SizeComputer::computeBroadCastDims(op, inputs, outputs);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/SizeComputer.cpp:221 */
                            }
                        }
                        TensorUtils::adjustTensorForCompability(t);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/TensorUtils.cpp:690 */
                        void TensorUtils::adjustTensorForCompability(Tensor* newTensor)
                        // Geometry Transform
                        auto geo = GeometryComputer::search(info.op->type(), compileType);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryComputer.cpp:219 */
                        const GeometryComputer* GeometryComputer::search(int type, Runtime::CompilerType compType) {
                            return GeometryComputerManager::get()->search(type, compType);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryComputer.cpp:165 */
                            GeometryComputer* search(int type, Runtime::CompilerType compType) {
                                Runtime::Compiler_Loop == compType
                            }
                        }
                        res = geo->onRecompute(info.op, info.inputs, info.outputs, geoContext, tempBuffer);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryBinary.cpp:19 */
                        virtual bool onRecompute(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, Context& context, CommandBuffer& res) const override
                        tempBuffer.command.clear();
                        tempBuffer.extras.clear();
                        res = geo->onCompute(info.op, info.inputs, info.outputs, geoContext, tempBuffer);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryBinary.cpp:91 */
                        virtual bool onCompute(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, Context& context, CommandBuffer& res) const override
                        GeometryComputerUtils::makeRaster(tempBuffer, cmdBufferReal, geoContext);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/geometry/GeometryComputerUtils.cpp:336 */
                        void GeometryComputerUtils::makeRaster(const CommandBuffer& srcBuffer, CommandBuffer& dstBuffer, GeometryComputer::Context& ctx) {
                            OpCommonUtils::opNeedContent(type, i)
                            auto des = TensorUtils::getDescribe(cmd.inputs[i]);
                            dstBuffer.command.emplace_back(srcBuffer.command[index]);
                        }
                        auto des = TensorUtils::getDescribe(t);
                    }
                    mFlops += SizeComputer::computeFlops(cmdP->op, cmdP->inputs, cmdP->outputs);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/shape/SizeComputer.cpp:58 */
                    float SizeComputer::computeFlops(const MNN::Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
                        auto computeFactory = SizeComputerSuite::get();
                        auto computer       = computeFactory->search(op->type());
                        return computer->onComputeFlops(op, inputs, outputs);
                    }
                }
                auto error = iter->allocMemory(firstMalloc, permitCodegen);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:794 */
                ErrorCode Pipeline::allocMemory(bool firstMalloc, bool permitCodegen) {
                    mTuneAttr.autoSetOpType == false
                    auto code = _createExecutions(mInfo);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:564 */
                    static ErrorCode _createExecutions(Schedule::PipelineInfo& mInfo) {
                        iter.execution.reset(mBackend->onCreate(iter.inputs, iter.outputs, iter.op))
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:161 */
                        Execution* VulkanBackend::onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op) {
                            auto creator = getCreatorMap();
                            auto iter    = creator->find(op->type());
                            _supportImageSize(t)
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:92 */
                            bool VulkanBackend::_supportImageSize(const Tensor* MTensor)
                            OpCommonUtils::opNeedContent(op->type(), i)
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/OpCommonUtils.cpp:446 */
                            bool OpCommonUtils::opNeedContent(int type, int index)
                            auto originExecution = (VulkanBasicExecution*)iter->second->onCreate(inputs, outputs, op, this);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanBinary.cpp:172 */
                            virtual VulkanBasicExecution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override {
                                auto shader = _getShaderName(op, image);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanBinary.cpp:22 */
                                static std::string _getShaderName(const Op* op, bool image) {
                                    // glsl_binaryImage_MUL_comp
                                }
                                op->type() == OpType_BinaryOp --> activationType = op->main_as_BinaryOp()->activationType();
                                return new VulkanBinary(shader, backend, image, (int)inputs.size() - 1, activationType);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanBinary.cpp:83 */
                                VulkanBinary::VulkanBinary(const std::string& shaderName, Backend* bn, bool image, int number, int activationType) : VulkanBasicExecution(bn) {
                                    auto vkBn   = static_cast<VulkanBackend*>(bn);
                                    mBinaryPipeline = vkBn->getPipeline(shaderName, {
                                        VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                                        VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                                        VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                                        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
                                    });
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:88 */
                                    const VulkanPipeline* VulkanBackend::getPipeline(const std::string& key, const std::vector<VkDescriptorType>& types, const std::vector<uint32_t>& localSize) const {
                                        return mRuntime->mPipelineFactory->getPipeline(key, types, localSize);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.cpp:190 */
                                        const VulkanPipeline* VulkanPipelineFactory::getPipeline(const std::string& key, const std::vector<VkDescriptorType>& types, const std::vector<uint32_t>& localSize) const {
                                            SharedPtr<VulkanPipeline> pipeline = VulkanPipeline::create(mDevice, content.first, content.second, types, mCache, localSize);
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.cpp:41 */
                                            VulkanPipeline* VulkanPipeline::create(const VulkanDevice& dev, const uint8_t* data, size_t length, const std::vector<VkDescriptorType>& bufferTypes, VkPipelineCache cache, const std::vector<uint32_t>& localSize) {
                                                VkResult result = dev.createShaderModule(shaderOut, length, (const uint32_t*)data);
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:396 */
                                                const VkResult VulkanDevice::createShaderModule(VkShaderModule& shaderModule, const size_t codeSize, const uint32_t* pCode, const VkAllocationCallbacks* allocator) const {
                                                    return vkCreateShaderModule(mDevice, &shaderModuleCreateInfo, allocator, &shaderModule);
                                                }
                                                CALL_VK(dev.createDescriptorSetLayout(setLayout, bindings.size(), bindings.data()));
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:426 */
                                                const VkResult VulkanDevice::createDescriptorSetLayout(VkDescriptorSetLayout& setLayout, const uint32_t bindingCount, const VkDescriptorSetLayoutBinding* bindings, const VkAllocationCallbacks* allocator) const {
                                                    return vkCreateDescriptorSetLayout(mDevice, &info, allocator, &setLayout);
                                                }
                                                CALL_VK(dev.createPipelineLayout(pipelineLayout, setLayout));
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:438 */
                                                const VkResult VulkanDevice::createPipelineLayout(VkPipelineLayout& pipelineLayout, const VkDescriptorSetLayout& setLayout, const VkAllocationCallbacks* allocator) const {
                                                    return vkCreatePipelineLayout(mDevice, &layoutInfo, allocator, &pipelineLayout);
                                                }
                                                auto res = dev.createComputePipeline(pipeline, shaderOut, pipelineLayout, cache, specializationInfo.get());
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:464 */
                                                const VkResult VulkanDevice::createComputePipeline(VkPipeline& pipeline, const VkShaderModule& shaderMoule,
                                                   const VkPipelineLayout& pipelineLayout,
                                                   const VkPipelineCache& pipelineCache,
                                                   const VkSpecializationInfo* pSpecializationInfo,
                                                   const VkAllocationCallbacks* allocator) const {
                                                    return createComputePipelines(&pipeline, &info, 1, pipelineCache, allocator);
                                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:455 */
                                                    const VkResult VulkanDevice::createComputePipelines(VkPipeline* pipelines,
                                                    const VkComputePipelineCreateInfo* createInfos,
                                                    const uint32_t createInfoCount,
                                                    const VkPipelineCache& pipelineCache,
                                                    const VkAllocationCallbacks* allocator) const {
                                                        return vkCreateComputePipelines(mDevice, pipelineCache, createInfoCount, createInfos, allocator, pipelines);
                                                    }
                                                }
                                                dev.destroyShaderModule(shaderOut); {
                                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:408 */
                                                    vkDestroyShaderModule(mDevice, shaderModule, allocator);
                                                }
                                                return new VulkanPipeline(dev, pipeline, pipelineLayout, desPoolSize, setLayout, bufferTypes);
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.cpp:29 */
                                            }
                                            mPipelines.insert(std::make_pair(key, pipeline));
                                            return pipeline.get();
                                        }
                                    }
                                }
                            }
                            return new VulkanBasicExecutionDirect(std::shared_ptr<VulkanBasicExecution>(originExecution));
                        }
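                        // Aside: the chain above is the canonical Vulkan compute setup: shader
                        // module -> descriptor set layout -> pipeline layout -> compute
                        // pipeline, after which the shader module may be destroyed. Condensed
                        // sketch with error handling elided (SPIR-V blob and bindings are
                        // assumed inputs):
                        #include <vulkan/vulkan.h>
                        VkPipeline buildComputePipeline(VkDevice dev,
                                                        const uint32_t* spirvWords, size_t spirvBytes,
                                                        const VkDescriptorSetLayoutBinding* bindings,
                                                        uint32_t bindingCount) {
                            VkShaderModuleCreateInfo sm{VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO};
                            sm.codeSize = spirvBytes;
                            sm.pCode    = spirvWords;
                            VkShaderModule shader;
                            vkCreateShaderModule(dev, &sm, nullptr, &shader);

                            VkDescriptorSetLayoutCreateInfo ds{VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO};
                            ds.bindingCount = bindingCount;
                            ds.pBindings    = bindings;  // one binding per VkDescriptorType in the list
                            VkDescriptorSetLayout setLayout;
                            vkCreateDescriptorSetLayout(dev, &ds, nullptr, &setLayout);

                            VkPipelineLayoutCreateInfo pl{VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO};
                            pl.setLayoutCount = 1;
                            pl.pSetLayouts    = &setLayout;
                            VkPipelineLayout layout;
                            vkCreatePipelineLayout(dev, &pl, nullptr, &layout);

                            VkComputePipelineCreateInfo cp{VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO};
                            cp.stage.sType  = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
                            cp.stage.stage  = VK_SHADER_STAGE_COMPUTE_BIT;
                            cp.stage.module = shader;
                            cp.stage.pName  = "main";
                            cp.layout       = layout;
                            VkPipeline pipeline;
                            vkCreateComputePipelines(dev, VK_NULL_HANDLE, 1, &cp, nullptr, &pipeline);
                            vkDestroyShaderModule(dev, shader, nullptr); // no longer needed once built
                            return pipeline;
                        }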
                    }
                    _SetTensorBackend(mInfo, mAllocInput);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:608 */
                    static void _SetTensorBackend(Schedule::PipelineInfo& mInfo, bool ownInputs) {
                        // Clear Valid Tensor's Backend
                        for(auto t : iter.inputs) | for (auto t : iter.outputs)
                        auto des = TensorUtils::getDescribe(t);
                        nullptr == des->mem.get() --> des->setBackend(nullptr);
                        // Set Tensor's Backend
                        for(auto t : iter.inputs) | for (auto t : iter.outputs)
                        auto des = TensorUtils::getDescribe(t);
                        (nullptr == des->mem.get() && nullptr == des->getBackend()) --> des->setBackend(curBackend);
                    }
                    auto insertCode = _InsertCopy(mInfo, mCacheConstTensors, mShapeFixConstCache, mAllocInput, permitCodegen);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:674 */
                    static ErrorCode _InsertCopy(Schedule::PipelineInfo& mInfo, std::map<Tensor*, std::shared_ptr<Tensor>>& mCacheConstTensors, std::map<Tensor*, std::shared_ptr<Tensor>>& shapeFixConstCache, bool ownInput, bool permitCodegen) {
                        shapeFixConstCache.clear();
                        auto& buffer = info.executeBuffer;
                        auto commands = std::move(buffer.command);
                        WrapExecution::needWrap(t, curBackend) == true
                        !des->isMutable --> newTensor = WrapExecution::copyConstCache(t, curBackend, mCacheConstTensors, permitCodegen);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/WrapExecution.cpp:142 */
                        Tensor* WrapExecution::copyConstCache(Tensor* t, Backend* curBackend, std::map<Tensor*, std::shared_ptr<Tensor>>& cache, bool permitCodegen) {
                            auto constCacheiter = cache.find(t);
                            constCacheiter == cache.end() -->
                            std::shared_ptr<Tensor> wrapTensor(new Tensor);
                            TensorUtils::copyShape(t, wrapTensor.get(), true);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/TensorUtils.cpp:97 */
                            void TensorUtils::copyShape(const Tensor* source, Tensor* dest, bool copyFormat, bool copyRef) {
                                // extend dimensions: pad missing lengths with 1 up to the required rank
                                adjustTensorForCompability(dest); {
                                    newTensor->setLength(n, 1);
                                }
                            }
                            TensorUtils::adjustTensorForCompability(wrapTensor.get());
                            auto tempRes = curBackend->onAcquireBuffer(wrapTensor.get(), Backend::STATIC);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:76 */
                            bool Backend::onAcquireBuffer(const Tensor* tensor, StorageType storageType) {
                                auto mem = this->onAcquire(tensor, storageType);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:138 */
                                Backend::MemObj* VulkanBackend::onAcquire(const Tensor* tensor, StorageType storageType) {
                                    Backend::STATIC == storageType -->
                                    auto newBuffer = std::make_shared<VulkanTensor>(MTensor, getMemoryPool(), device().proty().limits);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanTensor.cpp:71 */
                                    VulkanTensor::VulkanTensor(const Tensor* shape, const VulkanMemoryPool& pool, const VkPhysicalDeviceLimits& limits, bool separate) {
                                        mImage[y*mBlocks[0] + x] = std::make_shared<VulkanImage>(pool, separate, std::vector<int>{wReal, hReal}, shape->getType());
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanImage.cpp:54 */
                                        VulkanImage::VulkanImage(const VulkanMemoryPool& pool, bool separate, const std::vector<int>& dims, halide_type_t type) : mDevice(pool.device()), mPool(pool) {
                                            auto format = _getFormat(type);
                                            (pool.permitFp16() && format == VK_FORMAT_R32G32B32A32_SFLOAT) --> // Use fp16 instead of fp32
                                            format = VK_FORMAT_R16G16B16A16_SFLOAT;
                                            mDevice.getImageMemoryRequirements(mImage.first, memRequirements)
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:324 */
                                            const void VulkanDevice::getImageMemoryRequirements(const VkImage& image, VkMemoryRequirements& memoryRequirements) const {
                                                <<vkGetImageMemoryRequirements(mDevice, image, &memoryRequirements);
                                            }
                                            mMemory = const_cast<VulkanMemoryPool&>(mPool).allocMemory(memRequirements, 0, separate);
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanMemoryPool.cpp:79 */
                                            std::pair<void*, int> VulkanMemoryPool::allocMemory(const VkMemoryRequirements& requirements, VkFlags extraMask, bool separate) {
                                                auto mem = mAllocators[index]->alloc(requirements.size, separate, requirements.alignment);
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:67 */
                                                std::pair<void*, size_t> BufferAllocator::alloc(size_t size, bool separate, size_t align)
                                            }
                                            mDevice.bindImageMemory(mImage.first, realMem->get(), mMemory.second);
                                            CALL_VK(mDevice.createImageView(mImage.second, mImage.first, viewType, format));
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:334 */
                                            const VkResult VulkanDevice::createImageView(VkImageView& view, const VkImage& image, const VkImageViewType& viewType, const VkFormat& format, const VkAllocationCallbacks* allocator) const {
                                                return <<vkCreateImageView(mDevice, &info, allocator, &view);
                                            }
                                        }
                                    }
                                    MTensor->buffer().device = (uint64_t)(newBuffer.get());
                                    return new VulkanMemRelease(newBuffer); {
                                        mTensor = t;
                                    }
                                }
                                TensorUtils::getDescribe(tensor)->mem.reset(mem);
                            }
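                            // Editor's sketch (not MNN source): the MemObj returned by onAcquire is an
                            // RAII handle. The tensor's describe struct owns it, so resetting `mem`
                            // (see _releaseTensor further down) frees the device memory. Per the trace
                            // above, the Vulkan variant VulkanMemRelease is essentially:
                            struct SketchVulkanMemRelease /* hypothetical name */ : public Backend::MemObj {
                                std::shared_ptr<VulkanTensor> mTensor;          // keeps the VkImage(s) alive
                                SketchVulkanMemRelease(std::shared_ptr<VulkanTensor> t) : mTensor(t) {}
                                ~SketchVulkanMemRelease() { mTensor.reset(); }  // drop ref -> memory returns to pool
                            };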
                            outDes->setBackend(curBackend);
                            curBackend->onCopyBuffer(t, wrapTensor.get());
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:264 */
                            void VulkanBackend::onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const {
                                _finish();
                                TensorUtils::copyShape(dstTensor, tempTensor.get(), true);
                                // host->gpu
                                _allocHostBuffer(size);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:417 */
                                void VulkanBackend::_allocHostBuffer(size_t size) const {
                                    mHostBuffer.reset(new VulkanBuffer(getMemoryPool(), false, size, nullptr,
                                        VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
                                        VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
                                        VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                                        VK_SHARING_MODE_EXCLUSIVE, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanBuffer.cpp:15 */
                                    VulkanBuffer::VulkanBuffer(const VulkanMemoryPool& pool, bool separate, size_t size, const void* hostData, VkBufferUsageFlags usage, VkSharingMode shared, VkFlags requirements_mask): mPool(pool) {
                                        mBuffer = const_cast<VulkanMemoryPool&>(mPool).allocBuffer(size, usage, shared);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanMemoryPool.cpp:69 */
                                        VkBuffer VulkanMemoryPool::allocBuffer(size_t size, VkBufferUsageFlags flags, VkSharingMode shared) {
                                            CALL_VK(mDevice.createBuffer(res, size, flags, shared));
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:126 */
                                            const VkResult VulkanDevice::createBuffer(VkBuffer& buffer, const size_t size, const VkBufferUsageFlags usage, const VkSharingMode shared, const VkAllocationCallbacks* allocator) const {
                                                return <<vkCreateBuffer(mDevice, &info, allocator, &buffer);
                                            }
                                        }
                                        mPool.device().getBufferMemoryRequirements(mBuffer, memReq);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:139 */
                                        const void VulkanDevice::getBufferMemoryRequirements(VkBuffer buffer, VkMemoryRequirements& memoryRequirements) const {
                                            <<vkGetBufferMemoryRequirements(mDevice, buffer, &memoryRequirements);
                                        }
                                        mMemory = const_cast<VulkanMemoryPool&>(mPool).allocMemory(memReq, requirements_mask, separate);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanMemoryPool.cpp:79 */
                                        std::pair<void*, int> VulkanMemoryPool::allocMemory(const VkMemoryRequirements& requirements, VkFlags extraMask, bool separate)
                                        CALL_VK(mPool.device().bindBufferMemory(mBuffer, realMem->get(), mMemory.second));
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:162 */
                                        const VkResult VulkanDevice::bindBufferMemory(const VkBuffer buffer, const VkDeviceMemory memory, const VkDeviceSize memoryOffset) const {
                                            return <<vkBindBufferMemory(mDevice, buffer, memory, memoryOffset);
                                        }
                                    }
                                    mConverters.clear();
                                }
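                                // Editor's sketch of the staging-buffer dance VulkanBuffer wraps above
                                // (stock Vulkan API; `device`, `physicalDevice`, `size` and chooseMemoryType
                                // are assumptions of this sketch): create -> query -> allocate -> bind -> map.
                                VkBufferCreateInfo info{VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
                                info.size        = size;
                                info.usage       = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
                                info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
                                VkBuffer staging;
                                vkCreateBuffer(device, &info, nullptr, &staging);
                                VkMemoryRequirements req;
                                vkGetBufferMemoryRequirements(device, staging, &req);
                                VkMemoryAllocateInfo allocInfo{VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
                                allocInfo.allocationSize  = req.size;
                                allocInfo.memoryTypeIndex = chooseMemoryType(physicalDevice, req, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); // see sketch below
                                VkDeviceMemory memory;
                                vkAllocateMemory(device, &allocInfo, nullptr, &memory);
                                vkBindBufferMemory(device, staging, memory, 0);
                                void* mapped = nullptr;
                                vkMapMemory(device, memory, 0, size, 0, &mapped);   // host writes land here, then unmap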
                                MNNCPUCopyBuffer(srcTensor, tempTensor.get());
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:60 */
                                bool MNNCPUCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) {
                                    auto& srcBuffer = srcTensor->buffer();
                                    auto& dstBuffer = dstTensor->buffer();
                                    auto code = CPUTensorConverter::convert(srcTensor, dstTensor);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUTensorConvert.cpp:282 */
                                    ErrorCode CPUTensorConverter::convert(const Tensor* input, const Tensor* output, const CoreFunctions* core, int tId, int numberThread) {
                                        auto source = TensorUtils::getDescribe(input)->dimensionFormat;
                                        auto dest   = TensorUtils::getDescribe(output)->dimensionFormat;
                                        nullptr == core --> core = MNNGetCoreFunctions(); {
                                            return gCoreFunction;
                                        }
                                        int bitLength = _getBytes(core, input);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUTensorConvert.cpp:271 */
                                        static int _getBytes(const CoreFunctions* core, const Tensor* output)
                                    }
                                }
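                                // Editor's sketch of what CPUTensorConverter::convert reduces to for the
                                // plain NCHW -> NHWC case (the real code also handles NC4HW4 tiling,
                                // multiple dtypes and threading):
                                static void nchwToNhwc(const float* src, float* dst, int n, int c, int h, int w) {
                                    for (int b = 0; b < n; ++b)
                                        for (int y = 0; y < h; ++y)
                                            for (int x = 0; x < w; ++x)
                                                for (int k = 0; k < c; ++k)
                                                    dst[((b * h + y) * w + x) * c + k] =
                                                        src[((b * c + k) * h + y) * w + x];
                                }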
                                mHostBuffer->unmap();
                                auto key    = std::make_tuple(dstTensor, true, format);
                                auto iter   = mConverters.find(key);
                                iter == mConverters.end() -->
                                auto converter = std::make_shared<VulkanImageConverter>(this);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanImageConverter.cpp:15 */
                                VulkanImageConverter::VulkanImageConverter(const VulkanBackend* bn) {
                                    mSampler = bn->getCommonSampler(); {
                                        return mRuntime->mSampler.get();
                                    }
                                    mConst.reset(new VulkanBuffer(bn->getMemoryPool(), false, 8 * sizeof(int), nullptr, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanBuffer.cpp:15 */
                                    VulkanBuffer::VulkanBuffer(const VulkanMemoryPool& pool, bool separate, size_t size, const void* hostData, VkBufferUsageFlags usage, VkSharingMode shared, VkFlags requirements_mask): mPool(pool)
                                }
                                std::shared_ptr<VulkanCommandPool::Buffer> convertorBuffer(const_cast<VulkanCommandPool::Buffer*>(getPool().allocBuffer()));
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:47 */
                                VulkanCommandPool::Buffer* VulkanCommandPool::allocBuffer() const {
                                    return new Buffer(this);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:50 */
                                    VulkanCommandPool::Buffer::Buffer(const VulkanCommandPool* pool) : mPool(pool) {
                                        pool->mFreeBuffers.empty() -->
                                        CALL_VK(pool->mDevice.allocateCommandBuffer(pool->mPool, mBuffer));
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:215 */
                                        const VkResult VulkanDevice::allocateCommandBuffer(const VkCommandPool& cmdPool, VkCommandBuffer& cmdBuffer, const VkCommandBufferLevel level) const {
                                            return allocateCommandBuffers(cmdPool, &cmdBuffer, 1, level);
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:198 */
                                            const VkResult VulkanDevice::allocateCommandBuffers(const VkCommandPool& cmdPool, VkCommandBuffer* cmdBuffers, const uint32_t cmdBufferCount, const VkCommandBufferLevel level) const {
                                                return vkAllocateCommandBuffers(mDevice, &cmdBufferCreateInfo, cmdBuffers);
                                            }
                                        }
                                    }
                                }
                                convertorBuffer->begin(0);
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:92 */
                                void VulkanCommandPool::Buffer::begin(VkCommandBufferUsageFlags flag) const {
                                    vkResetCommandBuffer(mBuffer, 0);
                                    CALL_VK(vkBeginCommandBuffer(mBuffer, &cmdBufferBeginInfo));
                                }
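                                // Editor's note: begin() re-records into the same VkCommandBuffer every
                                // time. The begin info it fills is the stock one (assumption: flags are
                                // passed straight through):
                                VkCommandBufferBeginInfo cmdBufferBeginInfo{VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
                                cmdBufferBeginInfo.flags = flag; // 0 here; ONE_TIME_SUBMIT for throwaway buffers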
                                auto vkTensor = reinterpret_cast<VulkanTensor*>(dstTensor->deviceId());
                                vkTensor->image(i)->barrierWrite(convertorBuffer->get());
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanImage.cpp:102 */
                                void VulkanImage::barrierWrite(VkCommandBuffer buffer) const {
                                    vkCmdPipelineBarrier(buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
                                }
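                                // Editor's sketch of the barrier struct behind barrierWrite (assumption:
                                // whole-image range, compute-to-compute hazard as the stage masks above show):
                                VkImageMemoryBarrier barrier{VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
                                barrier.srcAccessMask       = VK_ACCESS_SHADER_READ_BIT;
                                barrier.dstAccessMask       = VK_ACCESS_SHADER_WRITE_BIT;
                                barrier.oldLayout           = VK_IMAGE_LAYOUT_UNDEFINED; // discard: contents get overwritten
                                barrier.newLayout           = VK_IMAGE_LAYOUT_GENERAL;   // layout used for storage images
                                barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
                                barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
                                barrier.image               = mImage.first;              // per the ctor above
                                barrier.subresourceRange    = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};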
                                converter->encodeBufferToTensor(mHostBuffer->buffer(), dstTensor, mHostBuffer->size(), 0, format, convertorBuffer.get())
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanImageConverter.cpp:55 */
                                void VulkanImageConverter::encodeBufferToTensor(VkBuffer srcBuffer, const Tensor* destTensor, const int bufferSize, VkDeviceSize bufferOffset, MNN_DATA_FORMAT srcBufferFormat, const VulkanCommandPool::Buffer* cmdBuffer) {
                                    auto destFormat   = TensorUtils::getDescribe(destTensor)->dimensionFormat;
                                    cmdBuffer->barrierSource(srcBuffer, 0, bufferSize);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:68 */
                                    void VulkanCommandPool::Buffer::barrierSource(VkBuffer source, size_t start, size_t size, BarrierType type) const {
                                        vkCmdPipelineBarrier(mBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 1, &barrier, 0, nullptr);
                                    }
                                    _setUpPipeline(sourceFormat, destFormat, BUFFER_TO_IMAGE, tensor->buffer().type);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanImageConverter.cpp:23 */
                                    void VulkanImageConverter::_setUpPipeline(MNN_DATA_FORMAT sourceFormat, MNN_DATA_FORMAT destFormat, TYPE type, halide_type_t datatype) {
                                        type == BUFFER_TO_IMAGE
                                        name = "glsl_nchwToimage_comp";
                                        mPipeline = mBackend->getPipeline(name, types);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:88 */
                                        const VulkanPipeline* VulkanBackend::getPipeline(const std::string& key, const std::vector<VkDescriptorType>& types, const std::vector<uint32_t>& localSize) const
                                    }
                                    vkTensor->image(i)->barrierWrite(cmdBuffer->get());
                                    _encodeImageBufferConvert(tensor, srcBuffer, bufferSize, bufferOffset, cmdBuffer, VK_IMAGE_LAYOUT_GENERAL, srcBufferFormat);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanImageConverter.cpp:67 */
                                    void VulkanImageConverter::_encodeImageBufferConvert(const Tensor* tensor, VkBuffer destBuffer, const int bufferSize, VkDeviceSize bufferOffset, const VulkanCommandPool::Buffer* cmdBuffer, VkImageLayout layout, MNN_DATA_FORMAT bufferFormat) {
                                        auto dims = (int*)mConst->map();// W, H, C, N
                                        auto nhwc = VulkanTensor::tensorShapeFormat(tensor);
                                        mConst->unmap();
                                        auto vkTensor = reinterpret_cast<VulkanTensor*>(tensor->deviceId());
                                        auto& mBlocks = vkTensor->blocks();
                                        auto& limits = mBackend->proty().limits;
                                        mOffset[index].reset(new VulkanBuffer(mBackend->getMemoryPool(), false, sizeof(offset), &offset, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanBuffer.cpp:15 */
                                        VulkanBuffer::VulkanBuffer(const VulkanMemoryPool& pool, bool separate, size_t size, const void* hostData, VkBufferUsageFlags usage, VkSharingMode shared, VkFlags requirements_mask): mPool(pool)
                                        mSet[index].reset(mPipeline->createSet());
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.cpp:128 */
                                        VulkanPipeline::DescriptorSet* VulkanPipeline::createSet() const {
                                            CALL_VK(mDevice.createDescriptorPool(descriptorPool, mDesPoolSize.size(), mDesPoolSize.data()));
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:487 */
                                            const VkResult VulkanDevice::createDescriptorPool(VkDescriptorPool& descriptorPool, const uint32_t poolSizeCount, const VkDescriptorPoolSize* pPoolSizes, const VkAllocationCallbacks* allocator) const {
                                                return vkCreateDescriptorPool(mDevice, &poolInfo, allocator, &descriptorPool);
                                            }
                                            CALL_VK(mDevice.allocateDescriptorSet(descriptorSet, descriptorPool, mSetLayout));
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:498 */
                                            const VkResult VulkanDevice::allocateDescriptorSet(VkDescriptorSet& descriptorSet, const VkDescriptorPool& descPool, const VkDescriptorSetLayout& setLayout) const {
                                                return vkAllocateDescriptorSets(mDevice, &allocInfo, &descriptorSet);
                                            }
                                            return new DescriptorSet(descriptorSet, descriptorPool, this);
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.hpp:39 */
                                        }
                                        mSet[index]->writeImage(image->view(), mSampler->get(), layout, 0);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.cpp:171 */
                                        void VulkanPipeline::DescriptorSet::writeImage(VkImageView view, VkSampler sampler, VkImageLayout layout, int bind) {
                                            writeSet.descriptorType  = mPipeline->argType(bind); {
                                                return mBufferTypes[index];
                                            }
                                            mPipeline->mDevice.updateWriteDescriptorSet(writeSet);
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:419 */
                                            const void VulkanDevice::updateWriteDescriptorSet(const VkWriteDescriptorSet& descriptorWrite) const {
                                                updateDescriptorSets(1, &descriptorWrite, 0, nullptr);
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp:415 */
                                                vkUpdateDescriptorSets(mDevice, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
                                            }
                                        }
                                        mSet[index]->writeBuffer(destBuffer, 1, bufferSize, bufferOffset);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.cpp:151 */
                                        void VulkanPipeline::DescriptorSet::writeBuffer(VkBuffer buffer, int bindIndex, size_t size, VkDeviceSize offset) {
                                            mPipeline->mDevice.updateWriteDescriptorSet(writeSet);
                                        }
                                        mSet[index]->writeBuffer(mConst->buffer(), 2, mConst->size());
                                        mSet[index]->writeBuffer(mOffset[index]->buffer(), 3, mOffset[index]->size());
                                        mPipeline->bind(cmdBuffer->get(), mSet[index]->get());
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanPipeline.cpp:122 */
                                        void VulkanPipeline::bind(VkCommandBuffer cmd, VkDescriptorSet des) const {
                                            // Bind the compute pipeline.
                                            vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, mPipeline);
                                            // Bind descriptor set.
                                            vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, mLayout, 0, 1, &des, 0, nullptr);
                                        }
                                        vkCmdDispatch(cmdBuffer->get(), UP_DIV(offset.size[3], 256), 1, 1);
                                    }
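                                    // Editor's note: UP_DIV is MNN's ceiling division, so the dispatch above
                                    // launches just enough 256-wide workgroups to cover offset.size[3] items:
                                    #define UP_DIV(x, y) (((x) + (y) - 1) / (y))
                                    // e.g. UP_DIV(1000, 256) == 4 -> 1024 invocations; the shader bounds-checks the tail.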
                                }
                                vkTensor->image(i)->barrierRead(convertorBuffer->get());
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanImage.cpp:122 */
                                void VulkanImage::barrierRead(VkCommandBuffer buffer) const {
                                    vkCmdPipelineBarrier(buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
                                }
                                convertorBuffer->end();
                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:102 */
                                void VulkanCommandPool::Buffer::end() const {
                                    CALL_VK(vkEndCommandBuffer(mBuffer));
                                }
                                mConverters.insert(std::make_pair(key, std::make_pair(converter, convertorBuffer)));
                                iter = mConverters.find(key);
                                mCmdBuffers.push_back(iter->second.second->get());
                            }
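                            // Editor's sketch of the cache filled above: keyed by (tensor, isUpload,
                            // format), so a repeated copy of the same tensor reuses the pre-recorded
                            // command buffer instead of re-encoding. Approximate member type (exact
                            // declaration assumed; shapes taken from the key/value built above):
                            // std::map<std::tuple<const Tensor*, bool, MNN_DATA_FORMAT>,
                            //          std::pair<std::shared_ptr<VulkanImageConverter>,
                            //                    std::shared_ptr<VulkanCommandPool::Buffer>>> mConverters;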
                            canReplace -->
                            outDes->stageMask |= Tensor::InsideDescribe::CONVERTED_STAGE;
                            TensorUtils::getDescribeOrigin(t)->mContent = TensorUtils::getDescribeOrigin(wrapTensor.get())->mContent; {
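                            // Reassigning mContent drops the last reference to the tensor's previous
                            // (CPU-side) describe struct; the backtrace below catches the old host
                            // allocation being freed as that SharedPtr is replaced: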
#0  MNN::BufferAllocator::free (this=0xaaaaaacad820, pointer=...) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:159
#1  0x0000fffff7bb49ec in MNN::CPUMemObj::~CPUMemObj (this=0xaaaaaacae180, __in_chrg=<optimized out>) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:250
#2  0x0000fffff7bb4a18 in MNN::CPUMemObj::~CPUMemObj (this=0xaaaaaacae180, __in_chrg=<optimized out>) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/cpu/CPUBackend.cpp:251
#3  0x0000fffff7ad14b0 in MNN::AutoRelease<MNN::Backend::MemObj>::~AutoRelease (this=0xaaaaaacadfd8, __in_chrg=<optimized out>) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/AutoStorage.h:121
#4  0x0000fffff7ae5150 in MNN::Tensor::InsideDescribe::NativeInsideDescribe::~NativeInsideDescribe (this=0xaaaaaacadef0, __in_chrg=<optimized out>) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/TensorUtils.hpp:77
#5  0x0000fffff7ae519c in MNN::Tensor::InsideDescribe::NativeInsideDescribe::~NativeInsideDescribe (this=0xaaaaaacadef0, __in_chrg=<optimized out>) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/TensorUtils.hpp:77
#6  0x0000fffff7ef2530 in MNN::RefCount::decRef (this=0xaaaaaacadef0) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/AutoStorage.h:158
#7  0x0000fffff7af69e0 in MNN::SharedPtr<MNN::Tensor::InsideDescribe::NativeInsideDescribe>::operator= (this=0xaaaaaacaded0, rp=...) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/AutoStorage.h:203
#8  0x0000fffff7afddd4 in MNN::WrapExecution::copyConstCache (t=0xaaaaaacade80, curBackend=0xaaaaaab78580, cache=..., permitCodegen=false) at /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/WrapExecution.cpp:178
                            }
                            t->buffer().host = wrapTensor->buffer().host;
                            t->buffer().device = wrapTensor->buffer().device;
                            t->buffer().dim = TensorUtils::getDescribe(wrapTensor.get())->dims;
                        }
                        iter.workInputs[v] = newTensor;
                        auto t = iter.workOutputs[v];
                        WrapExecution::needWrap(t, curBackend) == false
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/WrapExecution.cpp:21 */
                    }
                    // Compute RefCount Begin
                    auto& iterP : buffer.command
                    auto& iter = *iterP;
                    auto t : iter.workInputs
                    auto des = TensorUtils::getDescribe(t);
                    des->useCount = 0;
                    des->useCount += 1;
                    // Compute RefCount End
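                    // Editor's sketch of the two-pass refcount the trace above compresses:
                    // zero every input's count, then bump it once per consuming command, so
                    // _releaseTensor below can free a tensor after its last consumer runs.
                    for (auto& iterP : buffer.command)
                        for (auto t : (*iterP).workInputs)
                            TensorUtils::getDescribe(t)->useCount = 0;
                    for (auto& iterP : buffer.command)
                        for (auto t : (*iterP).workInputs)
                            TensorUtils::getDescribe(t)->useCount += 1;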
                    // Alloc tensor
                    mBackend->onResizeBegin();
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:112 */
                    void VulkanBackend::onResizeBegin() {
                        mInitBuffer->begin(0);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:92 */
                        void VulkanCommandPool::Buffer::begin(VkCommandBufferUsageFlags flag) const {
                            vkResetCommandBuffer(mBuffer, 0);
                            CALL_VK(vkBeginCommandBuffer(mBuffer, &cmdBufferBeginInfo));
                        }
                    }
                    auto allocRes = _allocTensor(t, curBackend, mOutputStatic); | auto t : iter.workInputs
                    auto res = _allocTensor(t, curBackend, mOutputStatic); | auto t : iter.workOutputs
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:150 */
                    static bool _allocTensor(Tensor* t, Backend* curBackend, bool outputStatic) {
                        auto memoryType = _getTensorStorageType(t, outputStatic);
                        auto bn         = TensorUtils::getDescribe(t)->getBackend();
                        auto des = TensorUtils::getDescribe(t);
                        nullptr == des->mem.get() -->
                        TensorUtils::setLinearLayout(t);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/TensorUtils.cpp:124 */
                        void TensorUtils::setLinearLayout(Tensor* tensor)
                        auto res     = curBackend->onAcquireBuffer(t, memoryType);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:76 */
                        bool Backend::onAcquireBuffer(const Tensor* tensor, StorageType storageType) {
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:138 */
                            Backend::MemObj* VulkanBackend::onAcquire(const Tensor* tensor, StorageType storageType) {
                                Backend::STATIC != storageType -->
                                bool separate  = storageType == Backend::DYNAMIC_SEPERATE;
                                auto newBuffer = std::make_shared<VulkanTensor>(MTensor, getDynamicMemoryPool(), device().proty().limits, separate); {
                                    separate == true -->
                                    // alloc otherwise
                                    pointer = mAllocator->onAlloc(size, align);
                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:40 */
                                    virtual std::pair<void*, size_t> onAlloc(size_t size, size_t align) override {
                                        return mParent->alloc(size, false, align);
                                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:89 */ {
                                            separate == false
                                            pointer = mAllocator->onAlloc(size, align);
                                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanMemoryPool.cpp:30 */
                                            // TODO: where does the 4608 come from?!
                                            virtual std::pair<void*, size_t> onAlloc(size_t size, size_t align) override {
                                                auto mem = new VulkanMemory(mDevice, info);
                                                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanMemoryPool.cpp:11 */
                                                VulkanMemory::VulkanMemory(const VulkanDevice& dev, const VkMemoryAllocateInfo& info) : mDevice(dev) {
                                                    CALL_VK(mDevice.allocMemory(mMemory, info));
                                                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanDevice.cpp */
                                                    const VkResult VulkanDevice::allocMemory(VkDeviceMemory& memory, const VkMemoryAllocateInfo& allocateInfo, const VkAllocationCallbacks* allocator) const {
                                                        return vkAllocateMemory(mDevice, &allocateInfo, allocator, &memory);
                                                    }
                                                }
                                                return std::make_pair(mem, 0);
                                            }
                                        }
                                    }
                                }
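                                // Editor's sketch (standard Vulkan pattern, not necessarily MNN's exact
                                // code) of how an allocator picks VkMemoryAllocateInfo::memoryTypeIndex
                                // from the requirements reported by the driver:
                                static uint32_t chooseMemoryType(VkPhysicalDevice phy, const VkMemoryRequirements& req,
                                                                 VkMemoryPropertyFlags want) {
                                    VkPhysicalDeviceMemoryProperties props;
                                    vkGetPhysicalDeviceMemoryProperties(phy, &props);
                                    for (uint32_t i = 0; i < props.memoryTypeCount; ++i)
                                        if ((req.memoryTypeBits & (1u << i)) &&
                                            (props.memoryTypes[i].propertyFlags & want) == want)
                                            return i;
                                    return UINT32_MAX; // no suitable type; caller must treat as error
                                }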
                                MTensor->buffer().device = (uint64_t)(newBuffer.get());
                                mAllBuffers.insert(std::make_pair(MTensor->buffer().device, newBuffer));
                                return new VulkanMemRelease(newBuffer);
                            }
                        }
                    }
                    auto code = iter.execution->onResize(iter.workInputs, iter.workOutputs);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanBasicExecution.cpp:58 */
                    ErrorCode VulkanBasicExecutionDirect::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
                        auto initCmdBuffer = static_cast<VulkanBackend*>(backend())->getInitCommandBuffer(); {
                            return mInitBuffer.get();
                        }
                        _initLayout(inputs, outputs, initCmdBuffer);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanBasicExecution.cpp:28 */
                        static void _initLayout(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs, const VulkanCommandPool::Buffer* initCmdBuffer) {
                            // the second input
                            img->currentLayout() == VK_IMAGE_LAYOUT_UNDEFINED
                            img->barrierRead(initCmdBuffer->get());
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanImage.cpp:122 */
                            void VulkanImage::barrierRead
                        }
                        mCmdBuffer->begin(0);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:92 */
                        void VulkanCommandPool::Buffer::begin
                        auto code = mEncoder->onEncode(inputs, outputs, mCmdBuffer.get());
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanBinary.cpp:99 */
                        ErrorCode VulkanBinary::onEncode(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const VulkanCommandPool::Buffer* cmdBuffer) {
                            auto input0T = (VulkanTensor*)(inputs[0]->deviceId());
                            auto input1T = (VulkanTensor*)(inputs[1]->deviceId());
                            auto outputT = (VulkanTensor*)(outputs[0]->deviceId());
                            auto vkBn = (VulkanBackend*)backend();
                            int number = outputT->imageSize() * ((int)inputs.size() - 1);
                            mConstBuffer.size() != number -->
                            mConstBuffer.resize(number);
                            mConstBuffer[i] = std::make_shared<VulkanBuffer>(vkBn->getMemoryPool(), false, sizeof(ConstBuffer), nullptr, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanBuffer.cpp:15 */
                            VulkanBuffer::VulkanBuffer(const VulkanMemoryPool& pool, bool separate, size_t size, const void* hostData, VkBufferUsageFlags usage, VkSharingMode shared, VkFlags requirements_mask): mPool(pool)
                        }
                        img->barrierRead(mCmdBuffer->get());
                        _postTreat(outputs, mCmdBuffer.get());
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/execution/VulkanBasicExecution.cpp:42 */
                        static void _postTreat(const std::vector<Tensor *> &outputs, const VulkanCommandPool::Buffer* initCmdBuffer) {
                            img->currentLayout() == VK_IMAGE_LAYOUT_UNDEFINED --> img->barrierRead(initCmdBuffer->get());
                        }
                        mCmdBuffer->end();
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:102 */
                        void VulkanCommandPool::Buffer::end() const {
                            CALL_VK(vkEndCommandBuffer(mBuffer));
                        }
                        return code;
                    }
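                    // Editor's note: onResize records the per-op command buffer once; the
                    // matching onExecute (not shown in this trace) only needs to hand the
                    // prebuilt buffer back to the backend, roughly:
                    //     static_cast<VulkanBackend*>(backend())->pushCommand(mCmdBuffer->get());
                    // so steady-state inference skips all of the encoding above.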
                    // Free mid tensor
                    _releaseTensor(t, mAllocInput); | auto t : iter.workInputs
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:137 */
                    static void _releaseTensor(Tensor* origin, bool mAllocInput) {
                        TensorUtils::getDescribe(origin)->useCount -= 1;
                        (0 == TensorUtils::getDescribe(origin)->useCount && TensorUtils::getDescribe(origin)->memoryType == Tensor::InsideDescribe::MEMORY_BACKEND)
                        auto needRelease = _needRelease(origin, !mAllocInput);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:123 */
                        static bool _needRelease(const Tensor* tensor, bool inputOutside) {
                            TensorUsage::CONSTANT == usage --> return false
                            MNN::Tensor::InsideDescribe::INPUT --> return true
                        }
                        nullptr != bn && needRelease -->
                        // A zero-shape tensor may not have a backend (bn)
                        bn->onReleaseBuffer(origin, Backend::DYNAMIC);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Backend.cpp:87 */
                        bool Backend::onReleaseBuffer(const Tensor* tensor, StorageType storageType) {
                            TensorUtils::getDescribe(tensor)->mem.reset(nullptr); // Free:
                        }
                    }
                    // Recycle All Dynamic Tensor
                    _recycleDynamicMemory(c.get());
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:778 */
                    void Pipeline::_recycleDynamicMemory(Command* command) {
                        auto& t : command->workOutputs | auto& t : command->workInputs
                        auto memoryType = _getTensorStorageType(t, mOutputStatic);
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Pipeline.cpp:111 */
                        static Backend::StorageType _getTensorStorageType(const Tensor* tensor, bool outputStatic)
                        Backend::DYNAMIC == memoryType --> TensorUtils::getDescribe(t)->mem.reset(nullptr);
                    }
                    mBackend->onResizeEnd();
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:118 */
                    void VulkanBackend::onResizeEnd() {
                        mInitBuffer->end(); {
                            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/component/VulkanCommandPool.cpp:102 */
                            void VulkanCommandPool::Buffer::end() const { CALL_VK(vkEndCommandBuffer(mBuffer)); }
                        }
                        mCmdBuffers.emplace_back(mInitBuffer->get());
                        _finish();
                        /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/image/backend/VulkanBackend.cpp:233 */
                        void VulkanBackend::_finish() const {
                            auto fenceReal           = mFence->get();
                            mFence->reset();
                            CALL_VK(vkQueueSubmit(device().acquireDefaultDevQueue(), 1, &submit_info, fenceReal));
                            auto res = mFence->wait();
                            mCmdBuffers.clear();
                        }
                    }
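                    // Editor's sketch of what _finish amounts to (stock Vulkan; MNN wraps the
                    // fence in its VulkanFence class): every command buffer collected in
                    // mCmdBuffers goes out in one submit, then the CPU blocks on the fence.
                    VkSubmitInfo submit_info{VK_STRUCTURE_TYPE_SUBMIT_INFO};
                    submit_info.commandBufferCount = static_cast<uint32_t>(mCmdBuffers.size());
                    submit_info.pCommandBuffers    = mCmdBuffers.data();
                    vkQueueSubmit(queue, 1, &submit_info, fenceReal);
                    vkWaitForFences(device, 1, &fenceReal, VK_TRUE, UINT64_MAX); // what mFence->wait() does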
                }
                mMemoryUsageMode == Interpreter::Session_Memory_Collect -->
                iter.second->onGabageCollect(0);
                /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/backend/vulkan/runtime/VulkanRuntime.cpp:143 */
                void VulkanRuntime::onGabageCollect(int level) {
                    mBufferPool->release(false);
                    /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/BufferAllocator.cpp:165 */
                    void BufferAllocator::release(bool allRelease)
                    mMemoryPool->clear();
                    mPipelineFactory->reset();
                }
            }
            // Reset cache
            result->loadCache(nullptr, 0);
            /* /media/loongson/phd19/home/zhou/graduate9/work/MNN/source/core/Session.cpp:88 */
            bool Session::loadCache(const void* buffer, size_t size) {
                auto res = iter.second->onSetCache(buffer, size);
            }
            mNet->sessions.emplace_back(std::move(newSession));
        }
    }
}