SC_THREAD 中的 wait() 出现 SystemC 错误:“仅在 SC_THREAD 和 SC_CTHREAD 中允许 wait()”

问题描述 投票:0回答:1

我正在使用 SystemC 进行卷积神经网络模拟作为学校作业。我的代码包括一个带有 SC_THREAD 的模块 Conv2d,用于卷积层的前向传递,其中涉及等待输入准备就绪。当我调用 sc_start() 开始模拟时,遇到与 SC_THREAD 中使用 wait() 相关的错误。确切的错误消息是:

错误信息

Error: (E519) wait() is only allowed in SC_THREADs and SC_CTHREADs: 
        in SC_METHODs use next_trigger() instead
In file: ../../../src/sysc/kernel/sc_wait.cpp:94
make: *** [all] Error 1

尽管我已经明确地将我的前向传递函数注册为 SC_THREAD。这是我的模块定义的相关部分

模块定义(Conv2d.h)

// Conv2d.h
#ifndef CONV2D_H
#define CONV2D_H

#include <systemc.h>

// A 2D convolution layer modeled as a SystemC module.
// Data enters/leaves through one sc_fifo port per tensor element (flattened
// (C, H, W) layout); an input_ready/output_ready handshake frames each
// forward pass, which runs in a single SC_THREAD.
SC_MODULE(Conv2d) {
private:
    // Layer configuration parameters
    unsigned int in_channels, out_channels;
    unsigned int kernel_height, kernel_width;
    unsigned int stride_height, stride_width;
    unsigned int padding_height, padding_width;
    bool apply_relu;    // Apply ReLU activation after convolution

    // Feature map dimensions
    unsigned int input_feature_map_height, input_feature_map_width;
    unsigned int output_feature_map_height, output_feature_map_width;
    // unsigned int input_feature_map_size, output_feature_map_size;    // Calculated from the above parameters

    // Layer parameters
    // weights indexed as [out_channel][in_channel][kernel_row][kernel_col]
    std::vector<std::vector<std::vector<std::vector<float>>>> weights;
    std::vector<float> bias;

public:
    // Define the ports for the module
    // Assuming input_feature_map_size and output_feature_map_size are calculated outside this module
    // We need this since the number of ports have to be determined for the module prior to forward passes
    // NOTE: these sc_vectors have size 0 until configure() calls init();
    // configure() must therefore run before the ports are bound in sc_main.
    sc_vector<sc_fifo_in<float>> input_feature_map;     // FIFOS offers buffering and prevents race conditions, in case we need to run successive inference
    sc_vector<sc_fifo_out<float>> output_feature_map;
    sc_in<bool> input_ready; // Signal indicating input is ready
    sc_out<bool> output_ready; // Signal indicating output is ready

    // Constructor with configuration parameters
    // SC_HAS_PROCESS is required because the module uses a hand-written
    // constructor instead of SC_CTOR.
    SC_HAS_PROCESS(Conv2d);
    Conv2d(sc_module_name name)
        : in_channels(1), out_channels(1),
          kernel_height(3), kernel_width(3),
          stride_height(1), stride_width(1),
          padding_height(1), padding_width(1),
          apply_relu(false),
          input_feature_map_height(3), input_feature_map_width(3),
          output_feature_map_height(3), output_feature_map_width(3),
          input_ready("input_ready"), output_ready("output_ready") {
        // initialize parameters of the convolutional layer's weights and biases
        initialize_parameters();

        // Register the forward pass function with the SystemC kernel
        // as a thread process (wait() is only legal in SC_THREAD/SC_CTHREAD).
        SC_THREAD(forward_pass);
        sensitive << input_ready.pos();  // static sensitivity: rising edge of input_ready
        dont_initialize();  // Ensure the thread is not triggered upon initialization
    }

    // Reconfigure the layer geometry and (re)size the port vectors.
    // Must be called during elaboration (before sc_start / port binding),
    // since sc_vector::init() creates the actual port objects.
    void configure(unsigned int in_c, unsigned int out_c,
                   std::pair<unsigned int, unsigned int> kernel_size,
                   std::pair<unsigned int, unsigned int> stride,
                   std::pair<unsigned int, unsigned int> padding,
                   bool relu,
                   unsigned int in_feature_map_size, unsigned int out_feature_map_size,
                   std::pair<unsigned int, unsigned int> in_feature_map_dimension, std::pair<unsigned int, unsigned int> out_feature_map_dimension) {
        // Configure the layer with the given parameters
        in_channels = in_c;
        out_channels = out_c;
        kernel_height = kernel_size.first;
        kernel_width = kernel_size.second;
        stride_height = stride.first;
        stride_width = stride.second;
        padding_height = padding.first;
        padding_width = padding.second;
        apply_relu = relu;

        // Initialize input and output feature maps
        input_feature_map_height = in_feature_map_dimension.first;
        input_feature_map_width = in_feature_map_dimension.second;
        input_feature_map.init(in_feature_map_size);
        output_feature_map_height = out_feature_map_dimension.first;
        output_feature_map_width = out_feature_map_dimension.second;
        output_feature_map.init(out_feature_map_size);

        // Re-initialize parameters
        initialize_parameters();
    }

    // Forward computation using pure C++ primitives
    // We're assuming that the input/output feature maps are in the shape of (C, H, W)
    // and operates directly on it without reconstructing the 1D array back to 3D
    //
    // NOTE(review): sc_fifo reads are destructive. Overlapping kernel windows
    // read the same input element multiple times, but each FIFO holds the
    // value only once, so a second read of the same element will block
    // forever unless the producer re-sends it — verify the intended dataflow.
    void forward_pass() {
        while(true) {
            wait(); // Wait for input_ready signal (static sensitivity: input_ready.pos())
            for (unsigned int out_c = 0; out_c < out_channels; ++out_c) {
                for (unsigned int h = 0; h < output_feature_map_height; ++h) {
                    for (unsigned int w = 0; w < output_feature_map_width; ++w) {
                        float sum = 0.0;
                        for (unsigned int in_c = 0; in_c < in_channels; ++in_c) {
                            for (unsigned int kh = 0; kh < kernel_height; ++kh) {
                                for (unsigned int kw = 0; kw < kernel_width; ++kw) {
                                    // Calculate the input index, considering stride and padding
                                    // (int so that padded positions can go negative)
                                    int h_index = h * stride_height + kh - padding_height;
                                    int w_index = w * stride_width + kw - padding_width;

                                    // NOTE(review): the < comparisons mix signed int with
                                    // unsigned int; safe here because of the >= 0 guards,
                                    // but worth silencing with an explicit cast.
                                    if (h_index >= 0 && h_index < input_feature_map_height && w_index >= 0 && w_index < input_feature_map_width) {
                                        int input_index = in_c * input_feature_map_height * input_feature_map_width + h_index * input_feature_map_width + w_index;
                                        // Blocking FIFO read: legal here because we are inside an SC_THREAD.
                                        sum += input_feature_map[input_index].read() * weights[out_c][in_c][kh][kw];
                                    }
                                }
                            }
                        }
                        sum += bias[out_c];
                        if (apply_relu && sum < 0) {
                            sum = 0.0;
                        }
                        int output_index = out_c * output_feature_map_height * output_feature_map_width + h * output_feature_map_width + w;
                        // Blocking FIFO write; may suspend if the output FIFO is full.
                        output_feature_map[output_index].write(sum);
                    }
                }
            }

            output_ready.write(true);   // Indicate that output is ready
            wait(1, SC_NS); // Wait for 1 ns to ensure the signal is read before resetting
            output_ready.write(false);  // Reset
        }
    }
};

#endif // CONV2D_H

实例化模块并将其连接到测试数据的主代码直接写在 sc_main() 中。这可能是问题的根源,但我不太确定;而把它包装进另一个测试平台模块似乎又会让问题进一步复杂化。

测试代码(main.cpp)

// main.cpp
#include <systemc.h>

#include <vector>
#include <tuple>
#include <iostream>
#include <iomanip>
#include <fstream>

#include <Conv2d.h>
#include <helpers.h>


int sc_main(int argc, char* argv[]) {
    // Example instantiation and configuration
    Conv2d conv_layer("ConvolutionalLayer");
    conv_layer.configure(
        3, 64,
        std::make_pair(11, 11),
        std::make_pair(4, 4),
        std::make_pair(2, 2),
        true,
        150528,
        193600,
        std::make_pair(224, 224),
        std::make_pair(55, 55)
        );

    // Assuming you know the dimensions and shape (C_out, C_in, H, W) of the convolutional layer
    auto conv_layer_shape = conv_layer.weight_shape();
    int out_channels = std::get<0>(conv_layer_shape);
    int in_channels = std::get<1>(conv_layer_shape);
    int rows = std::get<2>(conv_layer_shape);
    int cols = std::get<3>(conv_layer_shape);

    // Load weights from file
    auto weights = reshape_weights(load_weights("./data/conv1_weight.txt"), out_channels, in_channels, rows, cols); // Reshape the weights flat vector into the 4D weights vector
    auto biases = load_weights("./data/conv1_bias.txt");    // Load biases from file, no need to reshape
    conv_layer.load_parameters(weights, biases); // Load the weights and biases into the layer

    // Start the simulation
    // Load image data first
    auto image_data = load_image("./data/cat.txt");

    // Connect the input and output feature maps to the layer
    sc_vector<sc_fifo<float>> input_feature_map_sig("input_feature_map_sig", 150528);
    for (size_t i = 0; i < input_feature_map_sig.size(); i++) {
        conv_layer.input_feature_map[i](input_feature_map_sig[i]);
    }
    sc_vector<sc_fifo<float>> output_feature_map_sig("output_feature_map_sig", 193600);
    for (size_t i = 0; i < output_feature_map_sig.size(); i++) {
        conv_layer.output_feature_map[i](output_feature_map_sig[i]);
    }
    sc_signal<bool> input_ready_sig;
    conv_layer.input_ready(input_ready_sig);
    sc_signal<bool> output_ready_sig;
    conv_layer.output_ready(output_ready_sig);

    // feed the data and signal the layer
    for (size_t i = 0; i < input_feature_map_sig.size(); i++) {
        input_feature_map_sig[i].write(image_data[i]);
    }
    input_ready_sig.write(true);

    // Start the simulation if using SC_THREAD or SC_METHOD for computation
    sc_start(); // Run the simulation

    return 0;
}

我已经确保我的 wait() 调用确实位于 SC_THREAD 内部(具体来说,forward_pass 方法在我的 Conv2d 模块的构造函数中注册为 SC_THREAD)。我期望模拟运行时不会出现任何与 wait() 使用相关的错误,因为据我了解,wait() 在 SC_THREAD 中正确使用。

这又是我上面模块定义的简化版本:

// Minimal skeleton reproducing the process registration from Conv2d.h.
// FIX: the original snippet referenced input_ready in the sensitivity list
// without declaring it, so it could not compile as a stand-alone repro —
// the port declaration is added here.
SC_MODULE(Conv2d) {
    sc_in<bool> input_ready;   // handshake the thread is sensitive to

    // Constructor (SC_HAS_PROCESS is needed with a hand-written constructor)
    SC_HAS_PROCESS(Conv2d);
    Conv2d(sc_module_name name) : input_ready("input_ready") {
        SC_THREAD(forward_pass);           // register forward_pass as a thread process
        sensitive << input_ready.pos();    // static sensitivity: rising edge of input_ready
        dont_initialize();                 // do not run the thread at time 0
    }

    void forward_pass() {
        while(true) {
            wait(); // Wait for input_ready signal — legal: we are inside an SC_THREAD
            // Forward pass computations follow...
        }
    }
};

我期望模拟开始,forward_pass 方法按照 SC_THREAD 的常规操作等待 input_ready 信号。该错误似乎表明 wait() 被滥用,但根据我的理解和 SystemC 文档,它的用法在这种情况下是正确的。

c++ multithreading simulation systemc
1个回答
0
投票

关于“在线程外调用 wait()”的报错,实际来自 sc_main() 中对 sc_fifo 的写入。sc_fifo::write() 是一个阻塞调用:如果 FIFO 当前已满、无法写入,它就会在内部调用 wait();而 sc_main() 并不是 SC_THREAD/SC_CTHREAD 进程,因此触发了该错误。
要确认这种情况,您可以在错误点放置一个断点,并按照堆栈跟踪来确认错误的根源。使用

gdb

  gdb sim.exe
  break sc_report_error
  run
  bt

该问题的解决方案是创建另一个模块/线程,由它来驱动 Conv2d 的激励。对 sc_fifo 和 sc_signal 的读写只应在 sc_start() 所调度的进程(SC_THREAD / SC_METHOD)内部进行,而不应直接在 sc_main() 中执行。

© www.soinside.com 2019 - 2024. All rights reserved.