[Trivial] Clang format fixed and PR rebased
Clang-format issues fixed and commented-out code removed.
Rebased the PR onto the latest changes.

Signed-off-by: Yash Singh <[email protected]>
yashSingh0723 committed Nov 18, 2024
1 parent 9f9e87d commit f1f67f8
Showing 7 changed files with 18 additions and 30 deletions.
4 changes: 2 additions & 2 deletions api/ccapi/include/layer.h
@@ -108,8 +108,8 @@ enum LayerType {
   LAYER_UPSAMPLE2D, /**< Upsample 2D Layer type */
   LAYER_RMSNORM = ML_TRAIN_LAYER_TYPE_RMSNORM, /**<RMS NORM Layer */
   LAYER_TRANSPOSE = ML_TRAIN_LAYER_TYPE_TRANSPOSE, /**< Transpose Layer type */
-  LAYER_UNKNOWN = ML_TRAIN_LAYER_TYPE_UNKNOWN /**< Unknown */
-  LAYER_LM_HEAD = ML_TRAIN_LAYER_TYPE_LM_HEAD, /**< LM Head Layer */
+  LAYER_UNKNOWN = ML_TRAIN_LAYER_TYPE_UNKNOWN, /**< Unknown */
+  LAYER_LM_HEAD = ML_TRAIN_LAYER_TYPE_LM_HEAD, /**< LM Head Layer */
 };

 /**
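Note: with LAYER_LM_HEAD registered as a first-class ml::train::LayerType, the layer can be created through the ccapi factory declared in this same header. A minimal usage sketch; the property string is illustrative, not the layer's confirmed property set:

#include <layer.h> // api/ccapi/include/layer.h
#include <memory>

std::unique_ptr<ml::train::Layer> makeLmHead() {
  // "name" is a generic layer property; layer-specific keys may differ.
  return ml::train::createLayer(ml::train::LayerType::LAYER_LM_HEAD,
                                {"name=lm_head"});
}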
16 changes: 8 additions & 8 deletions api/nntrainer-api-common.h
@@ -62,15 +62,15 @@ typedef enum {
     27, /**< Layer Normalization Layer type (Since 7.0) */
   ML_TRAIN_LAYER_TYPE_POSITIONAL_ENCODING =
     28, /**< Positional Encoding Layer type (Since 7.0) */
-  ML_TRAIN_LAYER_TYPE_IDENTITY = 29, /**< Identity Layer type (Since 8.0) */
-  ML_TRAIN_LAYER_TYPE_SWIGLU = 30, /**< Swiglu Layer type */
-  ML_TRAIN_LAYER_TYPE_WEIGHT = 31, /**< Weight Layer type (Since 9.0)*/
-  ML_TRAIN_LAYER_TYPE_ADD = 32, /**< Add Layer type (Since 9.0)*/
-  ML_TRAIN_LAYER_TYPE_SUBTRACT = 33, /**< Subtract Layer type (Since 9.0)*/
-  ML_TRAIN_LAYER_TYPE_MULTIPLY = 34, /**< Multiply Layer type (Since 9.0)*/
-  ML_TRAIN_LAYER_TYPE_DIVIDE = 35, /**< Divide Layer type (Since 9.0)*/
+  ML_TRAIN_LAYER_TYPE_IDENTITY = 29, /**< Identity Layer type (Since 8.0) */
+  ML_TRAIN_LAYER_TYPE_SWIGLU = 30, /**< Swiglu Layer type */
+  ML_TRAIN_LAYER_TYPE_WEIGHT = 31, /**< Weight Layer type (Since 9.0)*/
+  ML_TRAIN_LAYER_TYPE_ADD = 32, /**< Add Layer type (Since 9.0)*/
+  ML_TRAIN_LAYER_TYPE_SUBTRACT = 33, /**< Subtract Layer type (Since 9.0)*/
+  ML_TRAIN_LAYER_TYPE_MULTIPLY = 34, /**< Multiply Layer type (Since 9.0)*/
+  ML_TRAIN_LAYER_TYPE_DIVIDE = 35, /**< Divide Layer type (Since 9.0)*/
   ML_TRAIN_LAYER_TYPE_TRANSPOSE = 36, /**< Transpose Layer type */
-  ML_TRAIN_LAYER_TYPE_LM_HEAD = 37, /**< LM Head Layer type */
+  ML_TRAIN_LAYER_TYPE_LM_HEAD = 37, /**< LM Head Layer type */
   ML_TRAIN_LAYER_TYPE_PREPROCESS_FLIP =
     300, /**< Preprocess flip Layer (Since 6.5) */
   ML_TRAIN_LAYER_TYPE_PREPROCESS_TRANSLATE =
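Note: the same type id is reachable from the C API. A short sketch assuming the standard ml-train calls (ml_train_layer_create, ml_train_layer_set_property), with error handling abbreviated:

#include <nntrainer.h> // C training API (api/capi)

int create_lm_head_layer(void) {
  ml_train_layer_h layer = NULL;
  int status = ml_train_layer_create(&layer, ML_TRAIN_LAYER_TYPE_LM_HEAD);
  if (status != ML_ERROR_NONE)
    return status;

  /* Property keys are illustrative; check the layer's documentation. */
  status = ml_train_layer_set_property(layer, "name=lm_head", NULL);

  ml_train_layer_destroy(layer);
  return status;
}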
2 changes: 1 addition & 1 deletion nntrainer/cl_context.cpp
@@ -58,7 +58,7 @@ static void add_default_object(ClContext &cc) {
   cc.registerFactory(nntrainer::createLayer<TransposeLayerCl>,
                      TransposeLayerCl::type,
                      ml::train::LayerType::LAYER_TRANSPOSE);

   cc.registerFactory(nntrainer::createLayer<CustomLMHeadLayerCl>,
                      CustomLMHeadLayerCl::type,
                      ml::train::LayerType::LAYER_LM_HEAD);
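Note: registerFactory binds a creator function to the layer's string type and enum id so the OpenCL context can instantiate layers on demand. A minimal sketch of the registration pattern; Layer, createLayer, and LayerRegistry below are illustrative stand-ins, not nntrainer's actual types:

#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct Layer {
  virtual ~Layer() = default; // stand-in for the real layer interface
};

template <typename T>
std::unique_ptr<Layer> createLayer() {
  return std::make_unique<T>();
}

class LayerRegistry { // in the spirit of ClContext::registerFactory
public:
  using Creator = std::function<std::unique_ptr<Layer>()>;

  void registerFactory(Creator creator, const std::string &key) {
    creators_[key] = std::move(creator);
  }

  std::unique_ptr<Layer> create(const std::string &key) const {
    auto it = creators_.find(key);
    if (it == creators_.end())
      throw std::invalid_argument("unknown layer type: " + key);
    return it->second(); // invoke the stored creator
  }

private:
  std::map<std::string, Creator> creators_;
};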
2 changes: 1 addition & 1 deletion nntrainer/layers/cl_layers/addition_layer_cl.h
@@ -35,7 +35,7 @@ class AdditionLayerCL : public Layer {
   /**
    * @brief Destructor of Addition Layer
    */
-  ~AdditionLayerCL() {};
+  ~AdditionLayerCL(){};

   /**
    * @brief Move constructor of AdditionLayer.
15 changes: 0 additions & 15 deletions nntrainer/layers/cl_layers/custom_vocab_selection.cpp
@@ -32,21 +32,6 @@ nntrainer::VocabSelectionNNTrainer::VocabSelectionNNTrainer(
   this->lshBits = lshBlockNum * lshBlockSize;
   this->lshData = std::vector<lshDataBlock>(this->vocabCnt * lshBlockNum);

-  // for (unsigned int i = 0; i < vocabCnt; ++i) {
-  //   for (unsigned int j = 0; j < lshBlockNum; ++j) {
-  //     unsigned int actualSize = std::min(lshBlockSize, hiddenSize -
-  //     (int)j * lshBlockSize); lshDataBlock d; for (unsigned int k = 0; k
-  //     < actualSize; ++k) {
-  //       d[k] = weights.getValue<_FP16>(0, 0, i, j * lshBlockSize + k) >
-  //       0 ? 1 : 0;
-  //     }
-  //     for (unsigned int k = actualSize; k < lshBlockSize; ++k) {
-  //       d[k] = 0;
-  //     }
-  //     this->lshData[i * lshBlockNum + j] = d;
-  //   }
-  // }
-
   for (unsigned int i = 0; i < lshBlockNum; ++i) {
     unsigned int actualSize =
       std::min(lshBlockSize, hiddenSize - (int)i * lshBlockSize);
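Note: both the deleted block and the loop that replaced it binarize the weight matrix into LSH signature blocks, one bit per weight, set when the weight is positive. A self-contained sketch of that sign-bit hashing idea, assuming a plain row-major float matrix instead of nntrainer's Tensor (lshDataBlock is a fixed-size bit block in the real code; kLshBlockSize here is an assumed size):

#include <algorithm>
#include <bitset>
#include <vector>

constexpr int kLshBlockSize = 64; // stand-in for lshBlockSize
using LshDataBlock = std::bitset<kLshBlockSize>; // stand-in for lshDataBlock

// Bit k of block (i, j) is 1 iff weight (i, j * kLshBlockSize + k) > 0;
// tail bits past hiddenSize remain 0, mirroring the zero-fill loop above.
std::vector<LshDataBlock> buildLshData(const std::vector<float> &weights,
                                       unsigned int vocabCnt, int hiddenSize) {
  const unsigned int lshBlockNum =
    (hiddenSize + kLshBlockSize - 1) / kLshBlockSize;
  std::vector<LshDataBlock> lshData(vocabCnt * lshBlockNum);

  for (unsigned int i = 0; i < vocabCnt; ++i) {
    for (unsigned int j = 0; j < lshBlockNum; ++j) {
      const int actualSize =
        std::min(kLshBlockSize, hiddenSize - (int)j * kLshBlockSize);
      LshDataBlock d; // all bits start at 0
      for (int k = 0; k < actualSize; ++k)
        d[k] = weights[i * hiddenSize + j * kLshBlockSize + k] > 0.0f;
      lshData[i * lshBlockNum + j] = d;
    }
  }
  return lshData;
}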
2 changes: 1 addition & 1 deletion nntrainer/layers/cl_layers/custom_vocab_selection.h
@@ -93,7 +93,7 @@ class VocabSelectionNNTrainer : public VocabSelection {
   /**
    * @brief Destructor of VocabSelectionNNTrainer class
    */
-  ~VocabSelectionNNTrainer() {};
+  ~VocabSelectionNNTrainer(){};
 };

 } // namespace nntrainer
7 changes: 5 additions & 2 deletions nntrainer/layers/cl_layers/lm_head_layer_cl.h
@@ -80,7 +80,10 @@ class CustomLMHeadLayerCl : public LayerImpl {
    */
   void finalize(nntrainer::InitLayerContext &context) override;

-  void initialize(nntrainer::RunLayerContext &context) override {
+  /**
+   * @copydoc Layer::intialize(RunLayerContext &context)
+   */
+  void initialize(nntrainer::RunLayerContext &context) {
     auto use_vocab_selection =
       std::get<props::UseVocabSelection>(custom_lm_head_props).get();

@@ -123,7 +126,7 @@ class CustomLMHeadLayerCl : public LayerImpl {
    * @copydoc Layer::exportTo(Exporter &exporter, ExportMethods method)
    */
   void exportTo(nntrainer::Exporter &exporter,
-                const ml::train::ExportMethods &method) const override {};
+                const ml::train::ExportMethods &method) const override{};

   /**
    * @copydoc Layer::getType()
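Note on the first hunk above: the new initialize drops the override specifier. override compiles only when a matching virtual function exists in a base class, so removing it is the usual fix when the method does not actually override anything, which is presumably the case here. A minimal illustration with hypothetical types:

struct Base {
  virtual void finalize() {}
  void initialize() {} // not virtual
};

struct Derived : Base {
  void finalize() override {} // OK: overrides a virtual in Base
  // void initialize() override {} // error: marked 'override' but
  //                               // Base::initialize is not virtual
  void initialize() {} // compiles, but hides Base::initialize instead
};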
