From c328eede36f104f7d00399fbc170df2069239c3b Mon Sep 17 00:00:00 2001
From: leofltt
Date: Wed, 4 Jun 2025 00:24:07 +0200
Subject: [PATCH 1/2] nn~ bending for max - (initial commit)

---
 .../maxmsp/mc.nn_tilde/mc.nn_tilde.cpp    | 79 +++++++++++++++++--
 .../maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp  | 72 ++++++++++++++++-
 src/frontend/maxmsp/nn_tilde/nn_tilde.cpp | 78 ++++++++++++++++++
 3 files changed, 222 insertions(+), 7 deletions(-)

diff --git a/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp b/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp
index e873a12..f46b47a 100644
--- a/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp
+++ b/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp
@@ -83,6 +83,74 @@ class mc_nn_tilde : public object, public mc_operator<> {
   attribute<bool> enable{this, "enable", true,
                          description{"Enable / disable tensor computation"}};

+  // NN BENDING MOD
+  outlet<> m_info_outlet{this, "info", "Info outlet for layers and weights"};
+
+  message<> layers{
+      this, "layers", "Get available model layers",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_model->is_loaded()) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        std::vector<std::string> layers = m_model->get_available_layers();
+        atoms out_atoms;
+        out_atoms.push_back("layers");
+        for (const auto &layer : layers) {
+          out_atoms.push_back(layer);
+        }
+        m_info_outlet.send(out_atoms);
+        return {};
+      }};
+
+  message<> get_weights{
+      this, "get_weights", "Get weights for a specific layer",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_model->is_loaded()) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        if (args.size() < 1) {
+          cerr << "Layer name required" << endl;
+          return {};
+        }
+        std::string layer_name = args[0];
+        std::vector<float> weights = m_model->get_layer_weights(layer_name);
+        atoms out_atoms;
+        out_atoms.push_back("layer");
+        for (const auto &weight : weights) {
+          out_atoms.push_back(weight);
+        }
+        m_info_outlet.send(out_atoms);
+        return {};
+      }};
+
+  message<> set_weights{
+      this, "set_weights", "Set weights for a specific layer",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_model->is_loaded()) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        if (args.size() < 2) {
+          cerr << "Layer name and weights required" << endl;
+          return {};
+        }
+        std::string layer_name = args[0];
+        std::vector<float> weights;
+        for (size_t i = 1; i < args.size(); i++) {
+          if (args[i].type() == message_type::float_argument) {
+            weights.push_back(float(args[i]));
+          }
+        }
+        try {
+          m_model->set_layer_weights(layer_name, weights);
+        } catch (const std::exception &e) {
+          cerr << "Error setting weights: " << e.what() << endl;
+        }
+        return {};
+      }};
+
   // BOOT STAMP
   message<> maxclass_setup{
       this, "maxclass_setup",
@@ -175,16 +243,15 @@ void model_perform(mc_nn_tilde *mc_nn_instance) {
       mc_nn_instance->m_method, mc_nn_instance->get_batches());
 }

-void check_loop_buffers(mc_nn_tilde *mc_nn_instance, std::vector<float *> &in_model, std::vector<float *> &out_model) {
-  if (mc_nn_instance->m_in_model.size() != in_model.size())
-  {
+void check_loop_buffers(mc_nn_tilde *mc_nn_instance,
+                        std::vector<float *> &in_model,
+                        std::vector<float *> &out_model) {
+  if (mc_nn_instance->m_in_model.size() != in_model.size()) {
     in_model.clear();
     for (auto &ptr : mc_nn_instance->m_in_model)
       in_model.push_back(ptr.get());
   }
-
-  if (mc_nn_instance->m_out_model.size() != out_model.size())
-  {
+  if (mc_nn_instance->m_out_model.size() != out_model.size()) {
     out_model.clear();
     for (auto &ptr : mc_nn_instance->m_out_model)
       out_model.push_back(ptr.get());
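Note: the three message handlers added above only do the Max-side plumbing; the actual layer access lives on the backend object behind m_model, which this diff does not touch. The declarations below are a minimal sketch of the interface the frontend appears to assume; the names and signatures are inferred from the call sites in this patch, not taken from the real backend, which may differ.

// Sketch only: backend interface assumed by the new messages, inferred from
// the call sites in this patch (not part of the diff).
#include <string>
#include <vector>

class Backend {
public:
  bool is_loaded();                                   // used by the guards above
  std::vector<std::string> get_available_layers();    // "layers" message
  std::vector<float> get_layer_weights(const std::string &layer_name);  // "get_weights"
  void set_layer_weights(const std::string &layer_name,
                         const std::vector<float> &weights);            // "set_weights", may throw
  void reload();                                      // "reload" message (nn~ version further down)
};

In a patcher, these are ordinary Max messages sent to the object's message inlet, e.g. "layers", then "get_weights decoder.net.0.weight", then "set_weights decoder.net.0.weight 0.01 -0.3 0.7". The layer name here is only a placeholder; real names come from the list the layers message sends out of the new info outlet.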
diff --git a/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp b/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp
index ddfe3c3..ffb1f82 100644
--- a/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp
+++ b/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp
@@ -86,6 +86,75 @@ class mc_bnn_tilde : public object, public mc_operator<> {
   attribute<bool> enable{this, "enable", true,
                          description{"Enable / disable tensor computation"}};

+  // NN BENDING MOD
+  outlet<> m_info_outlet{this, "info", "Info outlet for layers and weights"};
+
+  // Message handlers for layers and weights
+  message<> layers{
+      this, "layers", "Get available model layers",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_model->is_loaded()) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        std::vector<std::string> layers = m_model->get_available_layers();
+        atoms out_atoms;
+        out_atoms.push_back("layers");
+        for (const auto &layer : layers) {
+          out_atoms.push_back(layer);
+        }
+        m_info_outlet.send(out_atoms);
+        return {};
+      }};
+
+  message<> get_weights{
+      this, "get_weights", "Get weights for a specific layer",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_model->is_loaded()) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        if (args.size() < 1) {
+          cerr << "Layer name required" << endl;
+          return {};
+        }
+        std::string layer_name = args[0];
+        std::vector<float> weights = m_model->get_layer_weights(layer_name);
+        atoms out_atoms;
+        out_atoms.push_back("layer");
+        for (const auto &weight : weights) {
+          out_atoms.push_back(weight);
+        }
+        m_info_outlet.send(out_atoms);
+        return {};
+      }};
+
+  message<> set_weights{
+      this, "set_weights", "Set weights for a specific layer",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_model->is_loaded()) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        if (args.size() < 2) {
+          cerr << "Layer name and weights required" << endl;
+          return {};
+        }
+        std::string layer_name = args[0];
+        std::vector<float> weights;
+        for (size_t i = 1; i < args.size(); i++) {
+          if (args[i].type() == message_type::float_argument) {
+            weights.push_back(float(args[i]));
+          }
+        }
+        try {
+          m_model->set_layer_weights(layer_name, weights);
+        } catch (const std::exception &e) {
+          cerr << "Error setting weights: " << e.what() << endl;
+        }
+        return {};
+      }};
+
   // BOOT STAMP
   message<> maxclass_setup{
       this, "maxclass_setup",
@@ -386,7 +455,8 @@ void mc_bnn_tilde::perform(audio_bundle input, audio_bundle output) {
     for (int d(0); d < m_in_dim; d++) {
       auto in = input.samples(b * m_in_dim + d);
       m_in_buffer[d * get_batches() + b].put(in, vec_size);
-      std::cout << "populate batch " << b << "; channel " << d << " into buffer" << d * get_batches() + b << "; value : " << in[0] << std::endl;
+      std::cout << "populate batch " << b << "; channel " << d << " into buffer"
+                << d * get_batches() + b << "; value : " << in[0] << std::endl;
     }
   }
diff --git a/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp b/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
index a091e29..f2acf38 100644
--- a/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
+++ b/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
@@ -82,6 +82,84 @@ class nn : public object, public vector_operator<> {
         return args;
       }}};

+  // NN BENDING MOD
+  outlet<> m_info_outlet{this, "info", "Info outlet for layers and weights"};
+
+  message<> layers{
+      this, "layers", "Get available model layers",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_is_backend_init) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        std::vector<std::string> layers = m_model->get_available_layers();
+        atoms out_atoms;
+        out_atoms.push_back("layers");
+        for (const auto &layer : layers) {
+          out_atoms.push_back(layer);
+        }
+        m_info_outlet.send(out_atoms);
+        return {};
+      }};
+
+  message<> get_weights{
+      this, "get_weights", "Get weights for a specific layer",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_is_backend_init) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        if (args.size() < 1) {
+          cerr << "Layer name required" << endl;
+          return {};
+        }
+        std::string layer_name = args[0];
+        std::vector<float> weights = m_model->get_layer_weights(layer_name);
+        atoms out_atoms;
+        out_atoms.push_back("layer");
+        for (const auto &weight : weights) {
+          out_atoms.push_back(weight);
+        }
+        m_info_outlet.send(out_atoms);
+        return {};
+      }};
+
+  message<> set_weights{
+      this, "set_weights", "Set weights for a specific layer",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_is_backend_init) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        if (args.size() < 2) {
+          cerr << "Layer name and weights required" << endl;
+          return {};
+        }
+        std::string layer_name = args[0];
+        std::vector<float> weights;
+        for (size_t i = 1; i < args.size(); i++) {
+          if (args[i].type() == message_type::float_argument) {
+            weights.push_back(float(args[i]));
+          }
+        }
+        try {
+          m_model->set_layer_weights(layer_name, weights);
+        } catch (const std::exception &e) {
+          cerr << "Error setting weights: " << e.what() << endl;
+        }
+        return {};
+      }};
+
+  message<> reload{
+      this, "reload", "Reload the model",
+      [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
+        if (!m_is_backend_init) {
+          cerr << "Model not initialized" << endl;
+          return {};
+        }
+        m_model->reload();
+        return {};
+      }};
   // BOOT STAMP
   message<> maxclass_setup{
       this, "maxclass_setup",
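For orientation, the operation these messages expose (reading and overwriting individual parameter tensors of a loaded TorchScript model) maps directly onto torch::jit's named_parameters(). The sketch below shows one plausible way backend methods of this shape could be written; it is not code from this PR (the backend side is not part of the diff), the _sketch names are invented for illustration, and the real implementation may differ.

#include <torch/script.h>

#include <stdexcept>
#include <string>
#include <vector>

// Return a parameter tensor flattened to floats, addressed by its fully
// qualified name (e.g. "decoder.net.0.weight").
std::vector<float> get_layer_weights_sketch(torch::jit::script::Module &model,
                                            const std::string &layer_name) {
  for (const auto &p : model.named_parameters()) {
    if (p.name == layer_name) {
      auto flat = p.value.detach().to(torch::kFloat32).flatten().contiguous();
      return std::vector<float>(flat.data_ptr<float>(),
                                flat.data_ptr<float>() + flat.numel());
    }
  }
  throw std::runtime_error("unknown layer: " + layer_name);
}

// Overwrite a parameter tensor in place ("bending" the model); the change is
// picked up by the next forward call without reloading the file.
void set_layer_weights_sketch(torch::jit::script::Module &model,
                              const std::string &layer_name,
                              const std::vector<float> &weights) {
  torch::NoGradGuard no_grad;
  for (auto p : model.named_parameters()) {
    if (p.name != layer_name)
      continue;
    if (static_cast<int64_t>(weights.size()) != p.value.numel())
      throw std::runtime_error("size mismatch for layer: " + layer_name);
    auto src = torch::from_blob(const_cast<float *>(weights.data()),
                                {static_cast<int64_t>(weights.size())},
                                torch::kFloat32)
                   .reshape(p.value.sizes());
    p.value.copy_(src); // copy_ handles dtype/device conversion
    return;
  }
  throw std::runtime_error("unknown layer: " + layer_name);
}

Exposing a reload message alongside set_weights (as the nn~ diff above does) gives a way back to the original weights, presumably by re-reading the TorchScript file after bending.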
From 2c26f9e8d9b8ad81868ad657149d295d509a17c6 Mon Sep 17 00:00:00 2001
From: leofltt
Date: Wed, 4 Jun 2025 09:29:07 +0200
Subject: [PATCH 2/2] updated credits

---
 src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp   |  8 ++++++--
 src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp | 14 +++++++++-----
 src/frontend/maxmsp/nn_tilde/nn_tilde.cpp         |  7 +++++--
 3 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp b/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp
index f46b47a..8cd96df 100644
--- a/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp
+++ b/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp
@@ -31,7 +31,9 @@ class mc_nn_tilde : public object, public mc_operator<> {
 public:
   MIN_DESCRIPTION{"Multi-channel interface for deep learning models"};
   MIN_TAGS{"audio, deep learning, ai"};
-  MIN_AUTHOR{"Antoine Caillon, Axel Chemla--Romeu-Santos"};
+  MIN_AUTHOR{
+      "Antoine Caillon, Axel Chemla--Romeu-Santos, mod by Błażej Kotowski, "
+      "Leonardo Foletto"};

   mc_nn_tilde(const atoms &args = {});
   ~mc_nn_tilde();
@@ -157,7 +159,9 @@ class mc_nn_tilde : public object, public mc_operator<> {
       [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
         // make stamp
         cout << "nn~ " << VERSION << " - torch " << TORCH_VERSION
-             << " - 2023 - Antoine Caillon & Axel Chemla--Romeu-Santos" << endl;
+             << " - 2023 - Antoine Caillon & Axel Chemla--Romeu-Santos, mod by "
+                "Błażej Kotowski, Leonardo Foletto"
+             << endl;
         cout << "visit https://caillonantoine.github.io" << endl;
         // mc handle
         c74::max::t_class *c = args[0];
diff --git a/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp b/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp
index ffb1f82..e5f176f 100644
--- a/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp
+++ b/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp
@@ -32,7 +32,9 @@ class mc_bnn_tilde : public object, public mc_operator<> {
   MIN_DESCRIPTION{
       "Multi-channel interface for deep learning models (batch version)"};
   MIN_TAGS{"audio, deep learning, ai"};
-  MIN_AUTHOR{"Antoine Caillon, Axel Chemla--Romeu-Santos"};
+  MIN_AUTHOR{
+      "Antoine Caillon, Axel Chemla--Romeu-Santos, mod by Błażej Kotowski, "
+      "Leonardo Foletto"};

   mc_bnn_tilde(const atoms &args = {});
   ~mc_bnn_tilde();
@@ -78,9 +80,9 @@ class mc_bnn_tilde : public object, public mc_operator<> {
                              "Name of the method to call during synthesis."};
   argument<int> batches_arg{this, "batches", "Number of batches"};

-  argument<int> buffer_arg{
-      this, "buffer size",
-      "Size of the internal buffer (can't be lower than the method's ratio)."};
+  argument<int> buffer_arg{this, "buffer size",
+                           "Size of the internal buffer (can't be lower than "
+                           "the method's ratio)."};

   // ENABLE / DISABLE ATTRIBUTE
   attribute<bool> enable{this, "enable", true,
@@ -161,7 +163,9 @@ class mc_bnn_tilde : public object, public mc_operator<> {
       [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
         // make stamp
         cout << "nn~ " << VERSION << " - torch " << TORCH_VERSION
-             << " - 2023 - Antoine Caillon & Axel Chemla--Romeu-Santos" << endl;
+             << " - 2023 - Antoine Caillon & Axel Chemla--Romeu-Santos, mod by "
+                "Błażej Kotowski, Leonardo Foletto"
+             << endl;
         cout << "visit https://caillonantoine.github.io" << endl;
         // mc handle
         c74::max::t_class *c = args[0];
diff --git a/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp b/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
index f2acf38..42b7331 100644
--- a/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
+++ b/src/frontend/maxmsp/nn_tilde/nn_tilde.cpp
@@ -27,7 +27,8 @@ class nn : public object, public vector_operator<> {
 public:
   MIN_DESCRIPTION{"Interface for deep learning models"};
   MIN_TAGS{"audio, deep learning, ai"};
-  MIN_AUTHOR{"Antoine Caillon & Axel Chemla--Romeu-Santos"};
+  MIN_AUTHOR{"Antoine Caillon & Axel Chemla--Romeu-Santos, mod by Błażej "
+             "Kotowski, Leonardo Foletto"};

   nn(const atoms &args = {});
   ~nn();
@@ -165,7 +166,9 @@ class nn : public object, public vector_operator<> {
       this, "maxclass_setup",
       [this](const c74::min::atoms &args, const int inlet) -> c74::min::atoms {
         cout << "nn~ " << VERSION << " - torch " << TORCH_VERSION
-             << " - 2023 - Antoine Caillon & Axel Chemla--Romeu-Santos" << endl;
+             << " - 2023 - Antoine Caillon & Axel Chemla--Romeu-Santos, mod by "
+                "Błażej Kotowski, Leonardo Foletto"
+             << endl;
         cout << "visit https://caillonantoine.github.io" << endl;
         return {};
       }};