diff --git a/coreneuron/io/nrn_filehandler.hpp b/coreneuron/io/nrn_filehandler.hpp
index e7d280023..99908550a 100644
--- a/coreneuron/io/nrn_filehandler.hpp
+++ b/coreneuron/io/nrn_filehandler.hpp
@@ -14,6 +14,7 @@
 #include <sys/stat.h>
 
 #include "coreneuron/utils/nrn_assert.h"
+#include "coreneuron/io/nrnsection_mapping.hpp"
 
 namespace coreneuron {
 /** Encapsulate low-level reading of coreneuron input data files.
@@ -110,7 +111,7 @@ class FileHandler {
      * Read count no of mappings for section to segment
      */
     template <typename T>
-    int read_mapping_info(T* mapinfo) {
+    int read_mapping_info(T* mapinfo, NrnThreadMappingInfo* ntmapping, CellMapping* cmap) {
         int nsec, nseg, n_scan;
         char line_buf[max_line_length], name[max_line_length];
 
@@ -123,14 +124,19 @@
 
         if (nseg) {
             std::vector<int> sec, seg;
+            std::vector<double> lfp_factors;
             sec.reserve(nseg);
             seg.reserve(nseg);
+            lfp_factors.reserve(nseg);
 
             read_array<int>(&sec[0], nseg);
             read_array<int>(&seg[0], nseg);
+            read_array<double>(&lfp_factors[0], nseg);
 
             for (int i = 0; i < nseg; i++) {
                 mapinfo->add_segment(sec[i], seg[i]);
+                ntmapping->add_segment_id(seg[i]);
+                cmap->add_segment_lfp_factor(seg[i], lfp_factors[i]);
             }
         }
         return nseg;
diff --git a/coreneuron/io/nrn_setup.cpp b/coreneuron/io/nrn_setup.cpp
index 63a3c86d2..b078be1ef 100644
--- a/coreneuron/io/nrn_setup.cpp
+++ b/coreneuron/io/nrn_setup.cpp
@@ -953,7 +953,7 @@ void read_phase3(NrnThread& nt, UserParams& userParams) {
         // read section-segment mapping for every section list
         for (int j = 0; j < nseclist; j++) {
             SecMapping* smap = new SecMapping();
-            F.read_mapping_info(smap);
+            F.read_mapping_info(smap, ntmapping, cmap);
             cmap->add_sec_map(smap);
         }
 
diff --git a/coreneuron/io/nrnsection_mapping.hpp b/coreneuron/io/nrnsection_mapping.hpp
index d7524fc42..5b9c7bd57 100644
--- a/coreneuron/io/nrnsection_mapping.hpp
+++ b/coreneuron/io/nrnsection_mapping.hpp
@@ -14,6 +14,7 @@
 #include <vector>
 #include <map>
 #include <iostream>
+#include <unordered_map>
 
 namespace coreneuron {
 
@@ -73,6 +74,9 @@ struct CellMapping {
     /** list of section lists (like soma, axon, apic) */
     std::vector<SecMapping*> secmapvec;
 
+    /** map containing segment ids and their respective lfp factors */
+    std::unordered_map<int, double> lfp_factors;
+
     CellMapping(int g)
         : gid(g) {}
 
@@ -137,6 +141,11 @@
         return count;
     }
 
+    /** @brief add the lfp factor of a segment_id */
+    void add_segment_lfp_factor(const int segment_id, double factor) {
+        lfp_factors.insert({segment_id, factor});
+    }
+
     ~CellMapping() {
         for (size_t i = 0; i < secmapvec.size(); i++) {
             delete secmapvec[i];
@@ -153,6 +162,11 @@
     /** list of cells mapping */
     std::vector<CellMapping*> mappingvec;
 
+    /** list of segment ids */
+    std::vector<int> segment_ids;
+
+    std::vector<double> _lfp;
+
     /** @brief number of cells */
     size_t size() const {
         return mappingvec.size();
@@ -181,5 +195,10 @@
     void add_cell_mapping(CellMapping* c) {
         mappingvec.push_back(c);
     }
+
+    /** @brief add a new segment */
+    void add_segment_id(const int segment_id) {
+        segment_ids.push_back(segment_id);
+    }
 };
 }  // namespace coreneuron
diff --git a/coreneuron/io/reports/nrnreport.hpp b/coreneuron/io/reports/nrnreport.hpp
index 7cdc05806..ac85d75be 100644
--- a/coreneuron/io/reports/nrnreport.hpp
+++ b/coreneuron/io/reports/nrnreport.hpp
@@ -76,7 +76,8 @@ enum ReportType {
     SynapseReport,
     IMembraneReport,
     SectionReport,
-    SummationReport
+    SummationReport,
+    LFPReport
 };
 
 // enumerate that defines the section type for a Section report
diff --git a/coreneuron/io/reports/report_configuration_parser.cpp b/coreneuron/io/reports/report_configuration_parser.cpp
index 6c69662b7..528c269a6 100644
--- a/coreneuron/io/reports/report_configuration_parser.cpp
+++ b/coreneuron/io/reports/report_configuration_parser.cpp
@@ -138,6 +138,9 @@ std::vector<ReportConfiguration> create_report_configurations(const std::string&
             report_type = SynapseReport;
         } else if (report.type_str == "summation") {
             report_type = SummationReport;
+        } else if (report.type_str == "lfp") {
+            nrn_use_fast_imem = true;
+            report_type = LFPReport;
         } else {
             std::cerr << "Report error: unsupported type " << report.type_str << std::endl;
             nrn_abort(1);
diff --git a/coreneuron/io/reports/report_event.cpp b/coreneuron/io/reports/report_event.cpp
index 78eb6b268..6189c4526 100644
--- a/coreneuron/io/reports/report_event.cpp
+++ b/coreneuron/io/reports/report_event.cpp
@@ -10,6 +10,7 @@
 #include "coreneuron/sim/multicore.hpp"
 #include "coreneuron/io/reports/nrnreport.hpp"
 #include "coreneuron/utils/nrn_assert.h"
+#include "coreneuron/io/nrnsection_mapping.hpp"
 #ifdef ENABLE_BIN_REPORTS
 #include "reportinglib/Records.h"
 #endif  // ENABLE_BIN_REPORTS
@@ -24,11 +25,13 @@ ReportEvent::ReportEvent(double dt,
                          double tstart,
                          const VarsToReport& filtered_gids,
                          const char* name,
-                         double report_dt)
+                         double report_dt,
+                         ReportType type)
     : dt(dt)
     , tstart(tstart)
     , report_path(name)
     , report_dt(report_dt)
+    , report_type(type)
     , vars_to_report(filtered_gids) {
     nrn_assert(filtered_gids.size());
     step = tstart / dt;
@@ -72,12 +75,49 @@ void ReportEvent::summation_alu(NrnThread* nt) {
     }
 }
 
+void ReportEvent::lfp_calc(NrnThread* nt) {
+    // Calculate lfp only on reporting steps
+    if (step > 0 && (static_cast<int>(step) % reporting_period) == 0) {
+        auto* mapinfo = static_cast<NrnThreadMappingInfo*>(nt->mapping);
+        double* fast_imem_rhs = nt->nrn_fast_imem->nrn_sav_rhs;
+        auto& summation_report = nt->summation_report_handler_->summation_reports_[report_path];
+        for (const auto& kv: vars_to_report) {
+            int gid = kv.first;
+            const auto& to_report = kv.second;
+            const auto& cell_mapping = mapinfo->get_cell_mapping(gid);
+
+            int count = 0;
+            double sum = 0.0;
+            for (const auto& kv: cell_mapping->lfp_factors) {
+                int segment_id = kv.first;
+                double factor = kv.second;
+                if (std::isnan(factor)) {
+                    factor = 0.0;
+                }
+                double iclamp = 0.0;
+                for (const auto& value: summation_report.currents_[segment_id]) {
+                    double current_value = *value.first;
+                    int scale = value.second;
+                    iclamp += current_value * scale;
+                }
+                sum += (fast_imem_rhs[segment_id] + iclamp) * factor;
+                count++;
+            }
+            *(to_report.front().var_value) = sum;
+        }
+    }
+}
+
 /** on deliver, call ReportingLib and setup next event */
 void ReportEvent::deliver(double t, NetCvode* nc, NrnThread* nt) {
 /* reportinglib is not thread safe */
 #pragma omp critical
     {
-        summation_alu(nt);
+        if (report_type == ReportType::SummationReport) {
+            summation_alu(nt);
+        } else if (report_type == ReportType::LFPReport) {
+            lfp_calc(nt);
+        }
         // each thread needs to know its own step
 #ifdef ENABLE_BIN_REPORTS
         records_nrec(step, gids_to_report.size(), gids_to_report.data(), report_path.data());
diff --git a/coreneuron/io/reports/report_event.hpp b/coreneuron/io/reports/report_event.hpp
index 20d325614..0f1a07358 100644
--- a/coreneuron/io/reports/report_event.hpp
+++ b/coreneuron/io/reports/report_event.hpp
@@ -36,12 +36,14 @@ class ReportEvent: public DiscreteEvent {
                 double tstart,
                 const VarsToReport& filtered_gids,
                 const char* name,
-                double report_dt);
+                double report_dt,
+                ReportType type);
 
     /** on deliver, call ReportingLib and setup next event */
     void deliver(double t, NetCvode* nc, NrnThread* nt) override;
     bool require_checkpoint() override;
     void summation_alu(NrnThread* nt);
+    void lfp_calc(NrnThread* nt);
 
   private:
     double dt;
@@ -52,6 +54,7 @@ class ReportEvent: public DiscreteEvent {
     std::vector<int> gids_to_report;
     double tstart;
     VarsToReport vars_to_report;
+    ReportType report_type;
 };
 
 #endif  // defined(ENABLE_BIN_REPORTS) || defined(ENABLE_SONATA_REPORTS)
diff --git a/coreneuron/io/reports/report_handler.cpp b/coreneuron/io/reports/report_handler.cpp
index 84341e7a4..e720f331e 100644
--- a/coreneuron/io/reports/report_handler.cpp
+++ b/coreneuron/io/reports/report_handler.cpp
@@ -35,6 +35,7 @@ void ReportHandler::create_report(double dt, double tstop, double delay) {
             continue;
         }
         const std::vector<int>& nodes_to_gid = map_gids(nt);
+        auto* mapinfo = static_cast<NrnThreadMappingInfo*>(nt.mapping);
         VarsToReport vars_to_report;
         bool is_soma_target;
         switch (m_report_config.type) {
@@ -58,6 +59,15 @@ void ReportHandler::create_report(double dt, double tstop, double delay) {
                                                             nodes_to_gid);
                 register_custom_report(nt, m_report_config, vars_to_report);
                 break;
+            case LFPReport:
+                // 1 lfp value per gid
+                mapinfo->_lfp.resize(nt.ncell);
+                vars_to_report =
+                    get_lfp_vars_to_report(nt, m_report_config, mapinfo->_lfp.data(), nodes_to_gid);
+                is_soma_target = m_report_config.section_type == SectionType::Soma ||
+                                 m_report_config.section_type == SectionType::Cell;
+                register_section_report(nt, m_report_config, vars_to_report, is_soma_target);
+                break;
             default:
                 vars_to_report = get_synapse_vars_to_report(nt, m_report_config, nodes_to_gid);
                 register_custom_report(nt, m_report_config, vars_to_report);
@@ -67,7 +77,8 @@ void ReportHandler::create_report(double dt, double tstop, double delay) {
                                                               t,
                                                               vars_to_report,
                                                               m_report_config.output_path.data(),
-                                                              m_report_config.report_dt);
+                                                              m_report_config.report_dt,
+                                                              m_report_config.type);
             report_event->send(t, net_cvode_instance, &nt);
             m_report_events.push_back(std::move(report_event));
         }
@@ -341,6 +352,35 @@ VarsToReport ReportHandler::get_synapse_vars_to_report(
     return vars_to_report;
 }
 
+VarsToReport ReportHandler::get_lfp_vars_to_report(const NrnThread& nt,
+                                                   ReportConfiguration& report,
+                                                   double* report_variable,
+                                                   const std::vector<int>& nodes_to_gids) const {
+    auto& summation_report = nt.summation_report_handler_->summation_reports_[report.output_path];
+    VarsToReport vars_to_report;
+    for (int i = 0; i < nt.ncell; i++) {
+        int gid = nt.presyns[i].gid_;
+        if (report.target.find(gid) == report.target.end()) {
+            continue;
+        }
+        // IClamp is needed for the LFP calculation
+        auto mech_id = nrn_get_mechtype("IClamp");
+        Memb_list* ml = nt._ml_list[mech_id];
+        for (int j = 0; j < ml->nodecount; j++) {
+            auto segment_id = ml->nodeindices[j];
+            if ((nodes_to_gids[segment_id] == gid)) {
+                double* var_value = get_var_location_from_var_name(mech_id, "i", ml, j);
+                summation_report.currents_[segment_id].push_back(std::make_pair(var_value, -1));
+            }
+        }
+        std::vector<VarWithMapping> to_report;
+        double* variable = report_variable + i;
+        to_report.push_back(VarWithMapping(i, variable));
+        vars_to_report[gid] = to_report;
+    }
+    return vars_to_report;
+}
+
 // map GIDs of every compartment, it consist in a backward sweep then forward sweep algorithm
 std::vector<int> ReportHandler::map_gids(const NrnThread& nt) const {
     std::vector<int> nodes_gid(nt.end, -1);
diff --git a/coreneuron/io/reports/report_handler.hpp b/coreneuron/io/reports/report_handler.hpp
index 084886c96..746edbaee 100644
--- a/coreneuron/io/reports/report_handler.hpp
+++ b/coreneuron/io/reports/report_handler.hpp
@@ -44,6 +44,10 @@ class ReportHandler {
     VarsToReport get_synapse_vars_to_report(const NrnThread& nt,
                                             ReportConfiguration& report,
                                             const std::vector<int>& nodes_to_gids) const;
+    VarsToReport get_lfp_vars_to_report(const NrnThread& nt,
+                                        ReportConfiguration& report,
+                                        double* report_variable,
+                                        const std::vector<int>& nodes_to_gids) const;
     std::vector<int> map_gids(const NrnThread& nt) const;
 #endif  // defined(ENABLE_BIN_REPORTS) || defined(ENABLE_SONATA_REPORTS)
   protected:
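
Note for reviewers: the per-cell value written by lfp_calc() above is a weighted sum over the cell's segments, lfp(gid) = sum over seg of (i_membrane[seg] + sum of registered point-process currents[seg] * scale) * factor[seg], where the factors are read from the mapping file in read_mapping_info() and NaN factors are treated as zero. The snippet below is a minimal, self-contained C++ sketch of that reduction only; SegmentCurrents, cell_lfp and the literal values in main are illustrative stand-ins, not CoreNEURON data structures or API.

// lfp_sketch.cpp -- illustrative only; the names below are NOT CoreNEURON API.
#include <cmath>
#include <cstdio>
#include <unordered_map>
#include <utility>
#include <vector>

// Per-segment inputs to the reduction: the fast-imem membrane current plus any
// point-process currents (e.g. IClamp) registered with a scale factor.
struct SegmentCurrents {
    double membrane_current;                             // like nrn_fast_imem->nrn_sav_rhs[seg]
    std::vector<std::pair<double, int>> point_currents;  // (current value, scale)
};

// Weighted sum over all segments of one cell; factors come from the mapping file.
double cell_lfp(const std::unordered_map<int, double>& lfp_factors,
                const std::unordered_map<int, SegmentCurrents>& currents) {
    double sum = 0.0;
    for (const auto& entry: lfp_factors) {
        const int segment_id = entry.first;
        double factor = entry.second;
        if (std::isnan(factor)) {
            factor = 0.0;  // a NaN factor contributes nothing, as in lfp_calc()
        }
        const auto it = currents.find(segment_id);
        if (it == currents.end()) {
            continue;
        }
        double iclamp = 0.0;
        for (const auto& pc: it->second.point_currents) {
            iclamp += pc.first * pc.second;  // IClamp currents are registered with scale -1
        }
        sum += (it->second.membrane_current + iclamp) * factor;
    }
    return sum;
}

int main() {
    // Two segments with factors as they would come from the mapping file;
    // segment 1 also carries an IClamp current registered with scale -1.
    const std::unordered_map<int, double> factors{{0, 0.25}, {1, 0.75}};
    const std::unordered_map<int, SegmentCurrents> currents{
        {0, {0.10, {}}},
        {1, {0.20, {{0.05, -1}}}},
    };
    std::printf("lfp = %g\n", cell_lfp(factors, currents));  // 0.25*0.10 + 0.75*(0.20 - 0.05)
    return 0;
}

In the diff itself the point-process currents are reached through the double* pointers stored in the summation report and the factors live in CellMapping::lfp_factors; the sketch inlines plain values only so the example stays runnable on its own.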